pax_global_header00006660000000000000000000000064147462113250014517gustar00rootroot0000000000000052 comment=ffec1a5dac2932243a30b2eda912caa3c38865e8 nginx-plus-go-client-2.3.0/000077500000000000000000000000001474621132500155045ustar00rootroot00000000000000nginx-plus-go-client-2.3.0/.editorconfig000066400000000000000000000003111474621132500201540ustar00rootroot00000000000000root = true [*] charset = utf-8 end_of_line = lf insert_final_newline = true trim_trailing_whitespace = true indent_size = 4 indent_style = tab [*.{md,yml,yaml}] indent_size = 2 indent_style = space nginx-plus-go-client-2.3.0/.github/000077500000000000000000000000001474621132500170445ustar00rootroot00000000000000nginx-plus-go-client-2.3.0/.github/ISSUE_TEMPLATE/000077500000000000000000000000001474621132500212275ustar00rootroot00000000000000nginx-plus-go-client-2.3.0/.github/ISSUE_TEMPLATE/bug_report.md000066400000000000000000000010451474621132500237210ustar00rootroot00000000000000--- name: Bug report about: Create a report to help us improve --- **Describe the bug** A clear and concise description of what the bug is. **To Reproduce** Steps to reproduce the behavior, such as: 1. Try adding upstream through the client 2. Returns a panic 3. Here is the stacktrace **Expected behavior** A clear and concise description of what you expected to happen. **Your environment** - Version of nginx-plus-go-client - Version of NGINX Plus - Version of the OS **Additional context** Add any other context about the problem here. nginx-plus-go-client-2.3.0/.github/ISSUE_TEMPLATE/feature_request.md000066400000000000000000000010411474621132500247500ustar00rootroot00000000000000--- name: Feature request about: Suggest an idea for this project --- **Is your feature request related to a problem? Please describe.** A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] **Describe the solution you'd like** A clear and concise description of what you want to happen. **Describe alternatives you've considered** A clear and concise description of any alternative solutions or features you've considered. **Additional context** Add any other context about the feature request here. nginx-plus-go-client-2.3.0/.github/PULL_REQUEST_TEMPLATE.md000066400000000000000000000013571474621132500226530ustar00rootroot00000000000000### Proposed changes Describe the use case and detail of the change. If this PR addresses an issue on GitHub, make sure to include a link to that issue here in this description (not in the title of the PR). ### Checklist Before creating a PR, run through this checklist and mark each as complete. 
- [ ] I have read the [CONTRIBUTING](https://github.com/nginx/nginx-plus-go-client/blob/main/CONTRIBUTING.md) doc - [ ] I have added tests that prove my fix is effective or that my feature works - [ ] I have checked that all unit tests pass after adding my changes - [ ] I have updated necessary documentation - [ ] I have rebased my branch onto main - [ ] I will ensure my PR is targeting the main branch and pulling from my branch from my own fork nginx-plus-go-client-2.3.0/.github/release.yml000066400000000000000000000010651474621132500212110ustar00rootroot00000000000000changelog: exclude: labels: - skip changelog categories: - title: ๐Ÿ’ฃ Breaking Changes labels: - change - title: ๐Ÿš€ Features labels: - enhancement - title: ๐Ÿ› Bug Fixes labels: - bug - title: ๐Ÿงช Tests labels: - tests - title: ๐Ÿ”จ Maintenance labels: - chore - title: ๐Ÿ“ Documentation labels: - documentation - title: โฌ†๏ธ Dependencies labels: - dependencies - title: Other Changes labels: - "*" nginx-plus-go-client-2.3.0/.github/workflows/000077500000000000000000000000001474621132500211015ustar00rootroot00000000000000nginx-plus-go-client-2.3.0/.github/workflows/ci.yml000066400000000000000000000066361474621132500222320ustar00rootroot00000000000000name: Continuous Integration on: push: branches: - main tags: - "v[0-9]+.[0-9]+.[0-9]+" pull_request: branches: - main schedule: - cron: "0 5 * * *" # runs every day at 5am UTC merge_group: types: - checks_requested defaults: run: shell: bash concurrency: group: ${{ github.ref_name }}-ci cancel-in-progress: true permissions: contents: read jobs: unit-test: name: Unit Test runs-on: ubuntu-24.04 permissions: contents: read strategy: fail-fast: false matrix: go-version: ["1.21", "1.22", stable] steps: - name: Checkout Repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Setup Golang Environment uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 with: go-version: ${{ matrix.go-version }} - name: Run Unit Tests run: make unit-test build: name: Build Client runs-on: ubuntu-24.04 permissions: contents: write issues: write if: ${{ github.event.repository.fork == false }} steps: - name: Checkout Repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Switch Repository (Nightly) if: (github.event_name == 'schedule') run: | sed -i 's|pkgs.nginx.com|pkgs-test.nginx.com|g' docker/Dockerfile sed -i '16d' docker/Dockerfile sed -i "17i sed -i 's|pkgs|pkgs-test|g' /etc/apt/apt.conf.d/90pkgs-nginx" docker/Dockerfile sed -i 's|deb https|deb [trusted=yes] https|g' docker/Dockerfile sed -i 's|\${NGINX_PLUS_VERSION}/||g' docker/Dockerfile - name: Set up Docker Buildx uses: docker/setup-buildx-action@6524bf65af31da8d45b59e8c27de4bd072b392f5 # v3.8.0 - name: Build Plus Docker Image uses: docker/build-push-action@ca877d9245402d1537745e0e356eab47c3520991 # v6.13.0 with: file: docker/Dockerfile tags: nginx-plus cache-from: type=gha cache-to: type=gha,mode=max load: true secrets: | "nginx-repo.crt=${{ secrets.NGINX_CRT }}" "nginx-repo.key=${{ secrets.NGINX_KEY }}" - name: Test Client run: docker compose up test --exit-code-from test - name: Test Client No Stream run: docker compose up test-no-stream --exit-code-from test-no-stream - name: Create/Update Draft uses: lucacome/draft-release@5d29432a46bff6c122cd4b07a1fb94e1bb158d34 # v1.1.1 id: release-notes with: minor-label: "enhancement" major-label: "change" publish: ${{ github.ref_type == 'tag' }} collapse-after: 50 if: ${{ github.event_name == 'push' }} - 
name: Setup Golang Environment uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 with: go-version: stable if: ${{ github.ref_type == 'tag' }} - name: Run GoReleaser uses: goreleaser/goreleaser-action@9ed2f89a662bf1735a48bc8557fd212fa902bebf # v6.1.0 with: version: v2.6.1 # renovate: datasource=github-tags depName=goreleaser/goreleaser args: release --clean env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_COMMUNITY }} if: ${{ github.ref_type == 'tag' }} nginx-plus-go-client-2.3.0/.github/workflows/codeql-analysis.yml000066400000000000000000000064221474621132500247200ustar00rootroot00000000000000name: CodeQL on: push: branches: - main pull_request: # The branches below must be a subset of the branches above branches: - main schedule: - cron: "33 16 * * 3" # run every Wednesday at 16:33 UTC merge_group: types: - checks_requested concurrency: group: ${{ github.ref_name }}-codeql cancel-in-progress: true permissions: contents: read jobs: analyze: name: Analyze runs-on: ubuntu-24.04 permissions: actions: read # for github/codeql-action/init to get workflow details contents: read # for actions/checkout to fetch code security-events: write # for github/codeql-action/autobuild to send a status report packages: read # required to fetch internal or private CodeQL packs if: ${{ github.event_name != 'merge_group' }} strategy: fail-fast: false matrix: include: - language: go build-mode: autobuild # CodeQL supports the following values keywords for 'language': 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift' # Use `c-cpp` to analyze code written in C, C++ or both # Use 'java-kotlin' to analyze code written in Java, Kotlin or both # Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both # To learn more about changing the languages that are analyzed or customizing the build mode for your analysis, # see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning. # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages steps: - name: Checkout repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Setup Golang Environment uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 with: go-version: stable if: matrix.language == 'go' # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL uses: github/codeql-action/init@f6091c0113d1dcf9b98e269ee48e8a7e51b7bdd4 # v3.28.5 with: languages: ${{ matrix.language }} build-mode: ${{ matrix.build-mode }} # If you wish to specify custom queries, you can do so here or in a config file. # By default, queries listed here will override any specified in a config file. # Prefix the list here with "+" to use these queries and those in the config file. 
# For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs # queries: security-extended,security-and-quality - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@f6091c0113d1dcf9b98e269ee48e8a7e51b7bdd4 # v3.28.5 with: category: "/language:${{matrix.language}}" nginx-plus-go-client-2.3.0/.github/workflows/dependency-review.yml000066400000000000000000000015341474621132500252440ustar00rootroot00000000000000name: Dependency Review on: pull_request: branches: - main merge_group: types: - checks_requested concurrency: group: ${{ github.ref_name }}-deps-review cancel-in-progress: true permissions: contents: read jobs: dependency-review: name: Dependency Review runs-on: ubuntu-24.04 permissions: contents: read # for actions/checkout pull-requests: write # for actions/dependency-review-action to post comments if: ${{ github.event_name != 'merge_group' }} steps: - name: Checkout Repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Dependency Review uses: actions/dependency-review-action@3b139cfc5fae8b618d3eae3675e383bb1769c019 # v4.5.0 with: config-file: "nginx/k8s-common/dependency-review-config.yml@main" nginx-plus-go-client-2.3.0/.github/workflows/f5-cla.yml000066400000000000000000000042751474621132500227030ustar00rootroot00000000000000name: F5 CLA on: issue_comment: types: - created pull_request_target: types: - opened - synchronize - reopened concurrency: group: ${{ github.ref_name }}-cla permissions: contents: read jobs: f5-cla: name: F5 CLA runs-on: ubuntu-24.04 permissions: actions: write contents: read pull-requests: write statuses: write steps: - name: Run F5 Contributor License Agreement (CLA) assistant if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have hereby read the F5 CLA and agree to its terms') || github.event_name == 'pull_request_target' uses: contributor-assistant/github-action@ca4a40a7d1004f18d9960b404b97e5f30a505a08 # v2.6.1 with: # Any pull request targeting the following branch will trigger a CLA check. branch: "main" # Path to the CLA document. path-to-document: "https://github.com/f5/.github/blob/main/CLA/cla-markdown.md" # Custom CLA messages. custom-notsigned-prcomment: "๐ŸŽ‰ Thank you for your contribution! It appears you have not yet signed the F5 Contributor License Agreement (CLA), which is required for your changes to be incorporated into an F5 Open Source Software (OSS) project. Please kindly read the [F5 CLA](https://github.com/f5/.github/blob/main/CLA/cla-markdown.md) and reply on a new comment with the following text to agree:" custom-pr-sign-comment: "I have hereby read the F5 CLA and agree to its terms" custom-allsigned-prcomment: "โœ… All required contributors have signed the F5 CLA for this PR. Thank you!" # Remote repository storing CLA signatures. remote-organization-name: "f5" remote-repository-name: "f5-cla-data" path-to-signatures: "signatures/beta/signatures.json" # Comma separated list of usernames for maintainers or any other individuals who should not be prompted for a CLA. allowlist: bot* # Do not lock PRs after a merge. 
lock-pullrequest-aftermerge: false env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} PERSONAL_ACCESS_TOKEN: ${{ secrets.F5_CLA_TOKEN }} nginx-plus-go-client-2.3.0/.github/workflows/fossa.yml000066400000000000000000000011241474621132500227350ustar00rootroot00000000000000name: Fossa on: push: branches: - main paths-ignore: - "**.md" concurrency: group: ${{ github.ref_name }}-fossa cancel-in-progress: true permissions: contents: read jobs: scan: name: Fossa runs-on: ubuntu-24.04 if: ${{ github.event.repository.fork == false }} steps: - name: Checkout Repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Scan uses: fossas/fossa-action@93a52ecf7c3ac7eb40f5de77fd69b1a19524de94 # v1.5.0 with: api-key: ${{ secrets.FOSSA_TOKEN }} nginx-plus-go-client-2.3.0/.github/workflows/labeler.yml000066400000000000000000000012621474621132500232330ustar00rootroot00000000000000name: "Pull Request Labeler" on: - pull_request_target permissions: contents: read jobs: triage: permissions: contents: read pull-requests: write # for actions/labeler to add labels runs-on: ubuntu-24.04 steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: sparse-checkout: | labeler.yml sparse-checkout-cone-mode: false repository: nginx/k8s-common - uses: actions/labeler@8558fd74291d67161a8a78ce36a881fa63b766a9 # v5.0.0 with: repo-token: "${{ secrets.GITHUB_TOKEN }}" sync-labels: true configuration-path: labeler.yml nginx-plus-go-client-2.3.0/.github/workflows/lint.yml000066400000000000000000000035401474621132500225740ustar00rootroot00000000000000name: Lint on: pull_request: branches: - main merge_group: types: - checks_requested defaults: run: shell: bash concurrency: group: ${{ github.ref_name }}-lint cancel-in-progress: true permissions: contents: read jobs: lint: name: Go Lint runs-on: ubuntu-24.04 steps: - name: Checkout Repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Setup Golang Environment uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 with: go-version: stable - name: Lint Go uses: golangci/golangci-lint-action@ec5d18412c0aeab7936cb16880d708ba2a64e1ae # v6.2.0 with: version: v1.63.4 # renovate: datasource=github-tags depName=golangci/golangci-lint actionlint: name: Actionlint runs-on: ubuntu-24.04 steps: - name: Checkout Repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Lint Actions uses: reviewdog/action-actionlint@abd537417cf4991e1ba8e21a67b1119f4f53b8e0 # v1.64.1 with: actionlint_flags: -shellcheck "" markdown-lint: name: Markdown Lint runs-on: ubuntu-24.04 steps: - name: Checkout Repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Lint Markdown uses: DavidAnson/markdownlint-cli2-action@05f32210e84442804257b2a6f20b273450ec8265 # v19.1.0 with: config: .markdownlint-cli2.yaml globs: "**/*.md" fix: false yaml-lint: name: YAML lint runs-on: ubuntu-24.04 steps: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Lint YAML uses: reviewdog/action-yamllint@1dca3ad811867be18fbe293a9818d715a6c2cd46 # v1.20.0 nginx-plus-go-client-2.3.0/.github/workflows/notifications.yml000066400000000000000000000055461474621132500245070ustar00rootroot00000000000000name: Notification on: workflow_run: branches: main workflows: - "Continuous Integration" - "CodeQL" - "Fossa" - "Lint" types: - completed permissions: contents: read jobs: on-failure: runs-on: ubuntu-24.04 if: ${{ 
github.event.workflow_run.conclusion == 'failure' && github.event.repository.fork == false }} permissions: contents: read actions: read # for 8398a7/action-slack steps: - name: Data uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 continue-on-error: true id: data with: script: | const message = context.payload.workflow_run.head_commit.message message_sanitized = message.split('\n')[0] const check_data = (await github.rest.checks.listForRef({ owner: context.payload.repository.owner.login, repo: context.payload.repository.name, ref: context.payload.workflow_run.head_commit.id, })).data.check_runs.filter(check_run => check_run.conclusion === 'failure')[0] return { job_name: check_data.name, job_url: check_data.html_url, commit_message: message_sanitized, } - name: Send Notification uses: 8398a7/action-slack@28ba43ae48961b90635b50953d216767a6bea486 # v3.16.2 with: status: custom custom_payload: | { username: 'Github', icon_emoji: ':github:', mention: 'channel', attachments: [{ title: '[${{ github.event.repository.full_name }}] ${{ github.event.workflow.name }} pipeline has failed (${{ github.event.workflow_run.event }})', color: 'danger', fields: [{ title: 'Commit', value: ``, short: true }, { title: 'Failed Job', value: `<${{ fromJSON(steps.data.outputs.result).job_url }}|${{ fromJSON(steps.data.outputs.result).job_name }}>`, short: true }, { title: 'Author', value: `${{ github.event.workflow_run.head_commit.author.name }}`, short: true }, { title: 'Pipeline URL', value: ``, short: true }] }] } env: SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK }} nginx-plus-go-client-2.3.0/.github/workflows/scorecard.yml000066400000000000000000000043111474621132500235700ustar00rootroot00000000000000name: OpenSSF Scorecards on: branch_protection_rule: # yamllint disable-line rule:empty-values schedule: - cron: "42 15 * * 6" # run every Saturday at 15:42 UTC push: branches: - main # Declare default permissions as read only. permissions: read-all jobs: analysis: name: Scorecard analysis runs-on: ubuntu-latest if: ${{ github.event.repository.fork == false }} permissions: # Needed to upload the results to code-scanning dashboard. security-events: write # Needed to publish results and get a badge (see publish_results below). id-token: write # Uncomment the permissions below if installing in a private repository. # contents: read # actions: read steps: - name: "Checkout code" uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: persist-credentials: false - name: "Run analysis" uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # v2.4.0 with: results_file: results.sarif results_format: sarif repo_token: ${{ secrets.SCORECARD_TOKEN }} # Public repositories: # - Publish results to OpenSSF REST API for easy access by consumers # - Allows the repository to include the Scorecard badge. # - See https://github.com/ossf/scorecard-action#publishing-results. # For private repositories: # - `publish_results` will always be set to `false`, regardless # of the value entered here. publish_results: true # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. - name: "Upload artifact" uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 with: name: SARIF file path: results.sarif retention-days: 5 # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" uses: github/codeql-action/upload-sarif@f6091c0113d1dcf9b98e269ee48e8a7e51b7bdd4 # v3.28.5 with: sarif_file: results.sarif nginx-plus-go-client-2.3.0/.github/workflows/stale.yml000066400000000000000000000023421474621132500227350ustar00rootroot00000000000000name: "Close stale issues and PRs" on: schedule: - cron: "30 1 * * *" permissions: contents: read jobs: stale: permissions: issues: write # for actions/stale to close stale issues pull-requests: write # for actions/stale to close stale PRs runs-on: ubuntu-24.04 steps: - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0 with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-issue-message: "This issue is stale because it has been open 60 days with no activity. Remove stale label or comment or this will be closed in 7 days." stale-pr-message: "This PR is stale because it has been open 60 days with no activity. Remove stale label or comment or this will be closed in 7 days." close-issue-message: "This issue was closed because it has been stalled for 7 days with no activity." close-pr-message: "This PR was closed because it has been stalled for 7 days with no activity." stale-issue-label: "stale" stale-pr-label: "stale" exempt-all-issue-milestones: true exempt-issue-labels: "backlog, backlog candidate, epic" exempt-all-pr-assignees: true operations-per-run: 100 nginx-plus-go-client-2.3.0/.gitignore000066400000000000000000000001561474621132500174760ustar00rootroot00000000000000# NGINX Plus license files *.crt *.key # Visual Studio Code settings .vscode # Goland settings .idea/ dist nginx-plus-go-client-2.3.0/.golangci.yml000066400000000000000000000040271474621132500200730ustar00rootroot00000000000000linters-settings: misspell: locale: US revive: ignore-generated-header: true rules: - name: blank-imports - name: constant-logical-expr - name: context-as-argument - name: context-keys-type - name: defer - name: dot-imports - name: duplicated-imports - name: empty-block - name: error-naming - name: error-return - name: error-strings - name: errorf - name: exported - name: import-shadowing - name: increment-decrement - name: indent-error-flow - name: package-comments - name: range - name: range-val-address - name: range-val-in-closure - name: receiver-naming - name: redefines-builtin-id - name: string-of-int - name: superfluous-else - name: time-naming - name: unchecked-type-assertion - name: unexported-return - name: unnecessary-stmt - name: unreachable-code - name: unused-parameter - name: var-declaration - name: var-naming govet: enable-all: true linters: enable: - asasalint - asciicheck - bidichk - containedctx - contextcheck - copyloopvar - dupword - durationcheck - err113 - errcheck - errchkjson - errname - errorlint - fatcontext - forcetypeassert - gocheckcompilerdirectives - gochecksumtype - gocritic - godot - gofmt - gofumpt - goimports - gosec - gosimple - gosmopolitan - govet - ineffassign - intrange - makezero - mirror - misspell - musttag - nilerr - noctx - nolintlint - perfsprint - prealloc - predeclared - paralleltest - reassign - revive - staticcheck - stylecheck - tagalign - tenv - thelper - tparallel - typecheck - unconvert - unparam - unused - usestdlibvars - wastedassign - whitespace - wrapcheck disable-all: true issues: max-issues-per-linter: 0 max-same-issues: 0 run: timeout: 5m nginx-plus-go-client-2.3.0/.goreleaser.yaml000066400000000000000000000003771474621132500206050ustar00rootroot00000000000000version: 2 builds: - skip: true changelog: disable: true announce: slack: 
enabled: true channel: "#announcements" message_template: "NGINX Plus Go Client {{ .Tag }} is out! Check it out: {{ .ReleaseURL }}" milestones: - close: true nginx-plus-go-client-2.3.0/.markdownlint-cli2.yaml000066400000000000000000000006041474621132500220060ustar00rootroot00000000000000# Rule configuration. # For rule descriptions and how to fix: https://github.com/DavidAnson/markdownlint/tree/main#rules--aliases config: ul-style: style: dash no-duplicate-heading: siblings_only: true line-length: line_length: 120 code_blocks: false tables: false # Define glob expressions to ignore ignores: - ".github/" # Fix any fixable errors fix: true nginx-plus-go-client-2.3.0/.pre-commit-config.yaml000066400000000000000000000030031474621132500217610ustar00rootroot00000000000000# See https://pre-commit.com for more information # See https://pre-commit.com/hooks.html for more hooks repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v5.0.0 hooks: - id: trailing-whitespace - id: end-of-file-fixer - id: check-yaml args: [--allow-multiple-documents] - id: check-added-large-files - id: check-merge-conflict - id: check-shebang-scripts-are-executable - id: check-executables-have-shebangs - id: check-case-conflict - id: check-vcs-permalinks - id: check-json - id: pretty-format-json args: [--autofix, --no-ensure-ascii] - id: mixed-line-ending args: [--fix=lf] - id: no-commit-to-branch - id: fix-byte-order-marker - id: detect-private-key - repo: https://github.com/golangci/golangci-lint rev: v1.63.4 hooks: - id: golangci-lint-full - repo: https://github.com/gitleaks/gitleaks rev: v8.23.2 hooks: - id: gitleaks - repo: https://github.com/DavidAnson/markdownlint-cli2 rev: v0.17.2 hooks: - id: markdownlint-cli2 - repo: https://github.com/adrienverge/yamllint.git rev: v1.35.1 hooks: - id: yamllint - repo: https://github.com/thlorenz/doctoc rev: v2.2.0 hooks: - id: doctoc args: [--update-only, --title, "## Table of Contents"] ci: skip: [golangci-lint-full] autoupdate_schedule: quarterly # We use renovate for more frequent updates and there's no way to disable autoupdate nginx-plus-go-client-2.3.0/.yamllint.yaml000066400000000000000000000004451474621132500203020ustar00rootroot00000000000000--- ignore-from-file: .gitignore extends: default rules: comments: min-spaces-from-content: 1 comments-indentation: enable document-start: disable empty-values: enable line-length: max: 120 ignore: | .goreleaser.yml .github/ truthy: check-keys: false nginx-plus-go-client-2.3.0/CHANGELOG.md000066400000000000000000000102321474621132500173130ustar00rootroot00000000000000# Changelog Starting with version 0.8.0, an automatically generated list of changes can be found on the [GitHub Releases page](https://github.com/nginx/nginx-plus-go-client/releases). ## 0.7.0 (Jul 10, 2020) FEATURES: - [38](https://github.com/nginx/nginx-plus-go-client/pull/38): _Support for /slabs API endpoint_. The client now supports retrieving shared memory zone usage info. - [41](https://github.com/nginx/nginx-plus-go-client/pull/41): _Support for /processes API endpoint_. The client now supports retrieving processes info. CHANGES: - The version of NGINX Plus for e2e testing was changed to R22. - The version of Go was changed to 1.14 ## 0.6.0 (Nov 8, 2019) FEATURES: - [34](https://github.com/nginx/nginx-plus-go-client/pull/34): _Support for updating upstream servers parameters_. The client now supports updating upstream parameters of servers that already exist in NGINX Plus. 
CHANGES: - Public methods `UpdateHTTPServers` and `UpdateStreamServers` now return a third slice that includes the updated servers -- i.e. the servers that were already present in NGINX Plus but were updated with different parameters. - Client will assume port `80` in addresses of updated servers of `UpdateHTTPServers` and `UpdateStreamServers` if port is not explicitly set. - The version of Go was changed to 1.13 ## 0.5.0 (Sep 25, 2019) FEATURES: - [30](https://github.com/nginx/nginx-plus-go-client/pull/30): _Support additional upstream server parameters_. The client now supports configuring `route`, `backup`, `down`, `drain`, `weight` and `service` parameters for http upstreams and `backup`, `down`, `weight` and `service` parameters for stream upstreams. - [31](https://github.com/nginx/nginx-plus-go-client/pull/31): _Support location zones and resolver metrics_. FIXES: - [29](https://github.com/nginx/nginx-plus-go-client/pull/29): _Fix max_fails parameter in upstream servers_. Previously, if the MaxFails field was not explicitly set, the client would incorrectly configure an upstream with the value `0` instead of the correct value `1`. CHANGES: - The version of NGINX Plus for e2e testing was changed to R19. - The version of the API was changed to 5. ## 0.4.0 (July 17, 2019) FEATURES: - [24](https://github.com/nginx/nginx-plus-go-client/pull/24): _Support `MaxConns` in upstream servers_. FIXES: - [25](https://github.com/nginx/nginx-plus-go-client/pull/25): _Fix session metrics for stream server zones_. Session metrics with a status of `4xx` or `5xx` are now correctly reported. Previously they were always reported as `0`. ## 0.3.1 (June 10, 2019) CHANGES: - [22](https://github.com/nginx/nginx-plus-go-client/pull/22): _Change in stream zone sync metrics_. `StreamZoneSync` field of the `Stats` type is now a pointer. It will be nil if NGINX Plus doesn't report any zone sync stats. ## 0.3 (May 29, 2019) FEATURES: - [20](https://github.com/nginx/nginx-plus-go-client/pull/20): _Support for stream zone sync metrics_. The client `GetStats` method now additionally returns stream zone sync metrics. - [13](https://github.com/nginx/nginx-plus-go-client/pull/13): _Support for key-value endpoints_. The client implements a set of methods to create/modify/delete key-val pairs for both http and stream contexts. - [12](https://github.com/nginx/nginx-plus-go-client/pull/12) _Support for NGINX status info_. The client `GetStats` method now additionally returns NGINX status metrics. Thanks to [jthurman42](https://github.com/jthurman42). CHANGES: - The repository was renamed to `nginx-plus-go-client` instead of `nginx-plus-go-sdk`. If the client is used as a dependency, this name needs to be changed in the import section (`import "github.com/nginxinc/nginx-plus-go-client/client"`). - The version of the API was changed to 4. - The version of NGINX Plus for e2e testing was changed to R18. ## 0.2 (Sep 7, 2018) FEATURES: - [7](https://github.com/nginx/nginx-plus-go-client/pull/7): _Support for stream server zone and stream upstream metrics_. The client `GetStats` method now additionally returns stream server zone and stream upstream metrics. CHANGES: - The version of NGINX Plus for e2e testing was changed to R16. 
## 0.1 (July 30, 2018) Initial release nginx-plus-go-client-2.3.0/CODEOWNERS000066400000000000000000000000261474621132500170750ustar00rootroot00000000000000* @nginx/integrations nginx-plus-go-client-2.3.0/CODE_OF_CONDUCT.md000066400000000000000000000063531474621132500203120ustar00rootroot00000000000000# Code of Conduct This project and everyone participating in it is governed by this code. ## Our Pledge In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. ## Our Standards Examples of behavior that contributes to creating a positive environment include: - Using welcoming and inclusive language - Being respectful of differing viewpoints and experiences - Gracefully accepting constructive criticism - Focusing on what is best for the community - Showing empathy towards other community members Examples of unacceptable behavior by participants include: - The use of sexualized language or imagery and unwelcome sexual attention or advances - Trolling, insulting/derogatory comments, and personal or political attacks - Public or private harassment - Publishing others' private information, such as a physical or electronic address, without explicit permission - Other conduct which could reasonably be considered inappropriate in a professional setting ## Our Responsibilities Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. ## Scope This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at . All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 
## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [homepage]: https://www.contributor-covenant.org nginx-plus-go-client-2.3.0/CONTRIBUTING.md000066400000000000000000000103031474621132500177320ustar00rootroot00000000000000# Contributing Guidelines The following is a set of guidelines for contributing to the NGINX Plus Go Client. We really appreciate that you are considering contributing! ## Table of Contents - [Ask a Question](#ask-a-question) - [Getting Started](#getting-started) - [Contributing](#contributing) - [Report a Bug](#report-a-bug) - [Suggest an Enhancement](#suggest-an-enhancement) - [Open a Pull Request](#open-a-pull-request) - [Issue lifecycle](#issue-lifecycle) - [F5 Contributor License Agreement (CLA)](#f5-contributor-license-agreement-cla) - [Style Guides](#style-guides) - [Git Style Guide](#git-style-guide) - [Go Style Guide](#go-style-guide) ## Ask a Question To ask a question please use [Github Discussions](https://github.com/nginx/nginx-plus-go-client/discussions). You can also join our [Community Slack](https://community.nginx.org/joinslack) which has a wider NGINX audience. Please reserve GitHub issues for feature requests and bugs rather than general questions. ## Getting Started Read the usage and testing steps in the [README](README.md). ## Contributing ### Report a Bug To report a bug, open an issue on GitHub with the label `bug` using the available bug report issue template. Please ensure the issue has not already been reported. ### Suggest an Enhancement To suggest an enhancement, please create an issue on GitHub with the label `enhancement` using the available feature issue template. ### Open a Pull Request - Fork the repo, create a branch, submit a PR when your changes are tested and ready for review - Fill in [our pull request template](.github/PULL_REQUEST_TEMPLATE.md) > **Note** > > If youโ€™d like to implement a new feature, please consider creating a feature request issue first to start a discussion > about the feature. ### Issue lifecycle - When an issue or PR is created, it will be triaged by the core development team and assigned a label to indicate the type of issue it is (bug, feature request, etc) and to determine the milestone. Please see the [Issue Lifecycle](ISSUE_LIFECYCLE.md) document for more information. ### F5 Contributor License Agreement (CLA) F5 requires all external contributors to agree to the terms of the F5 CLA (available [here](https://github.com/f5/.github/blob/main/CLA/cla-markdown.md)) before any of their changes can be incorporated into an F5 Open Source repository. If you have not yet agreed to the F5 CLA terms and submit a PR to this repository, a bot will prompt you to view and agree to the F5 CLA. You will have to agree to the F5 CLA terms through a comment in the PR before any of your changes can be merged. Your agreement signature will be safely stored by F5 and no longer be required in future PRs. ## Style Guides ### Git Style Guide - Keep a clean, concise and meaningful git commit history on your branch, rebasing locally and squashing before submitting a PR - Follow the guidelines of writing a good commit message as described here and summarized in the next few points - In the subject line, use the present tense ("Add feature" not "Added feature") - In the subject line, use the imperative mood ("Move cursor to..." 
not "Moves cursor to...") - Limit the subject line to 72 characters or less - Reference issues and pull requests liberally after the subject line - Add more detailed description in the body of the git message (`git commit -a` to give you more space and time in your text editor to write a good message instead of `git commit -am`) ### Go Style Guide - Run `gofmt` over your code to automatically resolve a lot of style issues. Most editors support this running automatically when saving a code file. - Run `go lint` and `go vet` on your code too to catch any other issues. - Follow this guide on some good practice and idioms for Go - - To check for extra issues, install [golangci-lint](https://github.com/golangci/golangci-lint) and run `make lint` or `golangci-lint run` nginx-plus-go-client-2.3.0/ISSUE_LIFECYCLE.md000066400000000000000000000061551474621132500203040ustar00rootroot00000000000000# Issue Lifecycle To ensure a balance between work carried out by the NGINX engineering team while encouraging community involvement on this project, we use the following issue lifecycle. (Note: The issue *creator* refers to the community member that created the issue. The issue *owner* refers to the NGINX team member that is responsible for managing the issue lifecycle.) 1. New issue created by community member. 2. Assign issue owner: All new issues are assigned an owner on the NGINX engineering team. This owner shepherds the issue through the subsequent stages in the issue lifecycle. 3. Determine issue type: This is done with automation where possible, and manually by the owner where necessary. The associated label is applied to the issue. Possible Issue Types: - `needs more info`: The owner should use the issue to request information from the creator. If we don't receive the needed information within 7 days, automation closes the issue. - `bug`: The implementation of a feature is not correct. - `proposal`: Request for a change. This can be a new feature, tackling technical debt, documentation changes, or improving existing features. - `question`: The owner converts the issue to a github discussion and engages the creator. 4. Determine milestone: The owner, in collaboration with the wider team (PM & engineering), determines what milestone to attach to an issue. Generally, milestones correspond to product releases - however there are two 'magic' milestones with special meanings (not tied to a specific release): - Issues assigned to backlog: Our team is in favour of implementing the feature request/fixing the issue, however the implementation is not yet assigned to a concrete release. If and when a `backlog` issue aligns well with our roadmap, it will be scheduled for a concrete iteration. We review and update our roadmap at least once every quarter. The `backlog` list helps us shape our roadmap, but it is not the only source of input. Therefore, some `backlog` items may eventually be closed as `out of scope`, or relabelled as `backlog candidate` once it becomes clear that they do not align with our evolving roadmap. - Issues assigned to `backlog candidate`: Our team does not intend to implement the feature/fix request described in the issue and wants the community to weigh in before we make our final decision. `backlog` issues can be labeled by the owner as `help wanted` and/or `good first issue` as appropriate. 5. 
Promotion of `backlog candidate` issue to `backlog` issue: If an issue labelled `backlog candidate` receives more than 30 upvotes within 60 days, we promote the issue by applying the `backlog` label. While issues promoted in this manner have not been committed to a particular release, we welcome PRs from the community on them. If an issue does not make our roadmap and has not been moved to a discussion, it is closed with the label `out of scope`. The goal is to get every issue in the issues list to one of the following end states: - An assigned release. - The `backlog` label. - Closed as `out of scope`. nginx-plus-go-client-2.3.0/LICENSE000066400000000000000000000237771474621132500165310ustar00rootroot00000000000000Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) 
The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2018 Nginx, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. nginx-plus-go-client-2.3.0/Makefile000066400000000000000000000010771474621132500171510ustar00rootroot00000000000000# renovate: datasource=github-tags depName=golangci/golangci-lint GOLANGCI_LINT_VERSION = v1.63.4 test: unit-test test-integration test-integration-no-stream-block clean lint: go run github.com/golangci/golangci-lint/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION) run --fix unit-test: go test -v -shuffle=on -race client/*.go test-integration: docker compose up -d --build test docker compose logs -f test test-integration-no-stream-block: docker compose up -d --build test-no-stream docker compose logs -f test-no-stream clean: docker compose down --remove-orphans nginx-plus-go-client-2.3.0/README.md000066400000000000000000000101641474621132500167650ustar00rootroot00000000000000 [![OpenSSFScorecard](https://api.securityscorecards.dev/projects/github.com/nginx/nginx-plus-go-client/badge)](https://scorecard.dev/viewer/?uri=github.com/nginx/nginx-plus-go-client) [![Continuous Integration](https://github.com/nginx/nginx-plus-go-client/workflows/Continuous%20Integration/badge.svg)](https://github.com/nginx/nginx-plus-go-client/actions) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![Go Report Card](https://goreportcard.com/badge/github.com/nginx/nginx-plus-go-client)](https://goreportcard.com/report/github.com/nginx/nginx-plus-go-client) [![FOSSA Status](https://app.fossa.com/api/projects/custom%2B5618%2Fgithub.com%2Fnginx%2Fnginx-plus-go-client.svg?type=shield)](https://app.fossa.com/projects/custom%2B5618%2Fgithub.com%2Fnginx%2Fnginx-plus-go-client?ref=badge_shield) [![GitHub release (latest SemVer)](https://img.shields.io/github/v/release/nginx/nginx-plus-go-client?logo=github&sort=semver)](https://github.com/nginx/nginx-plus-go-client/releases/latest) ![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/nginx/nginx-plus-go-client?logo=go) [![Slack](https://img.shields.io/badge/slack-nginxcommunity-green?logo=slack)](https://nginxcommunity.slack.com) [![Project Status: Active โ€“ The project has reached a stable, usable state and is being actively developed.](https://www.repostatus.org/badges/latest/active.svg)](https://www.repostatus.org/#active) [![Community Support](https://badgen.net/badge/support/community/cyan?icon=awesome)](https://github.com/nginx/nginx-plus-go-client/blob/main/SECURITY.md) # NGINX Plus Go Client This project includes a client library for working with NGINX Plus API. 
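As a quick orientation before the detailed sections below, the following sketch shows one way the client is typically wired up. It is illustrative only: the `NewNginxClient` constructor, its option-based signature, and the `context.Context` arguments are assumptions about the v2 API surface rather than a verbatim excerpt; check `client/nginx.go` for the exact signatures of the version you vendor.

```go
package main

import (
	"context"
	"log"

	"github.com/nginx/nginx-plus-go-client/client"
)

func main() {
	// Assumed constructor: point the client at the NGINX Plus API endpoint
	// exposed by the `api` directive. The real signature lives in client/nginx.go.
	c, err := client.NewNginxClient("http://127.0.0.1:8080/api")
	if err != nil {
		log.Fatalf("failed to create NGINX Plus client: %v", err)
	}

	// GetStats aggregates the metrics exposed by the NGINX Plus API
	// (upstreams, server zones, caches, connections, and so on).
	stats, err := c.GetStats(context.Background())
	if err != nil {
		log.Fatalf("failed to fetch stats: %v", err)
	}
	log.Printf("active connections: %d", stats.Connections.Active)
}
```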
## Table of Contents - [About the Client](#about-the-client) - [Compatibility](#compatibility) - [Using the Client](#using-the-client) - [Testing](#testing) - [Unit tests](#unit-tests) - [Integration tests](#integration-tests) - [Contacts](#contacts) - [Contributing](#contributing) - [Support](#support) ## About the Client `client/nginx.go` includes functions and data structures for working with NGINX Plus API as well as some helper functions. ## Compatibility This Client works against versions 4 to 9 of the NGINX Plus API. The table below shows the version of NGINX Plus where the API was first introduced. | API version | NGINX Plus version | | ----------- | ------------------ | | 4 | R18 | | 5 | R19 | | 6 | R20 | | 7 | R25 | | 8 | R27 | | 9 | R30 | ## Using the Client 1. Import `github.com/nginx/nginx-plus-go-client/client` into your go project. 2. Use your favorite vendor tool to add this to your `/vendor` directory in your project. ## Testing ### Unit tests ```console cd client go test ``` ### Integration tests Prerequisites: - Docker - Docker Compose - golang - Make - NGINX Plus license - put `nginx-repo.crt` and `nginx-repo.key` into the `docker` folder. Run Tests: ```console make test ``` This will build and run two NGINX Plus containers and create one docker network of type bridge, execute the client tests against both NGINX Plus APIs, and then clean up. If it fails and you want to clean up (i.e. stop the running containers and remove the docker network), please use `make clean` ## Contacts Weโ€™d like to hear your feedback! If you have any suggestions or experience issues with the NGINX Plus Go Client, please create an issue or send a pull request on GitHub. You can contact us directly via or on the [NGINX Community Slack](https://nginxcommunity.slack.com). ## Contributing If you'd like to contribute to the project, please read our [Contributing guide](CONTRIBUTING.md). ## Support This project is not covered by the NGINX Plus support contract. nginx-plus-go-client-2.3.0/SECURITY.md000066400000000000000000000014061474621132500172760ustar00rootroot00000000000000# Security Policy ## Supported Versions We advise users to use the most recent release of NGINX Plus Go Client library. This project is not covered by the NGINX Plus support contract. ## Reporting a Vulnerability The F5 Security Incident Response Team (F5 SIRT) has an email alias that makes it easy to report potential security vulnerabilities. - If youโ€™re an F5 customer with an active support contract, please contact [F5 Technical Support](https://www.f5.com/services/support). - If you arenโ€™t an F5 customer, please report any potential or current instances of security vulnerabilities with any F5 product to the F5 Security Incident Response Team at For more information visit nginx-plus-go-client-2.3.0/client/000077500000000000000000000000001474621132500167625ustar00rootroot00000000000000nginx-plus-go-client-2.3.0/client/nginx.go000066400000000000000000001745311474621132500204470ustar00rootroot00000000000000package client import ( "bytes" "context" "encoding/json" "errors" "fmt" "io" "net/http" "reflect" "regexp" "slices" "strconv" "strings" "sync" "time" "golang.org/x/sync/errgroup" ) const ( // APIVersion is the default version of NGINX Plus API supported by the client. APIVersion = 9 pathNotFoundCode = "PathNotFound" streamContext = true httpContext = false defaultServerPort = "80" ) var ( supportedAPIVersions = versions{4, 5, 6, 7, 8, 9} // Default values for servers in Upstreams. 
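	// These mirror the documented NGINX Plus upstream defaults (weight=1, max_fails=1,
	// fail_timeout=10s, slow_start=0s, max_conns=0 meaning unlimited, backup/down=false).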
defaultMaxConns = 0 defaultMaxFails = 1 defaultFailTimeout = "10s" defaultSlowStart = "0s" defaultBackup = false defaultDown = false defaultWeight = 1 ) var ( ErrParameterRequired = errors.New("parameter is required") ErrServerNotFound = errors.New("server not found") ErrServerExists = errors.New("server already exists") ErrNotSupported = errors.New("not supported") ErrInvalidTimeout = errors.New("invalid timeout") ErrParameterMismatch = errors.New("encountered duplicate server with different parameters") ErrPlusVersionNotFound = errors.New("plus version not found in the input string") ) // NginxClient lets you access NGINX Plus API. type NginxClient struct { httpClient *http.Client apiEndpoint string apiVersion int checkAPI bool } type Option func(*NginxClient) type versions []int // UpstreamServer lets you configure HTTP upstreams. type UpstreamServer struct { MaxConns *int `json:"max_conns,omitempty"` MaxFails *int `json:"max_fails,omitempty"` Backup *bool `json:"backup,omitempty"` Down *bool `json:"down,omitempty"` Weight *int `json:"weight,omitempty"` Server string `json:"server"` FailTimeout string `json:"fail_timeout,omitempty"` SlowStart string `json:"slow_start,omitempty"` Route string `json:"route,omitempty"` Service string `json:"service,omitempty"` ID int `json:"id,omitempty"` Drain bool `json:"drain,omitempty"` } // StreamUpstreamServer lets you configure Stream upstreams. type StreamUpstreamServer struct { MaxConns *int `json:"max_conns,omitempty"` MaxFails *int `json:"max_fails,omitempty"` Backup *bool `json:"backup,omitempty"` Down *bool `json:"down,omitempty"` Weight *int `json:"weight,omitempty"` Server string `json:"server"` FailTimeout string `json:"fail_timeout,omitempty"` SlowStart string `json:"slow_start,omitempty"` Service string `json:"service,omitempty"` ID int `json:"id,omitempty"` } type apiErrorResponse struct { RequestID string `json:"request_id"` Href string `json:"href"` Error apiError `json:"error"` } func (resp *apiErrorResponse) toString() string { return fmt.Sprintf("error.status=%v; error.text=%v; error.code=%v; request_id=%v; href=%v", resp.Error.Status, resp.Error.Text, resp.Error.Code, resp.RequestID, resp.Href) } type apiError struct { Text string `json:"text"` Code string `json:"code"` Status int `json:"status"` } type internalError struct { err string apiError } // Error allows internalError to match the Error interface. func (internalError *internalError) Error() string { return internalError.err } // Wrap is a way of including current context while preserving previous error information, // similar to `return fmt.Errorf("error doing foo, err: %v", err)` but for our internalError type. func (internalError *internalError) Wrap(err string) *internalError { internalError.err = fmt.Sprintf("%v. %v", err, internalError.err) return internalError } // this is an internal representation of the Stats object including endpoint and streamEndpoint lists. 
type extendedStats struct { endpoints []string streamEndpoints []string Stats } func defaultStats() *extendedStats { return &extendedStats{ endpoints: []string{}, streamEndpoints: []string{}, Stats: Stats{ Upstreams: map[string]Upstream{}, ServerZones: map[string]ServerZone{}, StreamServerZones: map[string]StreamServerZone{}, StreamUpstreams: map[string]StreamUpstream{}, Slabs: map[string]Slab{}, Caches: map[string]HTTPCache{}, HTTPLimitConnections: map[string]LimitConnection{}, StreamLimitConnections: map[string]LimitConnection{}, HTTPLimitRequests: map[string]HTTPLimitRequest{}, Resolvers: map[string]Resolver{}, LocationZones: map[string]LocationZone{}, StreamZoneSync: nil, Workers: []*Workers{}, NginxInfo: NginxInfo{}, SSL: SSL{}, Connections: Connections{}, HTTPRequests: HTTPRequests{}, Processes: Processes{}, }, } } // Stats represents NGINX Plus stats fetched from the NGINX Plus API. // https://nginx.org/en/docs/http/ngx_http_api_module.html type Stats struct { Upstreams Upstreams ServerZones ServerZones StreamServerZones StreamServerZones StreamUpstreams StreamUpstreams Slabs Slabs Caches Caches HTTPLimitConnections HTTPLimitConnections StreamLimitConnections StreamLimitConnections HTTPLimitRequests HTTPLimitRequests Resolvers Resolvers LocationZones LocationZones StreamZoneSync *StreamZoneSync Workers []*Workers NginxInfo NginxInfo SSL SSL Connections Connections HTTPRequests HTTPRequests Processes Processes } // NginxInfo contains general information about NGINX Plus. type NginxInfo struct { Version string Build string Address string LoadTimestamp string `json:"load_timestamp"` Timestamp string Generation uint64 ProcessID uint64 `json:"pid"` ParentProcessID uint64 `json:"ppid"` } // LicenseReporting contains information about license status for NGINX Plus. type LicenseReporting struct { Healthy bool Fails uint64 Grace uint64 } // NginxLicense contains licensing information about NGINX Plus. type NginxLicense struct { ActiveTill uint64 `json:"active_till"` Eval bool Reporting LicenseReporting } // Caches is a map of cache stats by cache zone. type Caches = map[string]HTTPCache // HTTPCache represents a zone's HTTP Cache. type HTTPCache struct { Size uint64 MaxSize uint64 `json:"max_size"` Cold bool Hit CacheStats Stale CacheStats Updating CacheStats Revalidated CacheStats Miss CacheStats Expired ExtendedCacheStats Bypass ExtendedCacheStats } // CacheStats are basic cache stats. type CacheStats struct { Responses uint64 Bytes uint64 } // ExtendedCacheStats are extended cache stats. type ExtendedCacheStats struct { CacheStats ResponsesWritten uint64 `json:"responses_written"` BytesWritten uint64 `json:"bytes_written"` } // Connections represents connection related stats. type Connections struct { Accepted uint64 Dropped uint64 Active uint64 Idle uint64 } // Slabs is map of slab stats by zone name. type Slabs map[string]Slab // Slab represents slab related stats. type Slab struct { Slots Slots Pages Pages } // Pages represents the slab memory usage stats. type Pages struct { Used uint64 Free uint64 } // Slots is a map of slots by slot size. type Slots map[string]Slot // Slot represents slot related stats. type Slot struct { Used uint64 Free uint64 Reqs uint64 Fails uint64 } // HTTPRequests represents HTTP request related stats. type HTTPRequests struct { Total uint64 Current uint64 } // SSL represents SSL related stats. 
type SSL struct { Handshakes uint64 HandshakesFailed uint64 `json:"handshakes_failed"` SessionReuses uint64 `json:"session_reuses"` NoCommonProtocol uint64 `json:"no_common_protocol"` NoCommonCipher uint64 `json:"no_common_cipher"` HandshakeTimeout uint64 `json:"handshake_timeout"` PeerRejectedCert uint64 `json:"peer_rejected_cert"` VerifyFailures VerifyFailures `json:"verify_failures"` } type VerifyFailures struct { NoCert uint64 `json:"no_cert"` ExpiredCert uint64 `json:"expired_cert"` RevokedCert uint64 `json:"revoked_cert"` HostnameMismatch uint64 `json:"hostname_mismatch"` Other uint64 `json:"other"` } // ServerZones is map of server zone stats by zone name. type ServerZones map[string]ServerZone // ServerZone represents server zone related stats. type ServerZone struct { Processing uint64 Requests uint64 Responses Responses Discarded uint64 Received uint64 Sent uint64 SSL SSL } // StreamServerZones is map of stream server zone stats by zone name. type StreamServerZones map[string]StreamServerZone // StreamServerZone represents stream server zone related stats. type StreamServerZone struct { Processing uint64 Connections uint64 Sessions Sessions Discarded uint64 Received uint64 Sent uint64 SSL SSL } // StreamZoneSync represents the sync information per each shared memory zone and the sync information per node in a cluster. type StreamZoneSync struct { Zones map[string]SyncZone Status StreamZoneSyncStatus } // SyncZone represents the synchronization status of a shared memory zone. type SyncZone struct { RecordsPending uint64 `json:"records_pending"` RecordsTotal uint64 `json:"records_total"` } // StreamZoneSyncStatus represents the status of a shared memory zone. type StreamZoneSyncStatus struct { BytesIn uint64 `json:"bytes_in"` MsgsIn uint64 `json:"msgs_in"` MsgsOut uint64 `json:"msgs_out"` BytesOut uint64 `json:"bytes_out"` NodesOnline uint64 `json:"nodes_online"` } // Responses represents HTTP response related stats. type Responses struct { Codes HTTPCodes Responses1xx uint64 `json:"1xx"` Responses2xx uint64 `json:"2xx"` Responses3xx uint64 `json:"3xx"` Responses4xx uint64 `json:"4xx"` Responses5xx uint64 `json:"5xx"` Total uint64 } // HTTPCodes represents HTTP response codes. 
type HTTPCodes struct { HTTPContinue uint64 `json:"100,omitempty"` HTTPSwitchingProtocols uint64 `json:"101,omitempty"` HTTPProcessing uint64 `json:"102,omitempty"` HTTPOk uint64 `json:"200,omitempty"` HTTPCreated uint64 `json:"201,omitempty"` HTTPAccepted uint64 `json:"202,omitempty"` HTTPNoContent uint64 `json:"204,omitempty"` HTTPPartialContent uint64 `json:"206,omitempty"` HTTPSpecialResponse uint64 `json:"300,omitempty"` HTTPMovedPermanently uint64 `json:"301,omitempty"` HTTPMovedTemporarily uint64 `json:"302,omitempty"` HTTPSeeOther uint64 `json:"303,omitempty"` HTTPNotModified uint64 `json:"304,omitempty"` HTTPTemporaryRedirect uint64 `json:"307,omitempty"` HTTPBadRequest uint64 `json:"400,omitempty"` HTTPUnauthorized uint64 `json:"401,omitempty"` HTTPForbidden uint64 `json:"403,omitempty"` HTTPNotFound uint64 `json:"404,omitempty"` HTTPNotAllowed uint64 `json:"405,omitempty"` HTTPRequestTimeOut uint64 `json:"408,omitempty"` HTTPConflict uint64 `json:"409,omitempty"` HTTPLengthRequired uint64 `json:"411,omitempty"` HTTPPreconditionFailed uint64 `json:"412,omitempty"` HTTPRequestEntityTooLarge uint64 `json:"413,omitempty"` HTTPRequestURITooLarge uint64 `json:"414,omitempty"` HTTPUnsupportedMediaType uint64 `json:"415,omitempty"` HTTPRangeNotSatisfiable uint64 `json:"416,omitempty"` HTTPTooManyRequests uint64 `json:"429,omitempty"` HTTPClose uint64 `json:"444,omitempty"` HTTPRequestHeaderTooLarge uint64 `json:"494,omitempty"` HTTPSCertError uint64 `json:"495,omitempty"` HTTPSNoCert uint64 `json:"496,omitempty"` HTTPToHTTPS uint64 `json:"497,omitempty"` HTTPClientClosedRequest uint64 `json:"499,omitempty"` HTTPInternalServerError uint64 `json:"500,omitempty"` HTTPNotImplemented uint64 `json:"501,omitempty"` HTTPBadGateway uint64 `json:"502,omitempty"` HTTPServiceUnavailable uint64 `json:"503,omitempty"` HTTPGatewayTimeOut uint64 `json:"504,omitempty"` HTTPInsufficientStorage uint64 `json:"507,omitempty"` } // Sessions represents stream session related stats. type Sessions struct { Sessions2xx uint64 `json:"2xx"` Sessions4xx uint64 `json:"4xx"` Sessions5xx uint64 `json:"5xx"` Total uint64 } // Upstreams is a map of upstream stats by upstream name. type Upstreams map[string]Upstream // Upstream represents upstream related stats. type Upstream struct { Zone string Peers []Peer Queue Queue Keepalive int Zombies int } // StreamUpstreams is a map of stream upstream stats by upstream name. type StreamUpstreams map[string]StreamUpstream // StreamUpstream represents stream upstream related stats. type StreamUpstream struct { Zone string Peers []StreamPeer Zombies int } // Queue represents queue related stats for an upstream. type Queue struct { Size int MaxSize int `json:"max_size"` Overflows uint64 } // Peer represents peer (upstream server) related stats. type Peer struct { Server string Service string Name string Selected string Downstart string State string Responses Responses SSL SSL HealthChecks HealthChecks `json:"health_checks"` Requests uint64 ID int MaxConns int `json:"max_conns"` Sent uint64 Received uint64 Fails uint64 Unavail uint64 Active uint64 Downtime uint64 Weight int HeaderTime uint64 `json:"header_time"` ResponseTime uint64 `json:"response_time"` Backup bool } // StreamPeer represents peer (stream upstream server) related stats. 
type StreamPeer struct { Server string Service string Name string Selected string Downstart string State string SSL SSL HealthChecks HealthChecks `json:"health_checks"` Connections uint64 Received uint64 ID int ConnectTime int `json:"connect_time"` FirstByteTime int `json:"first_byte_time"` ResponseTime uint64 `json:"response_time"` Sent uint64 MaxConns int `json:"max_conns"` Fails uint64 Unavail uint64 Active uint64 Downtime uint64 Weight int Backup bool } // HealthChecks represents health check related stats for a peer. type HealthChecks struct { Checks uint64 Fails uint64 Unhealthy uint64 LastPassed bool `json:"last_passed"` } // LocationZones represents location_zones related stats. type LocationZones map[string]LocationZone // Resolvers represents resolvers related stats. type Resolvers map[string]Resolver // LocationZone represents location_zones related stats. type LocationZone struct { Requests int64 Responses Responses Discarded int64 Received int64 Sent int64 } // Resolver represents resolvers related stats. type Resolver struct { Requests ResolverRequests `json:"requests"` Responses ResolverResponses `json:"responses"` } // ResolverRequests represents resolver requests. type ResolverRequests struct { Name int64 Srv int64 Addr int64 } // ResolverResponses represents resolver responses. type ResolverResponses struct { Noerror int64 Formerr int64 Servfail int64 Nxdomain int64 Notimp int64 Refused int64 Timedout int64 Unknown int64 } // Processes represents processes related stats. type Processes struct { Respawned int64 } // HTTPLimitRequest represents HTTP Requests Rate Limiting. type HTTPLimitRequest struct { Passed uint64 Delayed uint64 Rejected uint64 DelayedDryRun uint64 `json:"delayed_dry_run"` RejectedDryRun uint64 `json:"rejected_dry_run"` } // HTTPLimitRequests represents limit requests related stats. type HTTPLimitRequests map[string]HTTPLimitRequest // LimitConnection represents Connections Limiting. type LimitConnection struct { Passed uint64 Rejected uint64 RejectedDryRun uint64 `json:"rejected_dry_run"` } // HTTPLimitConnections represents limit connections related stats. type HTTPLimitConnections map[string]LimitConnection // StreamLimitConnections represents limit connections related stats. type StreamLimitConnections map[string]LimitConnection // Workers represents worker connections related stats. type Workers struct { ID int ProcessID uint64 `json:"pid"` HTTP WorkersHTTP `json:"http"` Connections Connections } // WorkersHTTP represents HTTP worker connections. type WorkersHTTP struct { HTTPRequests HTTPRequests `json:"requests"` } // WithHTTPClient sets the HTTP client to use for accessing the API. func WithHTTPClient(httpClient *http.Client) Option { return func(o *NginxClient) { o.httpClient = httpClient } } // WithAPIVersion sets the API version to use for accessing the API. func WithAPIVersion(apiVersion int) Option { return func(o *NginxClient) { o.apiVersion = apiVersion } } // WithCheckAPI sets the flag to check the API version of the server. func WithCheckAPI() Option { return func(o *NginxClient) { o.checkAPI = true } } // WithMaxAPIVersion sets the API version to the max API version. func WithMaxAPIVersion() Option { return func(o *NginxClient) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() version, err := o.GetMaxAPIVersion(ctx) if err != nil { return } o.apiVersion = version } } // NewNginxClient creates a new NginxClient. 
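//
// A minimal construction sketch (the endpoint URL below is an assumption, not part of this package):
//
//	c, err := NewNginxClient("http://127.0.0.1:8080/api",
//		WithAPIVersion(8), // pin a specific supported API version
//		WithCheckAPI(),    // verify the server actually exposes that version
//	)
//	if err != nil {
//		// handle error
//	}
//	_ = c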
func NewNginxClient(apiEndpoint string, opts ...Option) (*NginxClient, error) { c := &NginxClient{ httpClient: http.DefaultClient, apiEndpoint: apiEndpoint, apiVersion: APIVersion, checkAPI: false, } for _, opt := range opts { opt(c) } if c.httpClient == nil { return nil, fmt.Errorf("http client: %w", ErrParameterRequired) } if !versionSupported(c.apiVersion) { return nil, fmt.Errorf("API version %v: %w by the client", c.apiVersion, ErrNotSupported) } if c.checkAPI { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() versions, err := c.getAPIVersions(ctx, c.httpClient, apiEndpoint) if err != nil { return nil, fmt.Errorf("error accessing the API: %w", err) } found := false for _, v := range *versions { if v == c.apiVersion { found = true break } } if !found { return nil, fmt.Errorf("API version %v: %w by the server", c.apiVersion, ErrNotSupported) } } return c, nil } func versionSupported(n int) bool { for _, version := range supportedAPIVersions { if n == version { return true } } return false } // GetMaxAPIVersion returns the maximum API version supported by the server and the client. func (client *NginxClient) GetMaxAPIVersion(ctx context.Context) (int, error) { serverVersions, err := client.getAPIVersions(ctx, client.httpClient, client.apiEndpoint) if err != nil { return 0, fmt.Errorf("failed to get max API version: %w", err) } maxServerVersion := slices.Max(*serverVersions) maxClientVersion := slices.Max(supportedAPIVersions) if maxServerVersion > maxClientVersion { return maxClientVersion, nil } return maxServerVersion, nil } func (client *NginxClient) getAPIVersions(ctx context.Context, httpClient *http.Client, endpoint string) (*versions, error) { req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil) if err != nil { return nil, fmt.Errorf("failed to create a get request: %w", err) } resp, err := httpClient.Do(req) if err != nil { return nil, fmt.Errorf("%v is not accessible: %w", endpoint, err) } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { return nil, createResponseMismatchError(resp.Body).Wrap(fmt.Sprintf( "failed to get endpoint %q, expected %v response, got %v", endpoint, http.StatusOK, resp.StatusCode)) } body, err := io.ReadAll(resp.Body) if err != nil { return nil, fmt.Errorf("error while reading body of the response: %w", err) } var vers versions err = json.Unmarshal(body, &vers) if err != nil { return nil, fmt.Errorf("error unmarshalling versions, got %q response: %w", string(body), err) } return &vers, nil } func createResponseMismatchError(respBody io.ReadCloser) *internalError { apiErrResp, err := readAPIErrorResponse(respBody) if err != nil { return &internalError{ err: fmt.Sprintf("failed to read the response body: %v", err), } } return &internalError{ err: apiErrResp.toString(), apiError: apiErrResp.Error, } } func readAPIErrorResponse(respBody io.ReadCloser) (*apiErrorResponse, error) { body, err := io.ReadAll(respBody) if err != nil { return nil, fmt.Errorf("failed to read the response body: %w", err) } var apiErr apiErrorResponse err = json.Unmarshal(body, &apiErr) if err != nil { return nil, fmt.Errorf("error unmarshalling apiErrorResponse: got %q response: %w", string(body), err) } return &apiErr, nil } // CheckIfUpstreamExists checks if the upstream exists in NGINX. If the upstream doesn't exist, it returns the error. 
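//
// A usage sketch ("backend" is a placeholder upstream name; c is an existing *NginxClient, ctx a context.Context):
//
//	if err := c.CheckIfUpstreamExists(ctx, "backend"); err != nil {
//		// the upstream is missing or the API call failed
//	}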
func (client *NginxClient) CheckIfUpstreamExists(ctx context.Context, upstream string) error { _, err := client.GetHTTPServers(ctx, upstream) return err } // GetHTTPServers returns the servers of the upstream from NGINX. func (client *NginxClient) GetHTTPServers(ctx context.Context, upstream string) ([]UpstreamServer, error) { path := fmt.Sprintf("http/upstreams/%v/servers", upstream) var servers []UpstreamServer err := client.get(ctx, path, &servers) if err != nil { return nil, fmt.Errorf("failed to get the HTTP servers of upstream %v: %w", upstream, err) } return servers, nil } // AddHTTPServer adds the server to the upstream. func (client *NginxClient) AddHTTPServer(ctx context.Context, upstream string, server UpstreamServer) error { id, err := client.getIDOfHTTPServer(ctx, upstream, server.Server) if err != nil { return fmt.Errorf("failed to add %v server to %v upstream: %w", server.Server, upstream, err) } if id != -1 { return fmt.Errorf("failed to add %v server to %v upstream: %w", server.Server, upstream, ErrServerExists) } err = client.addHTTPServer(ctx, upstream, server) return err } func (client *NginxClient) addHTTPServer(ctx context.Context, upstream string, server UpstreamServer) error { path := fmt.Sprintf("http/upstreams/%v/servers/", upstream) err := client.post(ctx, path, &server) if err != nil { return fmt.Errorf("failed to add %v server to %v upstream: %w", server.Server, upstream, err) } return nil } // DeleteHTTPServer the server from the upstream. func (client *NginxClient) DeleteHTTPServer(ctx context.Context, upstream string, server string) error { id, err := client.getIDOfHTTPServer(ctx, upstream, server) if err != nil { return fmt.Errorf("failed to remove %v server from %v upstream: %w", server, upstream, err) } if id == -1 { return fmt.Errorf("failed to remove %v server from %v upstream: %w", server, upstream, ErrServerNotFound) } err = client.deleteHTTPServer(ctx, upstream, server, id) return err } func (client *NginxClient) deleteHTTPServer(ctx context.Context, upstream, server string, serverID int) error { path := fmt.Sprintf("http/upstreams/%v/servers/%v", upstream, serverID) err := client.delete(ctx, path, http.StatusOK) if err != nil { return fmt.Errorf("failed to remove %v server from %v upstream: %w", server, upstream, err) } return nil } // UpdateHTTPServers updates the servers of the upstream. // Servers that are in the slice, but don't exist in NGINX will be added to NGINX. // Servers that aren't in the slice, but exist in NGINX, will be removed from NGINX. // Servers that are in the slice and exist in NGINX, but have different parameters, will be updated. // The client will attempt to update all servers, returning all the errors that occurred. // If there are duplicate servers with equivalent parameters, the duplicates will be ignored. // If there are duplicate servers with different parameters, those server entries will be ignored and an error returned. func (client *NginxClient) UpdateHTTPServers(ctx context.Context, upstream string, servers []UpstreamServer) (added []UpstreamServer, deleted []UpstreamServer, updated []UpstreamServer, err error) { serversInNginx, err := client.GetHTTPServers(ctx, upstream) if err != nil { return nil, nil, nil, fmt.Errorf("failed to update servers of %v upstream: %w", upstream, err) } // We assume port 80 if no port is set for servers. 
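	// For example, "10.0.0.1" is normalized to "10.0.0.1:80", while addresses that already carry
	// a port ("10.0.0.1:8080", "[::1]:8080") and unix sockets ("unix:/run/app.sock") are left
	// unchanged (see addPortToServer below).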
formattedServers := make([]UpstreamServer, 0, len(servers)) for _, server := range servers { server.Server = addPortToServer(server.Server) formattedServers = append(formattedServers, server) } formattedServers, err = deduplicateServers(upstream, formattedServers) toAdd, toDelete, toUpdate := determineUpdates(formattedServers, serversInNginx) for _, server := range toAdd { addErr := client.addHTTPServer(ctx, upstream, server) if addErr != nil { err = errors.Join(err, addErr) continue } added = append(added, server) } for _, server := range toDelete { deleteErr := client.deleteHTTPServer(ctx, upstream, server.Server, server.ID) if deleteErr != nil { err = errors.Join(err, deleteErr) continue } deleted = append(deleted, server) } for _, server := range toUpdate { updateErr := client.UpdateHTTPServer(ctx, upstream, server) if updateErr != nil { err = errors.Join(err, updateErr) continue } updated = append(updated, server) } if err != nil { err = fmt.Errorf("failed to update servers of %s upstream: %w", upstream, err) } return added, deleted, updated, err } func deduplicateServers(upstream string, servers []UpstreamServer) ([]UpstreamServer, error) { type serverCheck struct { server UpstreamServer valid bool } serverMap := make(map[string]*serverCheck, len(servers)) var err error for _, server := range servers { if prev, ok := serverMap[server.Server]; ok { if !prev.valid { continue } if !server.hasSameParametersAs(prev.server) { prev.valid = false err = errors.Join(err, fmt.Errorf( "failed to update %s server to %s upstream: %w", server.Server, upstream, ErrParameterMismatch)) } continue } serverMap[server.Server] = &serverCheck{server, true} } retServers := make([]UpstreamServer, 0, len(serverMap)) for _, server := range servers { if check, ok := serverMap[server.Server]; ok && check.valid { retServers = append(retServers, server) delete(serverMap, server.Server) } } return retServers, err } // hasSameParametersAs checks if a given server has the same parameters. 
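// Unset optional fields are normalized to their NGINX defaults before comparison, so, for
// example, a server with a nil Weight is considered equal to the same server with Weight set to 1.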
func (s UpstreamServer) hasSameParametersAs(compareServer UpstreamServer) bool { s.ID = compareServer.ID s.applyDefaults() compareServer.applyDefaults() return reflect.DeepEqual(s, compareServer) } func (s *UpstreamServer) applyDefaults() { if s.MaxConns == nil { s.MaxConns = &defaultMaxConns } if s.MaxFails == nil { s.MaxFails = &defaultMaxFails } if s.FailTimeout == "" { s.FailTimeout = defaultFailTimeout } if s.SlowStart == "" { s.SlowStart = defaultSlowStart } if s.Backup == nil { s.Backup = &defaultBackup } if s.Down == nil { s.Down = &defaultDown } if s.Weight == nil { s.Weight = &defaultWeight } } func determineUpdates(updatedServers []UpstreamServer, nginxServers []UpstreamServer) (toAdd []UpstreamServer, toRemove []UpstreamServer, toUpdate []UpstreamServer) { for _, server := range updatedServers { updateFound := false for _, serverNGX := range nginxServers { if server.Server == serverNGX.Server && !server.hasSameParametersAs(serverNGX) { server.ID = serverNGX.ID updateFound = true break } } if updateFound { toUpdate = append(toUpdate, server) } } for _, server := range updatedServers { found := false for _, serverNGX := range nginxServers { if server.Server == serverNGX.Server { found = true break } } if !found { toAdd = append(toAdd, server) } } for _, serverNGX := range nginxServers { found := false for _, server := range updatedServers { if serverNGX.Server == server.Server { found = true break } } if !found { toRemove = append(toRemove, serverNGX) } } return } func (client *NginxClient) getIDOfHTTPServer(ctx context.Context, upstream string, name string) (int, error) { servers, err := client.GetHTTPServers(ctx, upstream) if err != nil { return -1, fmt.Errorf("error getting id of server %v of upstream %v: %w", name, upstream, err) } for _, s := range servers { if s.Server == name { return s.ID, nil } } return -1, nil } func (client *NginxClient) get(ctx context.Context, path string, data interface{}) error { url := fmt.Sprintf("%v/%v/%v", client.apiEndpoint, client.apiVersion, path) req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) if err != nil { return fmt.Errorf("failed to create a get request: %w", err) } resp, err := client.httpClient.Do(req) if err != nil { return fmt.Errorf("failed to get %v: %w", path, err) } if resp.StatusCode != http.StatusOK { return createResponseMismatchError(resp.Body).Wrap(fmt.Sprintf( "expected %v response, got %v", http.StatusOK, resp.StatusCode)) } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { return fmt.Errorf("failed to read the response body: %w", err) } err = json.Unmarshal(body, data) if err != nil { return fmt.Errorf("error unmarshaling response %q: %w", string(body), err) } return nil } func (client *NginxClient) post(ctx context.Context, path string, input interface{}) error { url := fmt.Sprintf("%v/%v/%v", client.apiEndpoint, client.apiVersion, path) jsonInput, err := json.Marshal(input) if err != nil { return fmt.Errorf("failed to marshall input: %w", err) } req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewBuffer(jsonInput)) if err != nil { return fmt.Errorf("failed to create a post request: %w", err) } req.Header.Set("Content-Type", "application/json") resp, err := client.httpClient.Do(req) if err != nil { return fmt.Errorf("failed to post %v: %w", path, err) } defer resp.Body.Close() if resp.StatusCode != http.StatusCreated { return createResponseMismatchError(resp.Body).Wrap(fmt.Sprintf( "expected %v response, got %v", http.StatusCreated, resp.StatusCode)) 
} return nil } func (client *NginxClient) delete(ctx context.Context, path string, expectedStatusCode int) error { path = fmt.Sprintf("%v/%v/%v/", client.apiEndpoint, client.apiVersion, path) req, err := http.NewRequestWithContext(ctx, http.MethodDelete, path, nil) if err != nil { return fmt.Errorf("failed to create a delete request: %w", err) } resp, err := client.httpClient.Do(req) if err != nil { return fmt.Errorf("failed to create delete request: %w", err) } defer resp.Body.Close() if resp.StatusCode != expectedStatusCode { return createResponseMismatchError(resp.Body).Wrap(fmt.Sprintf( "failed to complete delete request: expected %v response, got %v", expectedStatusCode, resp.StatusCode)) } return nil } func (client *NginxClient) patch(ctx context.Context, path string, input interface{}, expectedStatusCode int) error { path = fmt.Sprintf("%v/%v/%v/", client.apiEndpoint, client.apiVersion, path) jsonInput, err := json.Marshal(input) if err != nil { return fmt.Errorf("failed to marshall input: %w", err) } req, err := http.NewRequestWithContext(ctx, http.MethodPatch, path, bytes.NewBuffer(jsonInput)) if err != nil { return fmt.Errorf("failed to create a patch request: %w", err) } req.Header.Set("Content-Type", "application/json") resp, err := client.httpClient.Do(req) if err != nil { return fmt.Errorf("failed to create patch request: %w", err) } defer resp.Body.Close() if resp.StatusCode != expectedStatusCode { return createResponseMismatchError(resp.Body).Wrap(fmt.Sprintf( "failed to complete patch request: expected %v response, got %v", expectedStatusCode, resp.StatusCode)) } return nil } // CheckIfStreamUpstreamExists checks if the stream upstream exists in NGINX. If the upstream doesn't exist, it returns the error. func (client *NginxClient) CheckIfStreamUpstreamExists(ctx context.Context, upstream string) error { _, err := client.GetStreamServers(ctx, upstream) return err } // GetStreamServers returns the stream servers of the upstream from NGINX. func (client *NginxClient) GetStreamServers(ctx context.Context, upstream string) ([]StreamUpstreamServer, error) { path := fmt.Sprintf("stream/upstreams/%v/servers", upstream) var servers []StreamUpstreamServer err := client.get(ctx, path, &servers) if err != nil { return nil, fmt.Errorf("failed to get stream servers of upstream server %v: %w", upstream, err) } return servers, nil } // AddStreamServer adds the stream server to the upstream. func (client *NginxClient) AddStreamServer(ctx context.Context, upstream string, server StreamUpstreamServer) error { id, err := client.getIDOfStreamServer(ctx, upstream, server.Server) if err != nil { return fmt.Errorf("failed to add %v stream server to %v upstream: %w", server.Server, upstream, err) } if id != -1 { return fmt.Errorf("failed to add %v stream server to %v upstream: %w", server.Server, upstream, ErrServerExists) } err = client.addStreamServer(ctx, upstream, server) return err } func (client *NginxClient) addStreamServer(ctx context.Context, upstream string, server StreamUpstreamServer) error { path := fmt.Sprintf("stream/upstreams/%v/servers/", upstream) err := client.post(ctx, path, &server) if err != nil { return fmt.Errorf("failed to add %v stream server to %v upstream: %w", server.Server, upstream, err) } return nil } // DeleteStreamServer the server from the upstream. 
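//
// A usage sketch (the upstream name and server address are placeholders):
//
//	err := c.DeleteStreamServer(ctx, "stream_backend", "10.0.0.1:12345")
//	if err != nil {
//		// the server was not found or the API call failed
//	}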
func (client *NginxClient) DeleteStreamServer(ctx context.Context, upstream string, server string) error { id, err := client.getIDOfStreamServer(ctx, upstream, server) if err != nil { return fmt.Errorf("failed to remove %v stream server from %v upstream: %w", server, upstream, err) } if id == -1 { return fmt.Errorf("failed to remove %v stream server from %v upstream: %w", server, upstream, ErrServerNotFound) } err = client.deleteStreamServer(ctx, upstream, server, id) return err } func (client *NginxClient) deleteStreamServer(ctx context.Context, upstream, server string, serverID int) error { path := fmt.Sprintf("stream/upstreams/%v/servers/%v", upstream, serverID) err := client.delete(ctx, path, http.StatusOK) if err != nil { return fmt.Errorf("failed to remove %v stream server from %v upstream: %w", server, upstream, err) } return nil } // UpdateStreamServers updates the servers of the upstream. // Servers that are in the slice, but don't exist in NGINX will be added to NGINX. // Servers that aren't in the slice, but exist in NGINX, will be removed from NGINX. // Servers that are in the slice and exist in NGINX, but have different parameters, will be updated. // The client will attempt to update all servers, returning all the errors that occurred. // If there are duplicate servers with equivalent parameters, the duplicates will be ignored. // If there are duplicate servers with different parameters, those server entries will be ignored and an error returned. func (client *NginxClient) UpdateStreamServers(ctx context.Context, upstream string, servers []StreamUpstreamServer) (added []StreamUpstreamServer, deleted []StreamUpstreamServer, updated []StreamUpstreamServer, err error) { serversInNginx, err := client.GetStreamServers(ctx, upstream) if err != nil { return nil, nil, nil, fmt.Errorf("failed to update stream servers of %v upstream: %w", upstream, err) } formattedServers := make([]StreamUpstreamServer, 0, len(servers)) for _, server := range servers { server.Server = addPortToServer(server.Server) formattedServers = append(formattedServers, server) } formattedServers, err = deduplicateStreamServers(upstream, formattedServers) toAdd, toDelete, toUpdate := determineStreamUpdates(formattedServers, serversInNginx) for _, server := range toAdd { addErr := client.addStreamServer(ctx, upstream, server) if addErr != nil { err = errors.Join(err, addErr) continue } added = append(added, server) } for _, server := range toDelete { deleteErr := client.deleteStreamServer(ctx, upstream, server.Server, server.ID) if deleteErr != nil { err = errors.Join(err, deleteErr) continue } deleted = append(deleted, server) } for _, server := range toUpdate { updateErr := client.UpdateStreamServer(ctx, upstream, server) if updateErr != nil { err = errors.Join(err, updateErr) continue } updated = append(updated, server) } if err != nil { err = fmt.Errorf("failed to update stream servers of %s upstream: %w", upstream, err) } return added, deleted, updated, err } func (client *NginxClient) getIDOfStreamServer(ctx context.Context, upstream string, name string) (int, error) { servers, err := client.GetStreamServers(ctx, upstream) if err != nil { return -1, fmt.Errorf("error getting id of stream server %v of upstream %v: %w", name, upstream, err) } for _, s := range servers { if s.Server == name { return s.ID, nil } } return -1, nil } func deduplicateStreamServers(upstream string, servers []StreamUpstreamServer) ([]StreamUpstreamServer, error) { type serverCheck struct { server StreamUpstreamServer valid bool } 
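	// serverMap records the first occurrence of every address; if a later duplicate has different
	// parameters, the entry is marked invalid so that address is dropped from the result and
	// reported via ErrParameterMismatch.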
serverMap := make(map[string]*serverCheck, len(servers)) var err error for _, server := range servers { if prev, ok := serverMap[server.Server]; ok { if !prev.valid { continue } if !server.hasSameParametersAs(prev.server) { prev.valid = false err = errors.Join(err, fmt.Errorf( "failed to update stream %s server to %s upstream: %w", server.Server, upstream, ErrParameterMismatch)) } continue } serverMap[server.Server] = &serverCheck{server, true} } retServers := make([]StreamUpstreamServer, 0, len(serverMap)) for _, server := range servers { if check, ok := serverMap[server.Server]; ok && check.valid { retServers = append(retServers, server) delete(serverMap, server.Server) } } return retServers, err } // hasSameParametersAs checks if a given server has the same parameters. func (s StreamUpstreamServer) hasSameParametersAs(compareServer StreamUpstreamServer) bool { s.ID = compareServer.ID s.applyDefaults() compareServer.applyDefaults() return reflect.DeepEqual(s, compareServer) } func (s *StreamUpstreamServer) applyDefaults() { if s.MaxConns == nil { s.MaxConns = &defaultMaxConns } if s.MaxFails == nil { s.MaxFails = &defaultMaxFails } if s.FailTimeout == "" { s.FailTimeout = defaultFailTimeout } if s.SlowStart == "" { s.SlowStart = defaultSlowStart } if s.Backup == nil { s.Backup = &defaultBackup } if s.Down == nil { s.Down = &defaultDown } if s.Weight == nil { s.Weight = &defaultWeight } } func determineStreamUpdates(updatedServers []StreamUpstreamServer, nginxServers []StreamUpstreamServer) (toAdd []StreamUpstreamServer, toRemove []StreamUpstreamServer, toUpdate []StreamUpstreamServer) { for _, server := range updatedServers { updateFound := false for _, serverNGX := range nginxServers { if server.Server == serverNGX.Server && !server.hasSameParametersAs(serverNGX) { server.ID = serverNGX.ID updateFound = true break } } if updateFound { toUpdate = append(toUpdate, server) } } for _, server := range updatedServers { found := false for _, serverNGX := range nginxServers { if server.Server == serverNGX.Server { found = true break } } if !found { toAdd = append(toAdd, server) } } for _, serverNGX := range nginxServers { found := false for _, server := range updatedServers { if serverNGX.Server == server.Server { found = true break } } if !found { toRemove = append(toRemove, serverNGX) } } return } // GetStats gets process, slab, connection, request, ssl, zone, stream zone, upstream and stream upstream related stats from the NGINX Plus API. 
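//
// A usage sketch (c is an existing *NginxClient and ctx a context.Context):
//
//	stats, err := c.GetStats(ctx)
//	if err != nil {
//		// handle error
//	}
//	fmt.Println(stats.Connections.Active, stats.HTTPRequests.Total)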
func (client *NginxClient) GetStats(ctx context.Context) (*Stats, error) { initialGroup, initialCtx := errgroup.WithContext(ctx) var mu sync.Mutex stats := defaultStats() // Collecting initial stats initialGroup.Go(func() error { endpoints, err := client.GetAvailableEndpoints(initialCtx) if err != nil { return fmt.Errorf("failed to get available Endpoints: %w", err) } mu.Lock() stats.endpoints = endpoints mu.Unlock() return nil }) initialGroup.Go(func() error { nginxInfo, err := client.GetNginxInfo(initialCtx) if err != nil { return fmt.Errorf("failed to get NGINX info: %w", err) } mu.Lock() stats.NginxInfo = *nginxInfo mu.Unlock() return nil }) initialGroup.Go(func() error { caches, err := client.GetCaches(initialCtx) if err != nil { return fmt.Errorf("failed to get Caches: %w", err) } mu.Lock() stats.Caches = *caches mu.Unlock() return nil }) initialGroup.Go(func() error { processes, err := client.GetProcesses(initialCtx) if err != nil { return fmt.Errorf("failed to get Process information: %w", err) } mu.Lock() stats.Processes = *processes mu.Unlock() return nil }) initialGroup.Go(func() error { slabs, err := client.GetSlabs(initialCtx) if err != nil { return fmt.Errorf("failed to get Slabs: %w", err) } mu.Lock() stats.Slabs = *slabs mu.Unlock() return nil }) initialGroup.Go(func() error { httpRequests, err := client.GetHTTPRequests(initialCtx) if err != nil { return fmt.Errorf("failed to get HTTP Requests: %w", err) } mu.Lock() stats.HTTPRequests = *httpRequests mu.Unlock() return nil }) initialGroup.Go(func() error { ssl, err := client.GetSSL(initialCtx) if err != nil { return fmt.Errorf("failed to get SSL: %w", err) } mu.Lock() stats.SSL = *ssl mu.Unlock() return nil }) initialGroup.Go(func() error { serverZones, err := client.GetServerZones(initialCtx) if err != nil { return fmt.Errorf("failed to get Server Zones: %w", err) } mu.Lock() stats.ServerZones = *serverZones mu.Unlock() return nil }) initialGroup.Go(func() error { upstreams, err := client.GetUpstreams(initialCtx) if err != nil { return fmt.Errorf("failed to get Upstreams: %w", err) } mu.Lock() stats.Upstreams = *upstreams mu.Unlock() return nil }) initialGroup.Go(func() error { locationZones, err := client.GetLocationZones(initialCtx) if err != nil { return fmt.Errorf("failed to get Location Zones: %w", err) } mu.Lock() stats.LocationZones = *locationZones mu.Unlock() return nil }) initialGroup.Go(func() error { resolvers, err := client.GetResolvers(initialCtx) if err != nil { return fmt.Errorf("failed to get Resolvers: %w", err) } mu.Lock() stats.Resolvers = *resolvers mu.Unlock() return nil }) initialGroup.Go(func() error { httpLimitRequests, err := client.GetHTTPLimitReqs(initialCtx) if err != nil { return fmt.Errorf("failed to get HTTPLimitRequests: %w", err) } mu.Lock() stats.HTTPLimitRequests = *httpLimitRequests mu.Unlock() return nil }) initialGroup.Go(func() error { httpLimitConnections, err := client.GetHTTPConnectionsLimit(initialCtx) if err != nil { return fmt.Errorf("failed to get HTTPLimitConnections: %w", err) } mu.Lock() stats.HTTPLimitConnections = *httpLimitConnections mu.Unlock() return nil }) initialGroup.Go(func() error { workers, err := client.GetWorkers(initialCtx) if err != nil { return fmt.Errorf("failed to get Workers: %w", err) } mu.Lock() stats.Workers = workers mu.Unlock() return nil }) if err := initialGroup.Wait(); err != nil { return nil, fmt.Errorf("error returned from contacting Plus API: %w", err) } // Process stream endpoints if they exist if slices.Contains(stats.endpoints, "stream") { 
availableStreamGroup, asgCtx := errgroup.WithContext(ctx) availableStreamGroup.Go(func() error { streamEndpoints, err := client.GetAvailableStreamEndpoints(asgCtx) if err != nil { return fmt.Errorf("failed to get available Stream Endpoints: %w", err) } mu.Lock() stats.streamEndpoints = streamEndpoints mu.Unlock() return nil }) if err := availableStreamGroup.Wait(); err != nil { return nil, fmt.Errorf("no useful metrics found in stream stats: %w", err) } streamGroup, sgCtx := errgroup.WithContext(ctx) if slices.Contains(stats.streamEndpoints, "server_zones") { streamGroup.Go(func() error { streamServerZones, err := client.GetStreamServerZones(sgCtx) if err != nil { return fmt.Errorf("failed to get streamServerZones: %w", err) } mu.Lock() stats.StreamServerZones = *streamServerZones mu.Unlock() return nil }) } if slices.Contains(stats.streamEndpoints, "upstreams") { streamGroup.Go(func() error { streamUpstreams, err := client.GetStreamUpstreams(sgCtx) if err != nil { return fmt.Errorf("failed to get StreamUpstreams: %w", err) } mu.Lock() stats.StreamUpstreams = *streamUpstreams mu.Unlock() return nil }) } if slices.Contains(stats.streamEndpoints, "limit_conns") { streamGroup.Go(func() error { streamConnectionsLimit, err := client.GetStreamConnectionsLimit(sgCtx) if err != nil { return fmt.Errorf("failed to get StreamLimitConnections: %w", err) } mu.Lock() stats.StreamLimitConnections = *streamConnectionsLimit mu.Unlock() return nil }) streamGroup.Go(func() error { streamZoneSync, err := client.GetStreamZoneSync(sgCtx) if err != nil { return fmt.Errorf("failed to get StreamZoneSync: %w", err) } mu.Lock() stats.StreamZoneSync = streamZoneSync mu.Unlock() return nil }) } if err := streamGroup.Wait(); err != nil { return nil, fmt.Errorf("no useful metrics found in stream stats: %w", err) } } // Report connection metrics separately so it does not influence the results connectionsGroup, cgCtx := errgroup.WithContext(ctx) connectionsGroup.Go(func() error { // replace this call with a context specific call connections, err := client.GetConnections(cgCtx) if err != nil { return fmt.Errorf("failed to get connections: %w", err) } mu.Lock() stats.Connections = *connections mu.Unlock() return nil }) if err := connectionsGroup.Wait(); err != nil { return nil, fmt.Errorf("connections metrics not found: %w", err) } return &stats.Stats, nil } // GetAvailableEndpoints returns available endpoints in the API. func (client *NginxClient) GetAvailableEndpoints(ctx context.Context) ([]string, error) { var endpoints []string err := client.get(ctx, "", &endpoints) if err != nil { return nil, fmt.Errorf("failed to get endpoints: %w", err) } return endpoints, nil } // GetAvailableStreamEndpoints returns available stream endpoints in the API with a context. func (client *NginxClient) GetAvailableStreamEndpoints(ctx context.Context) ([]string, error) { var endpoints []string err := client.get(ctx, "stream", &endpoints) if err != nil { return nil, fmt.Errorf("failed to get endpoints: %w", err) } return endpoints, nil } // GetNginxInfo returns Nginx stats with a context. func (client *NginxClient) GetNginxInfo(ctx context.Context) (*NginxInfo, error) { var info NginxInfo err := client.get(ctx, "nginx", &info) if err != nil { return nil, fmt.Errorf("failed to get info: %w", err) } return &info, nil } // GetNginxLicense returns Nginx License data with a context. 
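//
// For API versions below 9, or NGINX Plus releases before R33, a zero-value NginxLicense is
// returned without querying the /license endpoint. A usage sketch:
//
//	license, err := c.GetNginxLicense(ctx)
//	if err != nil {
//		// handle error
//	}
//	fmt.Println(license.ActiveTill)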
func (client *NginxClient) GetNginxLicense(ctx context.Context) (*NginxLicense, error) { var data NginxLicense info, err := client.GetNginxInfo(ctx) if err != nil { return nil, fmt.Errorf("failed to get nginx info: %w", err) } release, err := extractPlusVersionValues(info.Build) if err != nil { return nil, fmt.Errorf("failed to get nginx plus release: %w", err) } if (client.apiVersion < 9) || (release < 33) { return &data, nil } err = client.get(ctx, "license", &data) if err != nil { return nil, fmt.Errorf("failed to get license: %w", err) } return &data, nil } // GetCaches returns Cache stats with a context. func (client *NginxClient) GetCaches(ctx context.Context) (*Caches, error) { var caches Caches err := client.get(ctx, "http/caches", &caches) if err != nil { return nil, fmt.Errorf("failed to get caches: %w", err) } return &caches, nil } // GetSlabs returns Slabs stats with a context. func (client *NginxClient) GetSlabs(ctx context.Context) (*Slabs, error) { var slabs Slabs err := client.get(ctx, "slabs", &slabs) if err != nil { return nil, fmt.Errorf("failed to get slabs: %w", err) } return &slabs, nil } // GetConnections returns Connections stats with a context. func (client *NginxClient) GetConnections(ctx context.Context) (*Connections, error) { var cons Connections err := client.get(ctx, "connections", &cons) if err != nil { return nil, fmt.Errorf("failed to get connections: %w", err) } return &cons, nil } // GetHTTPRequests returns http/requests stats with a context. func (client *NginxClient) GetHTTPRequests(ctx context.Context) (*HTTPRequests, error) { var requests HTTPRequests err := client.get(ctx, "http/requests", &requests) if err != nil { return nil, fmt.Errorf("failed to get http requests: %w", err) } return &requests, nil } // GetSSL returns SSL stats with a context. func (client *NginxClient) GetSSL(ctx context.Context) (*SSL, error) { var ssl SSL err := client.get(ctx, "ssl", &ssl) if err != nil { return nil, fmt.Errorf("failed to get ssl: %w", err) } return &ssl, nil } // GetServerZones returns http/server_zones stats with a context. func (client *NginxClient) GetServerZones(ctx context.Context) (*ServerZones, error) { var zones ServerZones err := client.get(ctx, "http/server_zones", &zones) if err != nil { return nil, fmt.Errorf("failed to get server zones: %w", err) } return &zones, err } // GetStreamServerZones returns stream/server_zones stats with a context. func (client *NginxClient) GetStreamServerZones(ctx context.Context) (*StreamServerZones, error) { var zones StreamServerZones err := client.get(ctx, "stream/server_zones", &zones) if err != nil { var ie *internalError if errors.As(err, &ie) { if ie.Code == pathNotFoundCode { return &zones, nil } } return nil, fmt.Errorf("failed to get stream server zones: %w", err) } return &zones, err } // GetUpstreams returns http/upstreams stats with a context. func (client *NginxClient) GetUpstreams(ctx context.Context) (*Upstreams, error) { var upstreams Upstreams err := client.get(ctx, "http/upstreams", &upstreams) if err != nil { return nil, fmt.Errorf("failed to get upstreams: %w", err) } return &upstreams, nil } // GetStreamUpstreams returns stream/upstreams stats with a context. 
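// If the stream module is not configured, the API responds with PathNotFound for this endpoint
// and the method returns an empty StreamUpstreams map rather than an error.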
func (client *NginxClient) GetStreamUpstreams(ctx context.Context) (*StreamUpstreams, error) { var upstreams StreamUpstreams err := client.get(ctx, "stream/upstreams", &upstreams) if err != nil { var ie *internalError if errors.As(err, &ie) { if ie.Code == pathNotFoundCode { return &upstreams, nil } } return nil, fmt.Errorf("failed to get stream upstreams: %w", err) } return &upstreams, nil } // GetStreamZoneSync returns stream/zone_sync stats with a context. func (client *NginxClient) GetStreamZoneSync(ctx context.Context) (*StreamZoneSync, error) { var streamZoneSync StreamZoneSync err := client.get(ctx, "stream/zone_sync", &streamZoneSync) if err != nil { var ie *internalError if errors.As(err, &ie) { if ie.Code == pathNotFoundCode { return nil, nil } } return nil, fmt.Errorf("failed to get stream zone sync: %w", err) } return &streamZoneSync, err } // GetLocationZones returns http/location_zones stats with a context. func (client *NginxClient) GetLocationZones(ctx context.Context) (*LocationZones, error) { var locationZones LocationZones if client.apiVersion < 5 { return &locationZones, nil } err := client.get(ctx, "http/location_zones", &locationZones) if err != nil { return nil, fmt.Errorf("failed to get location zones: %w", err) } return &locationZones, err } // GetResolvers returns Resolvers stats with a context. func (client *NginxClient) GetResolvers(ctx context.Context) (*Resolvers, error) { var resolvers Resolvers if client.apiVersion < 5 { return &resolvers, nil } err := client.get(ctx, "resolvers", &resolvers) if err != nil { return nil, fmt.Errorf("failed to get resolvers: %w", err) } return &resolvers, err } // GetProcesses returns Processes stats with a context. func (client *NginxClient) GetProcesses(ctx context.Context) (*Processes, error) { var processes Processes err := client.get(ctx, "processes", &processes) if err != nil { return nil, fmt.Errorf("failed to get processes: %w", err) } return &processes, err } // KeyValPairs are the key-value pairs stored in a zone. type KeyValPairs map[string]string // KeyValPairsByZone are the KeyValPairs for all zones, by zone name. type KeyValPairsByZone map[string]KeyValPairs // GetKeyValPairs fetches key/value pairs for a given HTTP zone. func (client *NginxClient) GetKeyValPairs(ctx context.Context, zone string) (KeyValPairs, error) { return client.getKeyValPairs(ctx, zone, httpContext) } // GetStreamKeyValPairs fetches key/value pairs for a given Stream zone. func (client *NginxClient) GetStreamKeyValPairs(ctx context.Context, zone string) (KeyValPairs, error) { return client.getKeyValPairs(ctx, zone, streamContext) } func (client *NginxClient) getKeyValPairs(ctx context.Context, zone string, stream bool) (KeyValPairs, error) { base := "http" if stream { base = "stream" } if zone == "" { return nil, fmt.Errorf("zone: %w", ErrParameterRequired) } path := fmt.Sprintf("%v/keyvals/%v", base, zone) var keyValPairs KeyValPairs err := client.get(ctx, path, &keyValPairs) if err != nil { return nil, fmt.Errorf("failed to get keyvals for %v/%v zone: %w", base, zone, err) } return keyValPairs, nil } // GetAllKeyValPairs fetches all key/value pairs for all HTTP zones. func (client *NginxClient) GetAllKeyValPairs(ctx context.Context) (KeyValPairsByZone, error) { return client.getAllKeyValPairs(ctx, httpContext) } // GetAllStreamKeyValPairs fetches all key/value pairs for all Stream zones. 
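//
// A usage sketch (zone names come from your keyval_zone directives):
//
//	pairsByZone, err := c.GetAllStreamKeyValPairs(ctx)
//	if err != nil {
//		// handle error
//	}
//	for zone, pairs := range pairsByZone {
//		fmt.Println(zone, len(pairs))
//	}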
func (client *NginxClient) GetAllStreamKeyValPairs(ctx context.Context) (KeyValPairsByZone, error) { return client.getAllKeyValPairs(ctx, streamContext) } func (client *NginxClient) getAllKeyValPairs(ctx context.Context, stream bool) (KeyValPairsByZone, error) { base := "http" if stream { base = "stream" } path := fmt.Sprintf("%v/keyvals", base) var keyValPairsByZone KeyValPairsByZone err := client.get(ctx, path, &keyValPairsByZone) if err != nil { return nil, fmt.Errorf("failed to get keyvals for all %v zones: %w", base, err) } return keyValPairsByZone, nil } // AddKeyValPair adds a new key/value pair to a given HTTP zone. func (client *NginxClient) AddKeyValPair(ctx context.Context, zone string, key string, val string) error { return client.addKeyValPair(ctx, zone, key, val, httpContext) } // AddStreamKeyValPair adds a new key/value pair to a given Stream zone. func (client *NginxClient) AddStreamKeyValPair(ctx context.Context, zone string, key string, val string) error { return client.addKeyValPair(ctx, zone, key, val, streamContext) } func (client *NginxClient) addKeyValPair(ctx context.Context, zone string, key string, val string, stream bool) error { base := "http" if stream { base = "stream" } if zone == "" { return fmt.Errorf("zone: %w", ErrParameterRequired) } path := fmt.Sprintf("%v/keyvals/%v", base, zone) input := KeyValPairs{key: val} err := client.post(ctx, path, &input) if err != nil { return fmt.Errorf("failed to add key value pair for %v/%v zone: %w", base, zone, err) } return nil } // ModifyKeyValPair modifies the value of an existing key in a given HTTP zone. func (client *NginxClient) ModifyKeyValPair(ctx context.Context, zone string, key string, val string) error { return client.modifyKeyValPair(ctx, zone, key, val, httpContext) } // ModifyStreamKeyValPair modifies the value of an existing key in a given Stream zone. func (client *NginxClient) ModifyStreamKeyValPair(ctx context.Context, zone string, key string, val string) error { return client.modifyKeyValPair(ctx, zone, key, val, streamContext) } func (client *NginxClient) modifyKeyValPair(ctx context.Context, zone string, key string, val string, stream bool) error { base := "http" if stream { base = "stream" } if zone == "" { return fmt.Errorf("zone: %w", ErrParameterRequired) } path := fmt.Sprintf("%v/keyvals/%v", base, zone) input := KeyValPairs{key: val} err := client.patch(ctx, path, &input, http.StatusNoContent) if err != nil { return fmt.Errorf("failed to update key value pair for %v/%v zone: %w", base, zone, err) } return nil } // DeleteKeyValuePair deletes the key/value pair for a key in a given HTTP zone. func (client *NginxClient) DeleteKeyValuePair(ctx context.Context, zone string, key string) error { return client.deleteKeyValuePair(ctx, zone, key, httpContext) } // DeleteStreamKeyValuePair deletes the key/value pair for a key in a given Stream zone. func (client *NginxClient) DeleteStreamKeyValuePair(ctx context.Context, zone string, key string) error { return client.deleteKeyValuePair(ctx, zone, key, streamContext) } // To delete a key/value pair you set the value to null via the API, // then NGINX+ will delete the key. func (client *NginxClient) deleteKeyValuePair(ctx context.Context, zone string, key string, stream bool) error { base := "http" if stream { base = "stream" } if zone == "" { return fmt.Errorf("zone: %w", ErrParameterRequired) } // map[string]string can't have a nil value so we use a different type here. 
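	// The resulting PATCH body has the form {"<key>": null}, which tells NGINX Plus to drop that key.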
keyval := make(map[string]interface{}) keyval[key] = nil path := fmt.Sprintf("%v/keyvals/%v", base, zone) err := client.patch(ctx, path, &keyval, http.StatusNoContent) if err != nil { return fmt.Errorf("failed to remove key values pair for %v/%v zone: %w", base, zone, err) } return nil } // DeleteKeyValPairs deletes all the key-value pairs in a given HTTP zone. func (client *NginxClient) DeleteKeyValPairs(ctx context.Context, zone string) error { return client.deleteKeyValPairs(ctx, zone, httpContext) } // DeleteStreamKeyValPairs deletes all the key-value pairs in a given Stream zone. func (client *NginxClient) DeleteStreamKeyValPairs(ctx context.Context, zone string) error { return client.deleteKeyValPairs(ctx, zone, streamContext) } func (client *NginxClient) deleteKeyValPairs(ctx context.Context, zone string, stream bool) error { base := "http" if stream { base = "stream" } if zone == "" { return fmt.Errorf("zone: %w", ErrParameterRequired) } path := fmt.Sprintf("%v/keyvals/%v", base, zone) err := client.delete(ctx, path, http.StatusNoContent) if err != nil { return fmt.Errorf("failed to remove all key value pairs for %v/%v zone: %w", base, zone, err) } return nil } // UpdateHTTPServer updates the server of the upstream with the matching server ID. func (client *NginxClient) UpdateHTTPServer(ctx context.Context, upstream string, server UpstreamServer) error { path := fmt.Sprintf("http/upstreams/%v/servers/%v", upstream, server.ID) // The server ID is expected in the URI, but not expected in the body. // The NGINX API will return // {"error":{"status":400,"text":"unknown parameter \"id\"","code":"UpstreamConfFormatError"} // if the ID field is present. server.ID = 0 err := client.patch(ctx, path, &server, http.StatusOK) if err != nil { return fmt.Errorf("failed to update %v server to %v upstream: %w", server.Server, upstream, err) } return nil } // UpdateStreamServer updates the stream server of the upstream with the matching server ID. func (client *NginxClient) UpdateStreamServer(ctx context.Context, upstream string, server StreamUpstreamServer) error { path := fmt.Sprintf("stream/upstreams/%v/servers/%v", upstream, server.ID) // The server ID is expected in the URI, but not expected in the body. // The NGINX API will return // {"error":{"status":400,"text":"unknown parameter \"id\"","code":"UpstreamConfFormatError"} // if the ID field is present. server.ID = 0 err := client.patch(ctx, path, &server, http.StatusOK) if err != nil { return fmt.Errorf("failed to update %v stream server to %v upstream: %w", server.Server, upstream, err) } return nil } // Version returns client's current N+ API version. func (client *NginxClient) Version() int { return client.apiVersion } func addPortToServer(server string) string { if len(strings.Split(server, ":")) == 2 { return server } if len(strings.Split(server, "]:")) == 2 { return server } if strings.HasPrefix(server, "unix:") { return server } return fmt.Sprintf("%v:%v", server, defaultServerPort) } // GetHTTPLimitReqs returns http/limit_reqs stats with a context. func (client *NginxClient) GetHTTPLimitReqs(ctx context.Context) (*HTTPLimitRequests, error) { var limitReqs HTTPLimitRequests if client.apiVersion < 6 { return &limitReqs, nil } err := client.get(ctx, "http/limit_reqs", &limitReqs) if err != nil { return nil, fmt.Errorf("failed to get http limit requests: %w", err) } return &limitReqs, nil } // GetHTTPConnectionsLimit returns http/limit_conns stats with a context. 
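//
// For API versions below 6 an empty HTTPLimitConnections map is returned without calling the API.
// A usage sketch ("addr_limit" is a placeholder limit_conn zone name):
//
//	limits, err := c.GetHTTPConnectionsLimit(ctx)
//	if err != nil {
//		// handle error
//	}
//	fmt.Println((*limits)["addr_limit"].Rejected)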
func (client *NginxClient) GetHTTPConnectionsLimit(ctx context.Context) (*HTTPLimitConnections, error) { var limitConns HTTPLimitConnections if client.apiVersion < 6 { return &limitConns, nil } err := client.get(ctx, "http/limit_conns", &limitConns) if err != nil { return nil, fmt.Errorf("failed to get http connections limit: %w", err) } return &limitConns, nil } // GetStreamConnectionsLimit returns stream/limit_conns stats with a context. func (client *NginxClient) GetStreamConnectionsLimit(ctx context.Context) (*StreamLimitConnections, error) { var limitConns StreamLimitConnections if client.apiVersion < 6 { return &limitConns, nil } err := client.get(ctx, "stream/limit_conns", &limitConns) if err != nil { var ie *internalError if errors.As(err, &ie) { if ie.Code == pathNotFoundCode { return &limitConns, nil } } return nil, fmt.Errorf("failed to get stream connections limit: %w", err) } return &limitConns, nil } // GetWorkers returns workers stats. func (client *NginxClient) GetWorkers(ctx context.Context) ([]*Workers, error) { var workers []*Workers if client.apiVersion < 9 { return workers, nil } err := client.get(ctx, "workers", &workers) if err != nil { return nil, fmt.Errorf("failed to get workers: %w", err) } return workers, nil } var rePlus = regexp.MustCompile(`-r(\d+)`) // extractPlusVersionValues. func extractPlusVersionValues(input string) (int, error) { var rValue int matches := rePlus.FindStringSubmatch(input) if len(matches) < 1 { return 0, fmt.Errorf("%w [%s]", ErrPlusVersionNotFound, input) } rValue, err := strconv.Atoi(matches[1]) if err != nil { return 0, fmt.Errorf("failed to convert NGINX Plus release to integer: %w", err) } return rValue, nil } nginx-plus-go-client-2.3.0/client/nginx_test.go000066400000000000000000001051061474621132500214760ustar00rootroot00000000000000package client import ( "context" "encoding/json" "net/http" "net/http/httptest" "reflect" "strings" "sync" "testing" ) func TestDetermineUpdates(t *testing.T) { t.Parallel() maxConns := 1 tests := []struct { name string updated []UpstreamServer nginx []UpstreamServer expectedToAdd []UpstreamServer expectedToDelete []UpstreamServer expectedToUpdate []UpstreamServer }{ { updated: []UpstreamServer{ { Server: "10.0.0.3:80", }, { Server: "10.0.0.4:80", }, }, nginx: []UpstreamServer{ { ID: 1, Server: "10.0.0.1:80", }, { ID: 2, Server: "10.0.0.2:80", }, }, expectedToAdd: []UpstreamServer{ { Server: "10.0.0.3:80", }, { Server: "10.0.0.4:80", }, }, expectedToDelete: []UpstreamServer{ { ID: 1, Server: "10.0.0.1:80", }, { ID: 2, Server: "10.0.0.2:80", }, }, name: "replace all", }, { updated: []UpstreamServer{ { Server: "10.0.0.2:80", }, { Server: "10.0.0.3:80", }, { Server: "10.0.0.4:80", }, }, nginx: []UpstreamServer{ { ID: 1, Server: "10.0.0.1:80", }, { ID: 2, Server: "10.0.0.2:80", }, { ID: 3, Server: "10.0.0.3:80", }, }, expectedToAdd: []UpstreamServer{ { Server: "10.0.0.4:80", }, }, expectedToDelete: []UpstreamServer{ { ID: 1, Server: "10.0.0.1:80", }, }, name: "add and delete", }, { updated: []UpstreamServer{ { Server: "10.0.0.1:80", }, { Server: "10.0.0.2:80", }, { Server: "10.0.0.3:80", }, }, nginx: []UpstreamServer{ { Server: "10.0.0.1:80", }, { Server: "10.0.0.2:80", }, { Server: "10.0.0.3:80", }, }, name: "same", }, { // empty values }, { updated: []UpstreamServer{ { Server: "10.0.0.1:80", MaxConns: &maxConns, }, }, nginx: []UpstreamServer{ { ID: 1, Server: "10.0.0.1:80", }, { ID: 2, Server: "10.0.0.2:80", }, }, expectedToDelete: []UpstreamServer{ { ID: 2, Server: "10.0.0.2:80", }, }, 
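// 10.0.0.2 is absent from updated, so it is expected to be deleted; 10.0.0.1 only changes MaxConns, so it is expected to be updated in place.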
expectedToUpdate: []UpstreamServer{ { ID: 1, Server: "10.0.0.1:80", MaxConns: &maxConns, }, }, name: "update field and delete", }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { t.Parallel() toAdd, toDelete, toUpdate := determineUpdates(test.updated, test.nginx) if !reflect.DeepEqual(toAdd, test.expectedToAdd) || !reflect.DeepEqual(toDelete, test.expectedToDelete) || !reflect.DeepEqual(toUpdate, test.expectedToUpdate) { t.Errorf("determineUpdates(%v, %v) = (%v, %v, %v)", test.updated, test.nginx, toAdd, toDelete, toUpdate) } }) } } func TestStreamDetermineUpdates(t *testing.T) { t.Parallel() maxConns := 1 tests := []struct { name string updated []StreamUpstreamServer nginx []StreamUpstreamServer expectedToAdd []StreamUpstreamServer expectedToDelete []StreamUpstreamServer expectedToUpdate []StreamUpstreamServer }{ { updated: []StreamUpstreamServer{ { Server: "10.0.0.3:80", }, { Server: "10.0.0.4:80", }, }, nginx: []StreamUpstreamServer{ { ID: 1, Server: "10.0.0.1:80", }, { ID: 2, Server: "10.0.0.2:80", }, }, expectedToAdd: []StreamUpstreamServer{ { Server: "10.0.0.3:80", }, { Server: "10.0.0.4:80", }, }, expectedToDelete: []StreamUpstreamServer{ { ID: 1, Server: "10.0.0.1:80", }, { ID: 2, Server: "10.0.0.2:80", }, }, name: "replace all", }, { updated: []StreamUpstreamServer{ { Server: "10.0.0.2:80", }, { Server: "10.0.0.3:80", }, { Server: "10.0.0.4:80", }, }, nginx: []StreamUpstreamServer{ { ID: 1, Server: "10.0.0.1:80", }, { ID: 2, Server: "10.0.0.2:80", }, { ID: 3, Server: "10.0.0.3:80", }, }, expectedToAdd: []StreamUpstreamServer{ { Server: "10.0.0.4:80", }, }, expectedToDelete: []StreamUpstreamServer{ { ID: 1, Server: "10.0.0.1:80", }, }, name: "add and delete", }, { updated: []StreamUpstreamServer{ { Server: "10.0.0.1:80", }, { Server: "10.0.0.2:80", }, { Server: "10.0.0.3:80", }, }, nginx: []StreamUpstreamServer{ { ID: 1, Server: "10.0.0.1:80", }, { ID: 2, Server: "10.0.0.2:80", }, { ID: 3, Server: "10.0.0.3:80", }, }, name: "same", }, { // empty values }, { updated: []StreamUpstreamServer{ { Server: "10.0.0.1:80", MaxConns: &maxConns, }, }, nginx: []StreamUpstreamServer{ { ID: 1, Server: "10.0.0.1:80", }, { ID: 2, Server: "10.0.0.2:80", }, }, expectedToDelete: []StreamUpstreamServer{ { ID: 2, Server: "10.0.0.2:80", }, }, expectedToUpdate: []StreamUpstreamServer{ { ID: 1, Server: "10.0.0.1:80", MaxConns: &maxConns, }, }, name: "update field and delete", }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { t.Parallel() toAdd, toDelete, toUpdate := determineStreamUpdates(test.updated, test.nginx) if !reflect.DeepEqual(toAdd, test.expectedToAdd) || !reflect.DeepEqual(toDelete, test.expectedToDelete) || !reflect.DeepEqual(toUpdate, test.expectedToUpdate) { t.Errorf("determineStreamUpdates(%v, %v) = (%v, %v, %v)", test.updated, test.nginx, toAdd, toDelete, toUpdate) } }) } } func TestAddPortToServer(t *testing.T) { t.Parallel() // More info about addresses http://nginx.org/en/docs/http/ngx_http_upstream_module.html#server tests := []struct { address string expected string msg string }{ { address: "example.com:8080", expected: "example.com:8080", msg: "host and port", }, { address: "127.0.0.1:8080", expected: "127.0.0.1:8080", msg: "ipv4 and port", }, { address: "[::]:8080", expected: "[::]:8080", msg: "ipv6 and port", }, { address: "unix:/path/to/socket", expected: "unix:/path/to/socket", msg: "unix socket", }, { address: "example.com", expected: "example.com:80", msg: "host without port", }, { address: "127.0.0.1", expected: "127.0.0.1:80", msg: "ipv4 
without port", }, { address: "[::]", expected: "[::]:80", msg: "ipv6 without port", }, } for _, test := range tests { t.Run(test.msg, func(t *testing.T) { t.Parallel() result := addPortToServer(test.address) if result != test.expected { t.Errorf("addPortToServer(%v) returned %v but expected %v for %v", test.address, result, test.expected, test.msg) } }) } } func TestHaveSameParameters(t *testing.T) { t.Parallel() tests := []struct { msg string server UpstreamServer serverNGX UpstreamServer expected bool }{ { server: UpstreamServer{}, serverNGX: UpstreamServer{}, expected: true, msg: "empty", }, { server: UpstreamServer{ID: 2}, serverNGX: UpstreamServer{ID: 3}, expected: true, msg: "different ID", }, { server: UpstreamServer{}, serverNGX: UpstreamServer{ MaxConns: &defaultMaxConns, MaxFails: &defaultMaxFails, FailTimeout: defaultFailTimeout, SlowStart: defaultSlowStart, Backup: &defaultBackup, Weight: &defaultWeight, Down: &defaultDown, }, expected: true, msg: "default values", }, { server: UpstreamServer{ ID: 1, Server: "127.0.0.1", MaxConns: &defaultMaxConns, MaxFails: &defaultMaxFails, FailTimeout: defaultFailTimeout, SlowStart: defaultSlowStart, Backup: &defaultBackup, Weight: &defaultWeight, Down: &defaultDown, }, serverNGX: UpstreamServer{ ID: 1, Server: "127.0.0.1", MaxConns: &defaultMaxConns, MaxFails: &defaultMaxFails, FailTimeout: defaultFailTimeout, SlowStart: defaultSlowStart, Backup: &defaultBackup, Weight: &defaultWeight, Down: &defaultDown, }, expected: true, msg: "same values", }, { server: UpstreamServer{SlowStart: "10s"}, serverNGX: UpstreamServer{}, expected: false, msg: "different SlowStart", }, { server: UpstreamServer{}, serverNGX: UpstreamServer{SlowStart: "10s"}, expected: false, msg: "different SlowStart 2", }, { server: UpstreamServer{SlowStart: "20s"}, serverNGX: UpstreamServer{SlowStart: "10s"}, expected: false, msg: "different SlowStart 3", }, } for _, test := range tests { t.Run(test.msg, func(t *testing.T) { t.Parallel() result := test.server.hasSameParametersAs(test.serverNGX) if result != test.expected { t.Errorf("(%v) hasSameParametersAs (%v) returned %v but expected %v", test.server, test.serverNGX, result, test.expected) } }) } } func TestHaveSameParametersForStream(t *testing.T) { t.Parallel() tests := []struct { msg string server StreamUpstreamServer serverNGX StreamUpstreamServer expected bool }{ { server: StreamUpstreamServer{}, serverNGX: StreamUpstreamServer{}, expected: true, msg: "empty", }, { server: StreamUpstreamServer{ID: 2}, serverNGX: StreamUpstreamServer{ID: 3}, expected: true, msg: "different ID", }, { server: StreamUpstreamServer{}, serverNGX: StreamUpstreamServer{ MaxConns: &defaultMaxConns, MaxFails: &defaultMaxFails, FailTimeout: defaultFailTimeout, SlowStart: defaultSlowStart, Backup: &defaultBackup, Weight: &defaultWeight, Down: &defaultDown, }, expected: true, msg: "default values", }, { server: StreamUpstreamServer{ ID: 1, Server: "127.0.0.1", MaxConns: &defaultMaxConns, MaxFails: &defaultMaxFails, FailTimeout: defaultFailTimeout, SlowStart: defaultSlowStart, Backup: &defaultBackup, Weight: &defaultWeight, Down: &defaultDown, }, serverNGX: StreamUpstreamServer{ ID: 1, Server: "127.0.0.1", MaxConns: &defaultMaxConns, MaxFails: &defaultMaxFails, FailTimeout: defaultFailTimeout, SlowStart: defaultSlowStart, Backup: &defaultBackup, Weight: &defaultWeight, Down: &defaultDown, }, expected: true, msg: "same values", }, { server: StreamUpstreamServer{}, serverNGX: StreamUpstreamServer{SlowStart: "10s"}, expected: false, msg: "different 
SlowStart", }, { server: StreamUpstreamServer{SlowStart: "20s"}, serverNGX: StreamUpstreamServer{SlowStart: "10s"}, expected: false, msg: "different SlowStart 2", }, } for _, test := range tests { t.Run(test.msg, func(t *testing.T) { t.Parallel() result := test.server.hasSameParametersAs(test.serverNGX) if result != test.expected { t.Errorf("(%v) hasSameParametersAs (%v) returned %v but expected %v", test.server, test.serverNGX, result, test.expected) } }) } } func TestClientWithCheckAPI(t *testing.T) { t.Parallel() // Create a test server that returns supported API versions ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { _, err := w.Write([]byte(`[4, 5, 6, 7, 8, 9]`)) if err != nil { t.Fatalf("unexpected error: %v", err) } })) defer ts.Close() // Test creating a new client with a supported API version on the server client, err := NewNginxClient(ts.URL, WithAPIVersion(7), WithCheckAPI()) if err != nil { t.Fatalf("unexpected error: %v", err) } if client == nil { t.Fatalf("client is nil") } // Test creating a new client with an unsupported API version on the server client, err = NewNginxClient(ts.URL, WithAPIVersion(3), WithCheckAPI()) if err == nil { t.Fatalf("expected error, but got nil") } if client != nil { t.Fatalf("expected client to be nil, but got %v", client) } } func TestClientWithAPIVersion(t *testing.T) { t.Parallel() // Test creating a new client with a supported API version on the client client, err := NewNginxClient("http://api-url", WithAPIVersion(8)) if err != nil { t.Fatalf("unexpected error: %v", err) } if client == nil { t.Fatalf("client is nil") } // Test creating a new client with an unsupported API version on the client client, err = NewNginxClient("http://api-url", WithAPIVersion(3)) if err == nil { t.Fatalf("expected error, but got nil") } if client != nil { t.Fatalf("expected client to be nil, but got %v", client) } } func TestClientWithHTTPClient(t *testing.T) { t.Parallel() // Test creating a new client passing a custom HTTP client client, err := NewNginxClient("http://api-url", WithHTTPClient(&http.Client{})) if err != nil { t.Fatalf("unexpected error: %v", err) } if client == nil { t.Fatalf("client is nil") } // Test creating a new client passing a nil HTTP client client, err = NewNginxClient("http://api-url", WithHTTPClient(nil)) if err == nil { t.Fatalf("expected error, but got nil") } if client != nil { t.Fatalf("expected client to be nil, but got %v", client) } } func TestClientWithMaxAPI(t *testing.T) { t.Parallel() tests := []struct { name string apiVersions string expected int }{ { name: "Test 1: API versions contains invalid version", apiVersions: `[4, 5, 6, 7, 8, 9, 25]`, expected: APIVersion, }, { name: "Test 2: No API versions, default API Version is used", apiVersions: ``, expected: APIVersion, }, { name: "Test 3: API version lower than default", apiVersions: `[4, 5, 6, 7]`, expected: 7, }, { name: "Test 4: No API versions, default API version is used", apiVersions: `[""]`, expected: APIVersion, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { t.Parallel() // Test creating a new client with max API version ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch { case r.RequestURI == "/": _, err := w.Write([]byte(tt.apiVersions)) if err != nil { t.Fatalf("unexpected error: %v", err) } default: _, err := w.Write([]byte(`{}`)) if err != nil { t.Fatalf("unexpected error: %v", err) } } })) defer ts.Close() client, err := NewNginxClient(ts.URL, 
WithMaxAPIVersion()) if err != nil { t.Fatalf("unexpected error: %v", err) } if client == nil { t.Fatalf("client is nil") } if client.apiVersion != tt.expected { t.Fatalf("expected client.apiVersion to be %v, but got %v", tt.expected, client.apiVersion) } }) } } func TestGetStats_NoStreamEndpoint(t *testing.T) { t.Parallel() var writeLock sync.Mutex ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { writeLock.Lock() defer writeLock.Unlock() switch { case r.RequestURI == "/": _, err := w.Write([]byte(`[4, 5, 6, 7, 8, 9]`)) if err != nil { t.Fatalf("unexpected error: %v", err) } case r.RequestURI == "/7/": _, err := w.Write([]byte(`["nginx","processes","connections","slabs","http","resolvers","ssl"]`)) if err != nil { t.Fatalf("unexpected error: %v", err) } case strings.HasPrefix(r.RequestURI, "/7/stream"): t.Fatal("Stream endpoint should not be called since it does not exist.") default: _, err := w.Write([]byte(`{}`)) if err != nil { t.Fatalf("unexpected error: %v", err) } } })) defer ts.Close() // Test creating a new client with a supported API version on the server client, err := NewNginxClient(ts.URL, WithAPIVersion(7), WithCheckAPI()) if err != nil { t.Fatalf("unexpected error: %v", err) } if client == nil { t.Fatalf("client is nil") } stats, err := client.GetStats(context.Background()) if err != nil { t.Fatalf("unexpected error: %v", err) } if !reflect.DeepEqual(stats.StreamServerZones, StreamServerZones{}) { t.Fatalf("StreamServerZones: expected %v, actual %v", StreamServerZones{}, stats.StreamServerZones) } if !reflect.DeepEqual(stats.StreamLimitConnections, StreamLimitConnections{}) { t.Fatalf("StreamLimitConnections: expected %v, actual %v", StreamLimitConnections{}, stats.StreamLimitConnections) } if !reflect.DeepEqual(stats.StreamUpstreams, StreamUpstreams{}) { t.Fatalf("StreamUpstreams: expected %v, actual %v", StreamUpstreams{}, stats.StreamUpstreams) } if stats.StreamZoneSync != nil { t.Fatalf("StreamZoneSync: expected %v, actual %v", nil, stats.StreamZoneSync) } } func TestGetStats_SSL(t *testing.T) { t.Parallel() ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch { case r.RequestURI == "/": _, err := w.Write([]byte(`[4, 5, 6, 7, 8, 9]`)) if err != nil { t.Fatalf("unexpected error: %v", err) } case r.RequestURI == "/8/": _, err := w.Write([]byte(`["nginx","processes","connections","slabs","http","resolvers","ssl","workers"]`)) if err != nil { t.Fatalf("unexpected error: %v", err) } case strings.HasPrefix(r.RequestURI, "/8/ssl"): _, err := w.Write([]byte(`{ "handshakes" : 79572, "handshakes_failed" : 21025, "session_reuses" : 15762, "no_common_protocol" : 4, "no_common_cipher" : 2, "handshake_timeout" : 0, "peer_rejected_cert" : 0, "verify_failures" : { "no_cert" : 0, "expired_cert" : 2, "revoked_cert" : 1, "hostname_mismatch" : 2, "other" : 1 } }`)) if err != nil { t.Fatalf("unexpected error: %v", err) } case strings.HasPrefix(r.RequestURI, "/8/stream"): _, err := w.Write([]byte(`[""]`)) if err != nil { t.Fatalf("unexpected error: %v", err) } default: _, err := w.Write([]byte(`{}`)) if err != nil { t.Fatalf("unexpected error: %v", err) } } })) defer ts.Close() // Test creating a new client with a supported API version on the server client, err := NewNginxClient(ts.URL, WithAPIVersion(8), WithCheckAPI()) if err != nil { t.Fatalf("unexpected error: %v", err) } if client == nil { t.Fatalf("client is nil") } stats, err := client.GetStats(context.Background()) if err != nil { t.Fatalf("unexpected 
error: %v", err) } testStats := SSL{ Handshakes: 79572, HandshakesFailed: 21025, SessionReuses: 15762, NoCommonProtocol: 4, NoCommonCipher: 2, HandshakeTimeout: 0, PeerRejectedCert: 0, VerifyFailures: VerifyFailures{ NoCert: 0, ExpiredCert: 2, RevokedCert: 1, HostnameMismatch: 2, Other: 1, }, } if !reflect.DeepEqual(stats.SSL, testStats) { t.Fatalf("SSL stats: expected %v, actual %v", testStats, stats.SSL) } } func TestGetMaxAPIVersionServer(t *testing.T) { t.Parallel() ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch { case r.RequestURI == "/": _, err := w.Write([]byte(`[4, 5, 6, 7]`)) if err != nil { t.Fatalf("unexpected error: %v", err) } default: _, err := w.Write([]byte(`{}`)) if err != nil { t.Fatalf("unexpected error: %v", err) } } })) defer ts.Close() c, err := NewNginxClient(ts.URL) if err != nil { t.Fatalf("unexpected error: %v", err) } maxVer, err := c.GetMaxAPIVersion(context.Background()) if err != nil { t.Fatalf("unexpected error: %v", err) } if maxVer != 7 { t.Fatalf("expected 7, got %v", maxVer) } } func TestGetMaxAPIVersionClient(t *testing.T) { t.Parallel() ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch { case r.RequestURI == "/": _, err := w.Write([]byte(`[4, 5, 6, 7, 8, 9, 25]`)) if err != nil { t.Fatalf("unexpected error: %v", err) } default: _, err := w.Write([]byte(`{}`)) if err != nil { t.Fatalf("unexpected error: %v", err) } } })) defer ts.Close() c, err := NewNginxClient(ts.URL) if err != nil { t.Fatalf("unexpected error: %v", err) } maxVer, err := c.GetMaxAPIVersion(context.Background()) if err != nil { t.Fatalf("unexpected error: %v", err) } if maxVer != c.apiVersion { t.Fatalf("expected %v, got %v", c.apiVersion, maxVer) } } func TestExtractPlusVersion(t *testing.T) { t.Parallel() tests := []struct { name string version string expected int }{ { name: "r32", version: "nginx-plus-r32", expected: 32, }, { name: "r32p1", version: "nginx-plus-r32-p1", expected: 32, }, { name: "r32p2", version: "nginx-plus-r32-p2", expected: 32, }, { name: "r33", version: "nginx-plus-r33", expected: 33, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { t.Parallel() version, err := extractPlusVersionValues(test.version) if err != nil { t.Error(err) } if version != test.expected { t.Errorf("values do not match, got: %d, expected %d)", version, test.expected) } }) } } func TestExtractPlusVersionNegativeCase(t *testing.T) { t.Parallel() tests := []struct { name string version string }{ { name: "no-number", version: "nginx-plus-rxx", }, { name: "extra-chars", version: "nginx-plus-rxx4343", }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { t.Parallel() _, err := extractPlusVersionValues(test.version) if err == nil { t.Errorf("Expected error but got %v", err) } }) } } func TestUpdateHTTPServers(t *testing.T) { t.Parallel() testcases := map[string]struct { reqServers []UpstreamServer responses []response expAdded, expDeleted, expUpdated int expErr bool }{ "successfully add 1 server": { reqServers: []UpstreamServer{{Server: "127.0.0.1:80"}}, responses: []response{ // response for first serversInNginx GET servers { statusCode: http.StatusOK, }, // response for addHTTPServer POST server for http server { statusCode: http.StatusCreated, }, }, expAdded: 1, }, "successfully update 1 server": { reqServers: []UpstreamServer{{Server: "127.0.0.1:80"}}, responses: []response{ // response for first serversInNginx GET servers { statusCode: http.StatusOK, servers: 
[]UpstreamServer{ {ID: 1, Server: "127.0.0.1:80", Route: "/test"}, }, }, // response for UpdateHTTPServer PATCH server for http server { statusCode: http.StatusOK, }, }, expUpdated: 1, }, "successfully delete 1 server": { reqServers: []UpstreamServer{{Server: "127.0.0.1:80"}}, responses: []response{ // response for first serversInNginx GET servers { statusCode: http.StatusOK, servers: []UpstreamServer{ {ID: 1, Server: "127.0.0.1:80"}, {ID: 2, Server: "127.0.0.2:80"}, }, }, // response for deleteHTTPServer DELETE server for http server { statusCode: http.StatusOK, }, }, expDeleted: 1, }, "successfully add 1 server, update 1 server, delete 1 server": { reqServers: []UpstreamServer{ {Server: "127.0.0.1:80", Route: "/test"}, {Server: "127.0.0.2:80"}, }, responses: []response{ // response for first serversInNginx GET servers { statusCode: http.StatusOK, servers: []UpstreamServer{ {ID: 1, Server: "127.0.0.1:80"}, {ID: 2, Server: "127.0.0.3:80"}, }, }, // response for addHTTPServer POST server for http server { statusCode: http.StatusCreated, }, // response for deleteHTTPServer DELETE server for http server { statusCode: http.StatusOK, }, // response for UpdateHTTPServer PATCH server for http server { statusCode: http.StatusOK, }, }, expAdded: 1, expUpdated: 1, expDeleted: 1, }, "successfully add 1 server with ignored identical duplicate": { reqServers: []UpstreamServer{ {Server: "127.0.0.1:80", Route: "/test"}, {Server: "127.0.0.1", Route: "/test"}, {Server: "127.0.0.1:80", Route: "/test", MaxConns: &defaultMaxConns}, {Server: "127.0.0.1:80", Route: "/test", Backup: &defaultBackup}, {Server: "127.0.0.1", Route: "/test", SlowStart: defaultSlowStart}, }, responses: []response{ // response for first serversInNginx GET servers { statusCode: http.StatusOK, servers: []UpstreamServer{}, }, // response for addHTTPServer POST server for http server { statusCode: http.StatusCreated, }, }, expAdded: 1, }, "successfully add 1 server, receive 1 error for non-identical duplicates": { reqServers: []UpstreamServer{ {Server: "127.0.0.1:80", Route: "/test"}, {Server: "127.0.0.1:80", Route: "/test"}, {Server: "127.0.0.2:80", Route: "/test1"}, {Server: "127.0.0.2:80", Route: "/test2"}, {Server: "127.0.0.2:80", Route: "/test3"}, }, responses: []response{ // response for first serversInNginx GET servers { statusCode: http.StatusOK, servers: []UpstreamServer{}, }, // response for addHTTPServer POST server for http server { statusCode: http.StatusCreated, }, }, expAdded: 1, expErr: true, }, "successfully add 1 server, receive 1 error": { reqServers: []UpstreamServer{ {Server: "127.0.0.1:80"}, {Server: "127.0.0.1:443"}, }, responses: []response{ // response for first serversInNginx GET servers { statusCode: http.StatusOK, servers: []UpstreamServer{}, }, // response for addHTTPServer POST server for server1 { statusCode: http.StatusInternalServerError, servers: []UpstreamServer{}, }, // response for addHTTPServer POST server for server2 { statusCode: http.StatusCreated, servers: []UpstreamServer{}, }, }, expAdded: 1, expErr: true, }, } for name, tc := range testcases { t.Run(name, func(t *testing.T) { t.Parallel() var requests []*http.Request handler := &fakeHandler{ func(w http.ResponseWriter, r *http.Request) { requests = append(requests, r) if len(tc.responses) == 0 { t.Fatal("ran out of responses") } if r.Method == http.MethodPost || r.Method == http.MethodPut { contentType, ok := r.Header["Content-Type"] if !ok { t.Fatalf("expected request type %s to have a Content-Type header", r.Method) } if len(contentType) != 1 
|| contentType[0] != "application/json" { t.Fatalf("expected request type %s to have a Content-Type header value of 'application/json'", r.Method) } } re := tc.responses[0] tc.responses = tc.responses[1:] w.WriteHeader(re.statusCode) resp, err := json.Marshal(re.servers) if err != nil { t.Fatal(err) } _, err = w.Write(resp) if err != nil { t.Fatal(err) } }, } server := httptest.NewServer(handler) defer server.Close() client, err := NewNginxClient(server.URL, WithHTTPClient(&http.Client{})) if err != nil { t.Fatal(err) } added, deleted, updated, err := client.UpdateHTTPServers(context.Background(), "fakeUpstream", tc.reqServers) if tc.expErr && err == nil { t.Fatal("expected to receive an error") } if !tc.expErr && err != nil { t.Fatalf("received an unexpected error: %v", err) } if len(added) != tc.expAdded { t.Fatalf("expected to get %d added server(s), instead got %d", tc.expAdded, len(added)) } if len(deleted) != tc.expDeleted { t.Fatalf("expected to get %d deleted server(s), instead got %d", tc.expDeleted, len(deleted)) } if len(updated) != tc.expUpdated { t.Fatalf("expected to get %d updated server(s), instead got %d", tc.expUpdated, len(updated)) } if len(tc.responses) != 0 { t.Fatalf("did not use all expected responses, %d unused", len(tc.responses)) } }) } } func TestUpdateStreamServers(t *testing.T) { t.Parallel() testcases := map[string]struct { reqServers []StreamUpstreamServer responses []response expAdded, expDeleted, expUpdated int expErr bool }{ "successfully add 1 server": { reqServers: []StreamUpstreamServer{{Server: "127.0.0.1:80"}}, responses: []response{ // response for first serversInNginx GET servers { statusCode: http.StatusOK, }, // response for addStreamServer POST server for stream server { statusCode: http.StatusCreated, }, }, expAdded: 1, }, "successfully update 1 server": { reqServers: []StreamUpstreamServer{{Server: "127.0.0.1:80"}}, responses: []response{ // response for first serversInNginx GET servers { statusCode: http.StatusOK, servers: []StreamUpstreamServer{ {ID: 1, Server: "127.0.0.1:80", SlowStart: "30s"}, }, }, // response for UpdateStreamServer PATCH server for stream server { statusCode: http.StatusOK, }, }, expUpdated: 1, }, "successfully delete 1 server": { reqServers: []StreamUpstreamServer{{Server: "127.0.0.1:80"}}, responses: []response{ // response for first serversInNginx GET servers { statusCode: http.StatusOK, servers: []StreamUpstreamServer{ {ID: 1, Server: "127.0.0.1:80"}, {ID: 2, Server: "127.0.0.2:80"}, }, }, // response for deleteStreamServer DELETE server for stream server { statusCode: http.StatusOK, }, }, expDeleted: 1, }, "successfully add 1 server, update 1 server, delete 1 server": { reqServers: []StreamUpstreamServer{ {Server: "127.0.0.1:80", SlowStart: "30s"}, {Server: "127.0.0.2:80"}, }, responses: []response{ // response for first serversInNginx GET servers { statusCode: http.StatusOK, servers: []StreamUpstreamServer{ {ID: 1, Server: "127.0.0.1:80"}, {ID: 2, Server: "127.0.0.3:80"}, }, }, // response for addStreamServer POST server for stream server { statusCode: http.StatusCreated, }, // response for deleteStreamServer DELETE server for stream server { statusCode: http.StatusOK, }, // response for UpdateStreamServer PATCH server for stream server { statusCode: http.StatusOK, }, }, expAdded: 1, expUpdated: 1, expDeleted: 1, }, "successfully add 1 server with ignored identical duplicate": { reqServers: []StreamUpstreamServer{ {Server: "127.0.0.1:80", SlowStart: "30s"}, {Server: "127.0.0.1", SlowStart: "30s"}, {Server: 
"127.0.0.1:80", SlowStart: "30s", MaxConns: &defaultMaxConns}, {Server: "127.0.0.1", SlowStart: "30s", MaxFails: &defaultMaxFails}, {Server: "127.0.0.1", SlowStart: "30s", FailTimeout: defaultFailTimeout}, }, responses: []response{ // response for first serversInNginx GET servers { statusCode: http.StatusOK, servers: []UpstreamServer{}, }, // response for addStreamServer POST server for stream server { statusCode: http.StatusCreated, }, }, expAdded: 1, }, "successfully add 1 server, receive 1 error for non-identical duplicates": { reqServers: []StreamUpstreamServer{ {Server: "127.0.0.1:80", SlowStart: "30s"}, {Server: "127.0.0.1:80", SlowStart: "30s"}, {Server: "127.0.0.2:80", SlowStart: "10s"}, {Server: "127.0.0.2:80", SlowStart: "20s"}, {Server: "127.0.0.2:80", SlowStart: "30s"}, }, responses: []response{ // response for first serversInNginx GET servers { statusCode: http.StatusOK, servers: []UpstreamServer{}, }, // response for addStreamServer POST server for stream server { statusCode: http.StatusCreated, }, }, expAdded: 1, expErr: true, }, "successfully add 1 server, receive 1 error": { reqServers: []StreamUpstreamServer{ {Server: "127.0.0.1:2000"}, {Server: "127.0.0.1:3000"}, }, responses: []response{ // response for first serversInNginx GET servers { statusCode: http.StatusOK, servers: []UpstreamServer{}, }, // response for addStreamServer POST server for server1 { statusCode: http.StatusInternalServerError, servers: []UpstreamServer{}, }, // response for addStreamServer POST server for server2 { statusCode: http.StatusCreated, servers: []UpstreamServer{}, }, }, expAdded: 1, expErr: true, }, } for name, tc := range testcases { t.Run(name, func(t *testing.T) { t.Parallel() var requests []*http.Request handler := &fakeHandler{ func(w http.ResponseWriter, r *http.Request) { requests = append(requests, r) if len(tc.responses) == 0 { t.Fatal("ran out of responses") } if r.Method == http.MethodPost || r.Method == http.MethodPut { contentType, ok := r.Header["Content-Type"] if !ok { t.Fatalf("expected request type %s to have a Content-Type header", r.Method) } if len(contentType) != 1 || contentType[0] != "application/json" { t.Fatalf("expected request type %s to have a Content-Type header value of 'application/json'", r.Method) } } re := tc.responses[0] tc.responses = tc.responses[1:] w.WriteHeader(re.statusCode) resp, err := json.Marshal(re.servers) if err != nil { t.Fatal(err) } _, err = w.Write(resp) if err != nil { t.Fatal(err) } }, } server := httptest.NewServer(handler) defer server.Close() client, err := NewNginxClient(server.URL, WithHTTPClient(&http.Client{})) if err != nil { t.Fatal(err) } added, deleted, updated, err := client.UpdateStreamServers(context.Background(), "fakeUpstream", tc.reqServers) if tc.expErr && err == nil { t.Fatal("expected to receive an error") } if !tc.expErr && err != nil { t.Fatalf("received an unexpected error: %v", err) } if len(added) != tc.expAdded { t.Fatalf("expected to get %d added server(s), instead got %d", tc.expAdded, len(added)) } if len(deleted) != tc.expDeleted { t.Fatalf("expected to get %d deleted server(s), instead got %d", tc.expDeleted, len(deleted)) } if len(updated) != tc.expUpdated { t.Fatalf("expected to get %d updated server(s), instead got %d", tc.expUpdated, len(updated)) } if len(tc.responses) != 0 { t.Fatalf("did not use all expected responses, %d unused", len(tc.responses)) } }) } } type response struct { servers interface{} statusCode int } type fakeHandler struct { handler func(w http.ResponseWriter, r *http.Request) } 
func (h *fakeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { h.handler(w, r) } nginx-plus-go-client-2.3.0/compose.yaml000066400000000000000000000030551474621132500200400ustar00rootroot00000000000000services: nginx: image: nginx-plus build: dockerfile: docker/Dockerfile secrets: - nginx-repo.crt - nginx-repo.key volumes: - type: bind source: ./docker/nginx.conf target: /etc/nginx/nginx.conf networks: default: aliases: - nginx-plus-test ports: - 8080 nginx-no-stream: extends: service: nginx volumes: - type: bind source: ./docker/nginx_no_stream.conf target: /etc/nginx/nginx.conf nginx-helper: extends: service: nginx test: image: golang:1.23 volumes: - type: bind source: ./ target: /go/src/github.com/nginx/nginx-plus-go-client working_dir: /go/src/github.com/nginx/nginx-plus-go-client command: go test -v -shuffle=on -race tests/client_test.go depends_on: - nginx - nginx-helper environment: - TEST_API_ENDPOINT=http://nginx:8080/api - TEST_API_ENDPOINT_OF_HELPER=http://nginx-helper:8080/api - TEST_UNAVAILABLE_STREAM_ADDRESS=nginx:8081 test-no-stream: extends: service: test command: go test -v -shuffle=on -race tests/client_no_stream_test.go depends_on: - nginx-no-stream - nginx-helper environment: - TEST_API_ENDPOINT=http://nginx-no-stream:8080/api - TEST_API_ENDPOINT_OF_HELPER=http://nginx-helper:8080/api - TEST_UNAVAILABLE_STREAM_ADDRESS=nginx-no-stream:8081 secrets: nginx-repo.crt: file: ./docker/nginx-repo.crt nginx-repo.key: file: ./docker/nginx-repo.key nginx-plus-go-client-2.3.0/docker/000077500000000000000000000000001474621132500167535ustar00rootroot00000000000000nginx-plus-go-client-2.3.0/docker/Dockerfile000066400000000000000000000026551474621132500207550ustar00rootroot00000000000000# syntax=docker/dockerfile:1.13 FROM debian:12-slim LABEL maintainer="NGINX Docker Maintainers " ARG NGINX_PLUS_VERSION=R32 # Install NGINX Plus # Download certificate and key from the customer portal (https://my.f5.com) # and copy to the build context RUN --mount=type=secret,id=nginx-repo.crt,dst=/etc/ssl/nginx/nginx-repo.crt,mode=0644 \ --mount=type=secret,id=nginx-repo.key,dst=/etc/ssl/nginx/nginx-repo.key,mode=0644 \ <<"eot" bash -euo pipefail apt-get update apt-get install --no-install-recommends --no-install-suggests -y ca-certificates gnupg curl apt-transport-https curl -fsSL https://cs.nginx.com/static/keys/nginx_signing.key | gpg --dearmor > /etc/apt/trusted.gpg.d/nginx_signing.gpg curl -fsSL -o /etc/apt/apt.conf.d/90pkgs-nginx https://cs.nginx.com/static/files/90pkgs-nginx DEBIAN_VERSION=$(awk -F '=' '/^VERSION_CODENAME=/ {print $2}' /etc/os-release) printf "%s\n" "deb https://pkgs.nginx.com/plus/${NGINX_PLUS_VERSION}/debian ${DEBIAN_VERSION} nginx-plus" > /etc/apt/sources.list.d/nginx-plus.list apt-get update apt-get install -y nginx-plus apt-get remove --purge --auto-remove -y gnupg rm -rf /var/lib/apt/lists/* rm /etc/apt/apt.conf.d/90pkgs-nginx /etc/apt/sources.list.d/nginx-plus.list eot EXPOSE 8080 8081 STOPSIGNAL SIGQUIT RUN rm -rf /etc/nginx/conf.d/* COPY --link docker/test.conf /etc/nginx/conf.d/ CMD ["nginx", "-g", "daemon off;"] nginx-plus-go-client-2.3.0/docker/nginx.conf000066400000000000000000000025011474621132500207430ustar00rootroot00000000000000 user nginx; worker_processes auto; error_log stderr notice; pid /var/run/nginx.pid; events { worker_connections 1024; } http { include /etc/nginx/mime.types; default_type application/octet-stream; log_format main '$remote_addr - $remote_user [$time_local] "$request" ' '$status $body_bytes_sent "$http_referer" ' 
'"$http_user_agent" "$http_x_forwarded_for"'; access_log /dev/stdout main; sendfile on; #tcp_nopush on; keepalive_timeout 65; #gzip on; keyval_zone zone=zone_one:32k; keyval $arg_text $text zone=zone_one; include /etc/nginx/conf.d/*.conf; } stream { keyval_zone zone=zone_one_stream:32k; keyval $hostname $text zone=zone_one_stream; keyval_zone zone=zone_test_sync:32k timeout=5s sync; limit_conn_zone $binary_remote_addr zone=addr_stream:10m; limit_conn addr_stream 1; upstream stream_test { zone stream_test 64k; } server { listen 8081; proxy_pass stream_test; status_zone stream_test; health_check interval=10 fails=3 passes=1; } resolver 127.0.0.11 valid=5s status_zone=resolver_test; server { listen 7777; zone_sync; zone_sync_server nginx-plus-test:7777 resolve; } } nginx-plus-go-client-2.3.0/docker/nginx_no_stream.conf000066400000000000000000000011501474621132500230110ustar00rootroot00000000000000 user nginx; worker_processes auto; error_log stderr notice; pid /var/run/nginx.pid; events { worker_connections 1024; } http { include /etc/nginx/mime.types; default_type application/octet-stream; log_format main '$remote_addr - $remote_user [$time_local] "$request" ' '$status $body_bytes_sent "$http_referer" ' '"$http_user_agent" "$http_x_forwarded_for"'; access_log /dev/stdout main; sendfile on; #tcp_nopush on; keepalive_timeout 65; #gzip on; include /etc/nginx/conf.d/*.conf; } nginx-plus-go-client-2.3.0/docker/test.conf000066400000000000000000000013231474621132500206000ustar00rootroot00000000000000upstream test { zone test 64k; } proxy_cache_path /var/cache/nginx keys_zone=http_cache:10m max_size=100m; limit_req_zone $binary_remote_addr zone=one:10m rate=1500r/s; limit_conn_zone $binary_remote_addr zone=addr:10m; server { listen 8080; limit_req zone=one burst=100; limit_conn addr 10; location = /dashboard.html { root /usr/share/nginx/html; } location /api { status_zone location_test; api write=on; } location /test { proxy_pass http://test; proxy_cache http_cache; health_check interval=10 fails=3 passes=1; } status_zone test; } upstream test-drain { zone test-drain 64k; server 127.0.0.1:9001 drain; } nginx-plus-go-client-2.3.0/go.mod000066400000000000000000000001361474621132500166120ustar00rootroot00000000000000module github.com/nginx/nginx-plus-go-client/v2 go 1.22.6 require golang.org/x/sync v0.10.0 nginx-plus-go-client-2.3.0/go.sum000066400000000000000000000002331474621132500166350ustar00rootroot00000000000000golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= nginx-plus-go-client-2.3.0/release-process.md000066400000000000000000000030141474621132500211200ustar00rootroot00000000000000# Release Process This document outlines the steps involved in the release process for the NGINX Plus Go Client project. ## Table of Contents - [Versioning](#versioning) - [Release Planning and Development](#release-planning-and-development) - [Releasing a New Version](#releasing-a-new-version) ## Versioning The project follows [Semantic Versioning](https://semver.org/) for versioning. ## Release Planning and Development The features that will go into the next release are reflected in the corresponding [milestone](https://github.com/nginx/nginx-plus-go-client/milestones). Refer to the [Issue Lifecycle](/ISSUE_LIFECYCLE.md) document for information on issue creation and assignment to releases. ## Releasing a New Version 1. Create an issue to define and track release-related activities. 
Choose a title that follows the format `Release X.Y.Z`. 2. Stop merging any new work into the main branch. 3. Check the release draft under the [GitHub releases](https://github.com/nginx/nginx-plus-go-client/releases) page to ensure that everything is in order. 4. Create and push the release tag in the format `vX.Y.Z`: ```bash git tag -a vX.Y.Z -m "Release vX.Y.Z" git push origin vX.Y.Z ``` As a result, the CI/CD pipeline will publish the release and announce it in the community Slack. nginx-plus-go-client-2.3.0/renovate.json000066400000000000000000000002241474621132500202200ustar00rootroot00000000000000{ "$schema": "https://docs.renovatebot.com/renovate-schema.json", "extends": [ "github>nginx/k8s-common", "schedule:earlyMondays" ] } nginx-plus-go-client-2.3.0/tests/000077500000000000000000000000001474621132500166465ustar00rootroot00000000000000nginx-plus-go-client-2.3.0/tests/client_no_stream_test.go000066400000000000000000000022401474621132500235570ustar00rootroot00000000000000package tests import ( "context" "testing" "github.com/nginx/nginx-plus-go-client/v2/client" "github.com/nginx/nginx-plus-go-client/v2/tests/helpers" ) // TestStatsNoStream tests the peculiar behavior of getting Stream-related // stats from the API when there are no stream blocks in the config. // The API returns a special error code that we can use to determine if the API // is misconfigured or of the stream block is missing. func TestStatsNoStream(t *testing.T) { t.Parallel() c, err := client.NewNginxClient(helpers.GetAPIEndpoint()) if err != nil { t.Fatalf("Error connecting to nginx: %v", err) } ctx := context.Background() stats, err := c.GetStats(ctx) if err != nil { t.Errorf("Error getting stats: %v", err) } if stats.Connections.Accepted < 1 { t.Errorf("Stats should report some connections: %v", stats.Connections) } if len(stats.StreamServerZones) != 0 { t.Error("No stream block should result in no StreamServerZones") } if len(stats.StreamUpstreams) != 0 { t.Error("No stream block should result in no StreamUpstreams") } if stats.StreamZoneSync != nil { t.Error("No stream block should result in StreamZoneSync = `nil`") } } nginx-plus-go-client-2.3.0/tests/client_test.go000066400000000000000000001066521474621132500215240ustar00rootroot00000000000000package tests import ( "context" "net" "reflect" "testing" "time" "github.com/nginx/nginx-plus-go-client/v2/client" "github.com/nginx/nginx-plus-go-client/v2/tests/helpers" ) const ( cacheZone = "http_cache" upstream = "test" streamUpstream = "stream_test" streamZoneSync = "zone_test_sync" locationZone = "location_test" resolverMetric = "resolver_test" reqZone = "one" connZone = "addr" streamConnZone = "addr_stream" ) var ( defaultMaxConns = 0 defaultMaxFails = 1 defaultFailTimeout = "10s" defaultSlowStart = "0s" defaultBackup = false defaultDown = false defaultWeight = 1 ) //nolint:paralleltest func TestStreamClient(t *testing.T) { c, err := client.NewNginxClient( helpers.GetAPIEndpoint(), client.WithCheckAPI(), ) if err != nil { t.Fatalf("Error when creating a client: %v", err) } streamServer := client.StreamUpstreamServer{ Server: "127.0.0.1:8001", } // test adding a stream server ctx := context.Background() err = c.AddStreamServer(ctx, streamUpstream, streamServer) if err != nil { t.Fatalf("Error when adding a server: %v", err) } err = c.AddStreamServer(ctx, streamUpstream, streamServer) if err == nil { t.Errorf("Adding a duplicated server succeeded") } // test updating a stream server streamServers, err := c.GetStreamServers(ctx, streamUpstream) if err != nil { 
t.Errorf("Error getting stream servers: %v", err) } if len(streamServers) != 1 { t.Errorf("Expected 1 servers, got %v", streamServers) } streamServers[0].SlowStart = "30s" err = c.UpdateStreamServer(ctx, streamUpstream, streamServers[0]) if err != nil { t.Errorf("Error when updating a server: %v", err) } streamServers, err = c.GetStreamServers(ctx, streamUpstream) if err != nil { t.Errorf("Error getting stream servers: %v", err) } if len(streamServers) != 1 { t.Errorf("Expected 1 servers, got %v", streamServers) } if streamServers[0].SlowStart != "30s" { t.Errorf("The server wasn't successfully updated: expected a 'SlowStart' of 30s, actual was %s", streamServers[0].SlowStart) } streamServers[0].ID++ err = c.UpdateStreamServer(ctx, streamUpstream, streamServers[0]) if err == nil { t.Errorf("Updating a server without a matching server ID succeeded") } // test deleting a stream server err = c.DeleteStreamServer(ctx, streamUpstream, streamServer.Server) if err != nil { t.Fatalf("Error when deleting a server: %v", err) } err = c.DeleteStreamServer(ctx, streamUpstream, streamServer.Server) if err == nil { t.Errorf("Deleting a nonexisting server succeeded") } streamServers, err = c.GetStreamServers(ctx, streamUpstream) if err != nil { t.Errorf("Error getting stream servers: %v", err) } if len(streamServers) != 0 { t.Errorf("Expected 0 servers, got %v", streamServers) } // test updating stream servers streamServers1 := []client.StreamUpstreamServer{ { Server: "127.0.0.1:8001", }, { Server: "127.0.0.2:8002", }, { Server: "127.0.0.3:8003", }, } streamAdded, streamDeleted, streamUpdated, err := c.UpdateStreamServers(ctx, streamUpstream, streamServers1) if err != nil { t.Fatalf("Error when updating servers: %v", err) } if len(streamAdded) != len(streamServers1) { t.Errorf("The number of added servers %v != %v", len(streamAdded), len(streamServers1)) } if len(streamDeleted) != 0 { t.Errorf("The number of deleted servers %v != 0", len(streamDeleted)) } if len(streamUpdated) != 0 { t.Errorf("The number of updated servers %v != 0", len(streamUpdated)) } // test getting servers streamServers, err = c.GetStreamServers(ctx, streamUpstream) if err != nil { t.Fatalf("Error when getting servers: %v", err) } if !compareStreamUpstreamServers(streamServers1, streamServers) { t.Errorf("Return servers %v != added servers %v", streamServers, streamServers1) } // updating with the same servers added, deleted, updated, err := c.UpdateStreamServers(ctx, streamUpstream, streamServers1) if err != nil { t.Fatalf("Error when updating servers: %v", err) } if len(added) != 0 { t.Errorf("The number of added servers %v != 0", len(added)) } if len(deleted) != 0 { t.Errorf("The number of deleted servers %v != 0", len(deleted)) } if len(updated) != 0 { t.Errorf("The number of updated servers %v != 0", len(updated)) } // updating one server with different parameters newMaxConns := 5 newMaxFails := 6 newFailTimeout := "15s" newSlowStart := "10s" streamServers[0].MaxConns = &newMaxConns streamServers[0].MaxFails = &newMaxFails streamServers[0].FailTimeout = newFailTimeout streamServers[0].SlowStart = newSlowStart // updating one server with only one different parameter streamServers[1].SlowStart = newSlowStart added, deleted, updated, err = c.UpdateStreamServers(ctx, streamUpstream, streamServers) if err != nil { t.Fatalf("Error when updating server with different parameters: %v", err) } if len(added) != 0 { t.Errorf("The number of added servers %v != 0", len(added)) } if len(deleted) != 0 { t.Errorf("The number of deleted servers 
%v != 0", len(deleted)) } if len(updated) != 2 { t.Errorf("The number of updated servers %v != 2", len(updated)) } streamServers, err = c.GetStreamServers(ctx, streamUpstream) if err != nil { t.Fatalf("Error when getting servers: %v", err) } for _, srv := range streamServers { if srv.Server == streamServers[0].Server { if *srv.MaxConns != newMaxConns { t.Errorf("The parameter MaxConns of the updated server %v is != %v", *srv.MaxConns, newMaxConns) } if *srv.MaxFails != newMaxFails { t.Errorf("The parameter MaxFails of the updated server %v is != %v", *srv.MaxFails, newMaxFails) } if srv.FailTimeout != newFailTimeout { t.Errorf("The parameter FailTimeout of the updated server %v is != %v", srv.FailTimeout, newFailTimeout) } if srv.SlowStart != newSlowStart { t.Errorf("The parameter SlowStart of the updated server %v is != %v", srv.SlowStart, newSlowStart) } } if srv.Server == streamServers[1].Server { if *srv.MaxConns != defaultMaxConns { t.Errorf("The parameter MaxConns of the updated server %v is != %v", *srv.MaxConns, defaultMaxConns) } if *srv.MaxFails != defaultMaxFails { t.Errorf("The parameter MaxFails of the updated server %v is != %v", *srv.MaxFails, defaultMaxFails) } if srv.FailTimeout != defaultFailTimeout { t.Errorf("The parameter FailTimeout of the updated server %v is != %v", srv.FailTimeout, defaultFailTimeout) } if srv.SlowStart != newSlowStart { t.Errorf("The parameter SlowStart of the updated server %v is != %v", srv.SlowStart, newSlowStart) } } } streamServers2 := []client.StreamUpstreamServer{ { Server: "127.0.0.2:8003", }, { Server: "127.0.0.2:8004", }, { Server: "127.0.0.2:8005", }, } // updating with 2 new servers, 1 existing added, deleted, updated, err = c.UpdateStreamServers(ctx, streamUpstream, streamServers2) if err != nil { t.Fatalf("Error when updating servers: %v", err) } if len(added) != 3 { t.Errorf("The number of added servers %v != 3", len(added)) } if len(deleted) != 3 { t.Errorf("The number of deleted servers %v != 3", len(deleted)) } if len(updated) != 0 { t.Errorf("The number of updated servers %v != 0", len(updated)) } // updating with zero servers - removing added, deleted, updated, err = c.UpdateStreamServers(ctx, streamUpstream, []client.StreamUpstreamServer{}) if err != nil { t.Fatalf("Error when updating servers: %v", err) } if len(added) != 0 { t.Errorf("The number of added servers %v != 0", len(added)) } if len(deleted) != 3 { t.Errorf("The number of deleted servers %v != 3", len(deleted)) } if len(updated) != 0 { t.Errorf("The number of updated servers %v != 0", len(updated)) } // test getting servers again servers, err := c.GetStreamServers(ctx, streamUpstream) if err != nil { t.Fatalf("Error when getting servers: %v", err) } if len(servers) != 0 { t.Errorf("The number of servers %v != 0", len(servers)) } } func TestStreamUpstreamServer(t *testing.T) { t.Parallel() c, err := client.NewNginxClient(helpers.GetAPIEndpoint()) if err != nil { t.Fatalf("Error connecting to nginx: %v", err) } maxFails := 64 weight := 10 maxConns := 321 backup := true down := true streamServer := client.StreamUpstreamServer{ Server: "127.0.0.1:2000", MaxConns: &maxConns, MaxFails: &maxFails, FailTimeout: "21s", SlowStart: "12s", Weight: &weight, Backup: &backup, Down: &down, } ctx := context.Background() err = c.AddStreamServer(ctx, streamUpstream, streamServer) if err != nil { t.Errorf("Error adding upstream server: %v", err) } servers, err := c.GetStreamServers(ctx, streamUpstream) if err != nil { t.Fatalf("Error getting stream servers: %v", err) } if len(servers) 
!= 1 { t.Errorf("Too many servers") } // don't compare IDs servers[0].ID = 0 if !reflect.DeepEqual(streamServer, servers[0]) { t.Errorf("Expected: %v Got: %v", streamServer, servers[0]) } // remove stream upstream servers _, _, _, err = c.UpdateStreamServers(ctx, streamUpstream, []client.StreamUpstreamServer{}) if err != nil { t.Errorf("Couldn't remove servers: %v", err) } } //nolint:paralleltest func TestClient(t *testing.T) { c, err := client.NewNginxClient(helpers.GetAPIEndpoint()) if err != nil { t.Fatalf("Error when creating a client: %v", err) } // test checking an upstream for existence ctx := context.Background() err = c.CheckIfUpstreamExists(ctx, upstream) if err != nil { t.Fatalf("Error when checking an upstream for existence: %v", err) } err = c.CheckIfUpstreamExists(ctx, "random") if err == nil { t.Errorf("Nonexisting upstream exists") } server := client.UpstreamServer{ Server: "127.0.0.1:8001", } // test adding a http server err = c.AddHTTPServer(ctx, upstream, server) if err != nil { t.Fatalf("Error when adding a server: %v", err) } err = c.AddHTTPServer(ctx, upstream, server) if err == nil { t.Errorf("Adding a duplicated server succeeded") } // test updating an http server servers, err := c.GetHTTPServers(ctx, upstream) if err != nil { t.Errorf("Error getting servers: %v", err) } if len(servers) != 1 { t.Errorf("Expected 1 servers, got %v", servers) } servers[0].SlowStart = "30s" err = c.UpdateHTTPServer(ctx, upstream, servers[0]) if err != nil { t.Errorf("Error when updating a server: %v", err) } servers, err = c.GetHTTPServers(ctx, upstream) if err != nil { t.Errorf("Error getting servers: %v", err) } if len(servers) != 1 { t.Errorf("Expected 1 servers, got %v", servers) } if servers[0].SlowStart != "30s" { t.Errorf("The server wasn't successfully updated: expected a 'SlowStart' of 30s, actual was %s", servers[0].SlowStart) } servers[0].ID++ err = c.UpdateHTTPServer(ctx, upstream, servers[0]) if err == nil { t.Errorf("Updating a server without a matching server ID succeeded") } // test deleting a http server err = c.DeleteHTTPServer(ctx, upstream, server.Server) if err != nil { t.Fatalf("Error when deleting a server: %v", err) } err = c.DeleteHTTPServer(ctx, upstream, server.Server) if err == nil { t.Errorf("Deleting a nonexisting server succeeded") } // test updating servers servers1 := []client.UpstreamServer{ { Server: "127.0.0.2:8001", }, { Server: "127.0.0.2:8002", }, { Server: "127.0.0.2:8003", }, } added, deleted, updated, err := c.UpdateHTTPServers(ctx, upstream, servers1) if err != nil { t.Fatalf("Error when updating servers: %v", err) } if len(added) != len(servers1) { t.Errorf("The number of added servers %v != %v", len(added), len(servers1)) } if len(deleted) != 0 { t.Errorf("The number of deleted servers %v != 0", len(deleted)) } if len(updated) != 0 { t.Errorf("The number of updated servers %v != 0", len(updated)) } // test getting servers servers, err = c.GetHTTPServers(ctx, upstream) if err != nil { t.Fatalf("Error when getting servers: %v", err) } if !compareUpstreamServers(servers1, servers) { t.Errorf("Return servers %v != added servers %v", servers, servers1) } // continue test updating servers // updating with the same servers added, deleted, updated, err = c.UpdateHTTPServers(ctx, upstream, servers1) if err != nil { t.Fatalf("Error when updating servers: %v", err) } if len(added) != 0 { t.Errorf("The number of added servers %v != 0", len(added)) } if len(deleted) != 0 { t.Errorf("The number of deleted servers %v != 0", len(deleted)) } if len(updated) 
!= 0 { t.Errorf("The number of updated servers %v != 0", len(updated)) } // updating one server with different parameters newMaxConns := 5 newMaxFails := 6 newFailTimeout := "15s" newSlowStart := "10s" servers[0].MaxConns = &newMaxConns servers[0].MaxFails = &newMaxFails servers[0].FailTimeout = newFailTimeout servers[0].SlowStart = newSlowStart // updating one server with only one different parameter servers[1].SlowStart = newSlowStart added, deleted, updated, err = c.UpdateHTTPServers(ctx, upstream, servers) if err != nil { t.Fatalf("Error when updating server with different parameters: %v", err) } if len(added) != 0 { t.Errorf("The number of added servers %v != 0", len(added)) } if len(deleted) != 0 { t.Errorf("The number of deleted servers %v != 0", len(deleted)) } if len(updated) != 2 { t.Errorf("The number of updated servers %v != 2", len(updated)) } servers, err = c.GetHTTPServers(ctx, upstream) if err != nil { t.Fatalf("Error when getting servers: %v", err) } for _, srv := range servers { if srv.Server == servers[0].Server { if *srv.MaxConns != newMaxConns { t.Errorf("The parameter MaxConns of the updated server %v is != %v", *srv.MaxConns, newMaxConns) } if *srv.MaxFails != newMaxFails { t.Errorf("The parameter MaxFails of the updated server %v is != %v", *srv.MaxFails, newMaxFails) } if srv.FailTimeout != newFailTimeout { t.Errorf("The parameter FailTimeout of the updated server %v is != %v", srv.FailTimeout, newFailTimeout) } if srv.SlowStart != newSlowStart { t.Errorf("The parameter SlowStart of the updated server %v is != %v", srv.SlowStart, newSlowStart) } } if srv.Server == servers[1].Server { if *srv.MaxConns != defaultMaxConns { t.Errorf("The parameter MaxConns of the updated server %v is != %v", *srv.MaxConns, defaultMaxConns) } if *srv.MaxFails != defaultMaxFails { t.Errorf("The parameter MaxFails of the updated server %v is != %v", *srv.MaxFails, defaultMaxFails) } if srv.FailTimeout != defaultFailTimeout { t.Errorf("The parameter FailTimeout of the updated server %v is != %v", srv.FailTimeout, defaultFailTimeout) } if srv.SlowStart != newSlowStart { t.Errorf("The parameter SlowStart of the updated server %v is != %v", srv.SlowStart, newSlowStart) } } } servers2 := []client.UpstreamServer{ { Server: "127.0.0.2:8003", }, { Server: "127.0.0.2:8004", }, { Server: "127.0.0.2:8005", }, } // updating with 2 new servers, 1 existing added, deleted, updated, err = c.UpdateHTTPServers(ctx, upstream, servers2) if err != nil { t.Fatalf("Error when updating servers: %v", err) } if len(added) != 2 { t.Errorf("The number of added servers %v != 2", len(added)) } if len(deleted) != 2 { t.Errorf("The number of deleted servers %v != 2", len(deleted)) } if len(updated) != 0 { t.Errorf("The number of updated servers %v != 0", len(updated)) } // updating with zero servers - removing added, deleted, updated, err = c.UpdateHTTPServers(ctx, upstream, []client.UpstreamServer{}) if err != nil { t.Fatalf("Error when updating servers: %v", err) } if len(added) != 0 { t.Errorf("The number of added servers %v != 0", len(added)) } if len(deleted) != 3 { t.Errorf("The number of deleted servers %v != 3", len(deleted)) } if len(updated) != 0 { t.Errorf("The number of updated servers %v != 0", len(updated)) } // test getting servers again servers, err = c.GetHTTPServers(ctx, upstream) if err != nil { t.Fatalf("Error when getting servers: %v", err) } if len(servers) != 0 { t.Errorf("The number of servers %v != 0", len(servers)) } } //nolint:paralleltest func TestUpstreamServer(t *testing.T) { c, err := 
client.NewNginxClient(helpers.GetAPIEndpoint()) if err != nil { t.Fatalf("Error connecting to nginx: %v", err) } maxFails := 64 weight := 10 maxConns := 321 backup := true down := true server := client.UpstreamServer{ Server: "127.0.0.1:2000", MaxConns: &maxConns, MaxFails: &maxFails, FailTimeout: "21s", SlowStart: "12s", Weight: &weight, Route: "test", Backup: &backup, Down: &down, } ctx := context.Background() err = c.AddHTTPServer(ctx, upstream, server) if err != nil { t.Errorf("Error adding upstream server: %v", err) } servers, err := c.GetHTTPServers(ctx, upstream) if err != nil { t.Fatalf("Error getting HTTPServers: %v", err) } if len(servers) != 1 { t.Errorf("Too many servers") } // don't compare IDs servers[0].ID = 0 if !reflect.DeepEqual(server, servers[0]) { t.Errorf("Expected: %v Got: %v", server, servers[0]) } // remove upstream servers _, _, _, err = c.UpdateHTTPServers(ctx, upstream, []client.UpstreamServer{}) if err != nil { t.Errorf("Couldn't remove servers: %v", err) } } //nolint:paralleltest func TestStats(t *testing.T) { c, err := client.NewNginxClient(helpers.GetAPIEndpoint()) if err != nil { t.Fatalf("Error connecting to nginx: %v", err) } server := client.UpstreamServer{ Server: "127.0.0.1:8080", } ctx := context.Background() err = c.AddHTTPServer(ctx, upstream, server) if err != nil { t.Errorf("Error adding upstream server: %v", err) } stats, err := c.GetStats(ctx) if err != nil { t.Errorf("Error getting stats: %v", err) } // NginxInfo if stats.NginxInfo.Version == "" { t.Error("Missing version string") } if stats.NginxInfo.Build == "" { t.Error("Missing build string") } if stats.NginxInfo.Address == "" { t.Errorf("Missing server address") } if stats.NginxInfo.Generation < 1 { t.Errorf("Bad config generation: %v", stats.NginxInfo.Generation) } if stats.NginxInfo.LoadTimestamp == "" { t.Error("Missing load timestamp") } if stats.NginxInfo.Timestamp == "" { t.Error("Missing timestamp") } if stats.NginxInfo.ProcessID < 1 { t.Errorf("Bad process id: %v", stats.NginxInfo.ProcessID) } if stats.NginxInfo.ParentProcessID < 1 { t.Errorf("Bad parent process id: %v", stats.NginxInfo.ParentProcessID) } if stats.Connections.Accepted < 1 { t.Errorf("Bad connections: %v", stats.Connections) } if len(stats.Workers) < 1 { t.Errorf("Bad workers: %v", stats.Workers) } if val, ok := stats.Caches[cacheZone]; ok { if val.MaxSize != 104857600 { // 100MiB t.Errorf("Cache max size stats missing: %v", val.Size) } } else { t.Errorf("Cache stats for cache zone '%v' not found", cacheZone) } if val, ok := stats.Slabs[upstream]; ok { if val.Pages.Used < 1 { t.Errorf("Slabs pages stats missing: %v", val.Pages) } if len(val.Slots) < 1 { t.Errorf("Slab slots not visible in stats: %v", val.Slots) } } else { t.Errorf("Slab stats for upstream '%v' not found", upstream) } if stats.HTTPRequests.Total < 1 { t.Errorf("Bad HTTPRequests: %v", stats.HTTPRequests) } // SSL metrics blank in this example if len(stats.ServerZones) < 1 { t.Errorf("No ServerZone metrics: %v", stats.ServerZones) } if val, ok := stats.ServerZones["test"]; ok { if val.Requests < 1 { t.Errorf("ServerZone stats missing: %v", val) } if val.Responses.Codes.HTTPOk < 1 { t.Errorf("ServerZone response codes missing: %v", val.Responses.Codes) } } else { t.Errorf("ServerZone 'test' not found") } if ups, ok := stats.Upstreams[upstream]; ok { if len(ups.Peers) < 1 { t.Errorf("upstream server not visible in stats") } else { if ups.Peers[0].State != "up" { t.Errorf("upstream server state should be 'up'") } if ups.Peers[0].HealthChecks.LastPassed { 
t.Errorf("upstream server health check should report last failed") } } } else { t.Errorf("Upstream 'test' not found") } if locZones, ok := stats.LocationZones[locationZone]; ok { if locZones.Requests < 1 { t.Errorf("LocationZone stats missing: %v", locZones.Requests) } } else { t.Errorf("LocationZone %v not found", locationZone) } if resolver, ok := stats.Resolvers[resolverMetric]; ok { if resolver.Requests.Name < 1 { t.Errorf("Resolvers stats missing: %v", resolver.Requests) } } else { t.Errorf("Resolver %v not found", resolverMetric) } if reqLimit, ok := stats.HTTPLimitRequests[reqZone]; ok { if reqLimit.Passed < 1 { t.Errorf("HTTP Reqs limit stats missing: %v", reqLimit.Passed) } } else { t.Errorf("HTTP Reqs limit %v not found", reqLimit) } if connLimit, ok := stats.HTTPLimitConnections[connZone]; ok { if connLimit.Passed < 1 { t.Errorf("HTTP Limit connections stats missing: %v", connLimit.Passed) } } else { t.Errorf("HTTP Limit connections %v not found", connLimit) } // cleanup upstream servers _, _, _, err = c.UpdateHTTPServers(ctx, upstream, []client.UpstreamServer{}) if err != nil { t.Errorf("Couldn't remove servers: %v", err) } } //nolint:paralleltest func TestUpstreamServerDefaultParameters(t *testing.T) { c, err := client.NewNginxClient(helpers.GetAPIEndpoint()) if err != nil { t.Fatalf("Error connecting to nginx: %v", err) } server := client.UpstreamServer{ Server: "127.0.0.1:2000", } expected := client.UpstreamServer{ ID: 0, Server: "127.0.0.1:2000", MaxConns: &defaultMaxConns, MaxFails: &defaultMaxFails, FailTimeout: defaultFailTimeout, SlowStart: defaultSlowStart, Route: "", Backup: &defaultBackup, Down: &defaultDown, Drain: false, Weight: &defaultWeight, Service: "", } ctx := context.Background() err = c.AddHTTPServer(ctx, upstream, server) if err != nil { t.Errorf("Error adding upstream server: %v", err) } servers, err := c.GetHTTPServers(ctx, upstream) if err != nil { t.Fatalf("Error getting HTTPServers: %v", err) } if len(servers) != 1 { t.Errorf("Too many servers") } // don't compare IDs servers[0].ID = 0 if !reflect.DeepEqual(expected, servers[0]) { t.Errorf("Expected: %v Got: %v", expected, servers[0]) } // remove upstream servers _, _, _, err = c.UpdateHTTPServers(ctx, upstream, []client.UpstreamServer{}) if err != nil { t.Errorf("Couldn't remove servers: %v", err) } } //nolint:paralleltest func TestStreamStats(t *testing.T) { c, err := client.NewNginxClient(helpers.GetAPIEndpoint()) if err != nil { t.Fatalf("Error connecting to nginx: %v", err) } server := client.StreamUpstreamServer{ Server: "127.0.0.1:8080", } ctx := context.Background() err = c.AddStreamServer(ctx, streamUpstream, server) if err != nil { t.Errorf("Error adding stream upstream server: %v", err) } // make connection so we have stream server zone stats - ignore response _, err = net.Dial("tcp", helpers.GetStreamAddress()) if err != nil { t.Errorf("Error making tcp connection: %v", err) } // wait for health checks time.Sleep(50 * time.Millisecond) stats, err := c.GetStats(ctx) if err != nil { t.Errorf("Error getting stats: %v", err) } if stats.Connections.Active == 0 { t.Errorf("Bad connections: %v", stats.Connections) } if len(stats.StreamServerZones) < 1 { t.Errorf("No StreamServerZone metrics: %v", stats.StreamServerZones) } if streamServerZone, ok := stats.StreamServerZones[streamUpstream]; ok { if streamServerZone.Connections < 1 { t.Errorf("StreamServerZone stats missing: %v", streamServerZone) } } else { t.Errorf("StreamServerZone 'stream_test' not found") } if upstream, ok := 

//nolint:paralleltest
func TestStreamStats(t *testing.T) {
	c, err := client.NewNginxClient(helpers.GetAPIEndpoint())
	if err != nil {
		t.Fatalf("Error connecting to nginx: %v", err)
	}

	server := client.StreamUpstreamServer{
		Server: "127.0.0.1:8080",
	}
	ctx := context.Background()
	err = c.AddStreamServer(ctx, streamUpstream, server)
	if err != nil {
		t.Errorf("Error adding stream upstream server: %v", err)
	}

	// make connection so we have stream server zone stats - ignore response
	_, err = net.Dial("tcp", helpers.GetStreamAddress())
	if err != nil {
		t.Errorf("Error making tcp connection: %v", err)
	}

	// wait for health checks
	time.Sleep(50 * time.Millisecond)

	stats, err := c.GetStats(ctx)
	if err != nil {
		t.Errorf("Error getting stats: %v", err)
	}

	if stats.Connections.Active == 0 {
		t.Errorf("Bad connections: %v", stats.Connections)
	}
	if len(stats.StreamServerZones) < 1 {
		t.Errorf("No StreamServerZone metrics: %v", stats.StreamServerZones)
	}

	if streamServerZone, ok := stats.StreamServerZones[streamUpstream]; ok {
		if streamServerZone.Connections < 1 {
			t.Errorf("StreamServerZone stats missing: %v", streamServerZone)
		}
	} else {
		t.Errorf("StreamServerZone 'stream_test' not found")
	}

	if upstream, ok := stats.StreamUpstreams[streamUpstream]; ok {
		if len(upstream.Peers) < 1 {
			t.Errorf("stream upstream server not visible in stats")
		} else {
			if upstream.Peers[0].State != "up" {
				t.Errorf("stream upstream server state should be 'up'")
			}
			if upstream.Peers[0].Connections < 1 {
				t.Errorf("stream upstream should have connects value")
			}
			if !upstream.Peers[0].HealthChecks.LastPassed {
				t.Errorf("stream upstream server health check should report last passed")
			}
		}
	} else {
		t.Errorf("Stream upstream 'stream_test' not found")
	}

	if streamConnLimit, ok := stats.StreamLimitConnections[streamConnZone]; ok {
		if streamConnLimit.Passed < 1 {
			t.Errorf("Stream Limit connections stats missing: %v", streamConnLimit.Passed)
		}
	} else {
		t.Errorf("Stream Limit connections %v not found", streamConnZone)
	}

	// cleanup stream upstream servers
	_, _, _, err = c.UpdateStreamServers(ctx, streamUpstream, []client.StreamUpstreamServer{})
	if err != nil {
		t.Errorf("Couldn't remove stream servers: %v", err)
	}
}

//nolint:paralleltest
func TestStreamUpstreamServerDefaultParameters(t *testing.T) {
	c, err := client.NewNginxClient(helpers.GetAPIEndpoint())
	if err != nil {
		t.Fatalf("Error connecting to nginx: %v", err)
	}

	streamServer := client.StreamUpstreamServer{
		Server: "127.0.0.1:2000",
	}
	expected := client.StreamUpstreamServer{
		ID:          0,
		Server:      "127.0.0.1:2000",
		MaxConns:    &defaultMaxConns,
		MaxFails:    &defaultMaxFails,
		FailTimeout: defaultFailTimeout,
		SlowStart:   defaultSlowStart,
		Backup:      &defaultBackup,
		Down:        &defaultDown,
		Weight:      &defaultWeight,
		Service:     "",
	}

	ctx := context.Background()
	err = c.AddStreamServer(ctx, streamUpstream, streamServer)
	if err != nil {
		t.Errorf("Error adding upstream server: %v", err)
	}

	streamServers, err := c.GetStreamServers(ctx, streamUpstream)
	if err != nil {
		t.Fatalf("Error getting stream servers: %v", err)
	}
	if len(streamServers) != 1 {
		t.Errorf("Too many servers")
	}

	// don't compare IDs
	streamServers[0].ID = 0

	if !reflect.DeepEqual(expected, streamServers[0]) {
		t.Errorf("Expected: %v Got: %v", expected, streamServers[0])
	}

	// cleanup stream upstream servers
	_, _, _, err = c.UpdateStreamServers(ctx, streamUpstream, []client.StreamUpstreamServer{})
	if err != nil {
		t.Errorf("Couldn't remove stream servers: %v", err)
	}
}
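
// exampleSetKeyValPair is an illustrative sketch (not part of the original
// test suite). TestKeyValue below demonstrates that AddKeyValPair fails for a
// key that already exists, so an "upsert" has to fall back to
// ModifyKeyValPair; the zone, key, and value arguments here are assumptions.
func exampleSetKeyValPair(ctx context.Context, apiEndpoint, zone, key, val string) error {
	c, err := client.NewNginxClient(apiEndpoint)
	if err != nil {
		return err
	}
	if err := c.AddKeyValPair(ctx, zone, key, val); err != nil {
		// The add may have failed because the key already exists; try an
		// in-place modification instead.
		return c.ModifyKeyValPair(ctx, zone, key, val)
	}
	return nil
}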
expected: %+v, got: %+v", expectedKeyValPairsByZone, keyValuPairsByZone) } // modify keyval expectedKeyValPairs["key1"] = "valModified1" err = c.ModifyKeyValPair(ctx, zoneName, "key1", "valModified1") if err != nil { t.Errorf("couldn't set keyval: %v", err) } keyValPairs, err = c.GetKeyValPairs(ctx, zoneName) if err != nil { t.Errorf("couldn't get keyval: %v", err) } if !reflect.DeepEqual(expectedKeyValPairs, keyValPairs) { t.Errorf("maps are not equal. expected: %+v, got: %+v", expectedKeyValPairs, keyValPairs) } // error expected err = c.AddKeyValPair(ctx, zoneName, "key1", "valModified1") if err == nil { t.Errorf("adding same key/val should result in error") } err = c.AddKeyValPair(ctx, zoneName, "key2", "val2") if err != nil { t.Errorf("error adding another key/val pair: %v", err) } err = c.DeleteKeyValuePair(ctx, zoneName, "key1") if err != nil { t.Errorf("error deleting key") } expectedKeyValPairs2 := client.KeyValPairs{ "key2": "val2", } keyValPairs, err = c.GetKeyValPairs(ctx, zoneName) if err != nil { t.Errorf("couldn't get keyval: %v", err) } if !reflect.DeepEqual(keyValPairs, expectedKeyValPairs2) { t.Errorf("didn't delete key1 %+v", keyValPairs) } err = c.DeleteKeyValPairs(ctx, zoneName) if err != nil { t.Errorf("couldn't delete all: %v", err) } keyValPairs, err = c.GetKeyValPairs(ctx, zoneName) if err != nil { t.Errorf("couldn't get keyval: %v", err) } if len(keyValPairs) > 0 { t.Errorf("zone should be empty after bulk delete") } // error expected err = c.ModifyKeyValPair(ctx, zoneName, "key1", "val1") if err == nil { t.Errorf("modifying nonexistent key/val should result in error") } } //nolint:paralleltest func TestKeyValueStream(t *testing.T) { zoneName := "zone_one_stream" c, err := client.NewNginxClient(helpers.GetAPIEndpoint()) if err != nil { t.Fatalf("Error connecting to nginx: %v", err) } ctx := context.Background() err = c.AddStreamKeyValPair(ctx, zoneName, "key1", "val1") if err != nil { t.Errorf("Couldn't set keyvals: %v", err) } keyValPairs, err := c.GetStreamKeyValPairs(ctx, zoneName) if err != nil { t.Errorf("Couldn't get keyvals for zone: %v, err: %v", zoneName, err) } expectedKeyValPairs := client.KeyValPairs{ "key1": "val1", } if !reflect.DeepEqual(expectedKeyValPairs, keyValPairs) { t.Errorf("maps are not equal. expected: %+v, got: %+v", expectedKeyValPairs, keyValPairs) } keyValPairsByZone, err := c.GetAllStreamKeyValPairs(ctx) if err != nil { t.Errorf("Couldn't get keyvals, %v", err) } expectedKeyValuePairsByZone := client.KeyValPairsByZone{ zoneName: expectedKeyValPairs, streamZoneSync: client.KeyValPairs{}, } if !reflect.DeepEqual(expectedKeyValuePairsByZone, keyValPairsByZone) { t.Errorf("maps are not equal. expected: %+v, got: %+v", expectedKeyValuePairsByZone, keyValPairsByZone) } // modify keyval expectedKeyValPairs["key1"] = "valModified1" err = c.ModifyStreamKeyValPair(ctx, zoneName, "key1", "valModified1") if err != nil { t.Errorf("couldn't set keyval: %v", err) } keyValPairs, err = c.GetStreamKeyValPairs(ctx, zoneName) if err != nil { t.Errorf("couldn't get keyval: %v", err) } if !reflect.DeepEqual(expectedKeyValPairs, keyValPairs) { t.Errorf("maps are not equal. 
expected: %+v, got: %+v", expectedKeyValPairs, keyValPairs) } // error expected err = c.AddStreamKeyValPair(ctx, zoneName, "key1", "valModified1") if err == nil { t.Errorf("adding same key/val should result in error") } err = c.AddStreamKeyValPair(ctx, zoneName, "key2", "val2") if err != nil { t.Errorf("error adding another key/val pair: %v", err) } err = c.DeleteStreamKeyValuePair(ctx, zoneName, "key1") if err != nil { t.Errorf("error deleting key") } keyValPairs, err = c.GetStreamKeyValPairs(ctx, zoneName) if err != nil { t.Errorf("couldn't get keyval: %v", err) } expectedKeyValPairs2 := client.KeyValPairs{ "key2": "val2", } if !reflect.DeepEqual(keyValPairs, expectedKeyValPairs2) { t.Errorf("didn't delete key1 %+v", keyValPairs) } err = c.DeleteStreamKeyValPairs(ctx, zoneName) if err != nil { t.Errorf("couldn't delete all: %v", err) } keyValPairs, err = c.GetStreamKeyValPairs(ctx, zoneName) if err != nil { t.Errorf("couldn't get keyval: %v", err) } if len(keyValPairs) > 0 { t.Errorf("zone should be empty after bulk delete") } // error expected err = c.ModifyStreamKeyValPair(ctx, zoneName, "key1", "valModified") if err == nil { t.Errorf("modifying nonexistent key/val should result in error") } } func TestStreamZoneSync(t *testing.T) { t.Parallel() c1, err := client.NewNginxClient(helpers.GetAPIEndpoint()) if err != nil { t.Fatalf("Error connecting to nginx: %v", err) } c2, err := client.NewNginxClient(helpers.GetAPIEndpointOfHelper()) if err != nil { t.Fatalf("Error connecting to nginx: %v", err) } ctx := context.Background() err = c1.AddStreamKeyValPair(ctx, streamZoneSync, "key1", "val1") if err != nil { t.Errorf("Couldn't set keyvals: %v", err) } // wait for nodes to sync information of synced zones time.Sleep(5 * time.Second) statsC1, err := c1.GetStats(ctx) if err != nil { t.Errorf("Error getting stats: %v", err) } if statsC1.StreamZoneSync == nil { t.Errorf("Stream zone sync can't be nil if configured") } if statsC1.StreamZoneSync.Status.NodesOnline == 0 { t.Errorf("At least 1 node must be online") } if statsC1.StreamZoneSync.Status.MsgsOut == 0 { t.Errorf("Msgs out cannot be 0") } if statsC1.StreamZoneSync.Status.MsgsIn == 0 { t.Errorf("Msgs in cannot be 0") } if statsC1.StreamZoneSync.Status.BytesIn == 0 { t.Errorf("Bytes in cannot be 0") } if statsC1.StreamZoneSync.Status.BytesOut == 0 { t.Errorf("Bytes Out cannot be 0") } if zone, ok := statsC1.StreamZoneSync.Zones[streamZoneSync]; ok { if zone.RecordsTotal == 0 { t.Errorf("Total records cannot be 0 after adding keyvals") } if zone.RecordsPending != 0 { t.Errorf("Pending records must be 0 after adding keyvals") } } else { t.Errorf("Sync zone %v missing in stats", streamZoneSync) } statsC2, err := c2.GetStats(ctx) if err != nil { t.Errorf("Error getting stats: %v", err) } if statsC2.StreamZoneSync == nil { t.Errorf("Stream zone sync can't be nil if configured") } if statsC2.StreamZoneSync.Status.NodesOnline == 0 { t.Errorf("At least 1 node must be online") } if statsC2.StreamZoneSync.Status.MsgsOut != 0 { t.Errorf("Msgs out must be 0") } if statsC2.StreamZoneSync.Status.MsgsIn == 0 { t.Errorf("Msgs in cannot be 0") } if statsC2.StreamZoneSync.Status.BytesIn == 0 { t.Errorf("Bytes in cannot be 0") } if statsC2.StreamZoneSync.Status.BytesOut != 0 { t.Errorf("Bytes out must be 0") } if zone, ok := statsC2.StreamZoneSync.Zones[streamZoneSync]; ok { if zone.RecordsTotal == 0 { t.Errorf("Total records cannot be 0 after adding keyvals") } if zone.RecordsPending != 0 { t.Errorf("Pending records must be 0 after adding keyvals") } } else 
{ t.Errorf("Sync zone %v missing in stats", streamZoneSync) } } func compareUpstreamServers(x []client.UpstreamServer, y []client.UpstreamServer) bool { xServers := make([]string, 0, len(x)) for _, us := range x { xServers = append(xServers, us.Server) } yServers := make([]string, 0, len(y)) for _, us := range y { yServers = append(yServers, us.Server) } return reflect.DeepEqual(xServers, yServers) } func compareStreamUpstreamServers(x []client.StreamUpstreamServer, y []client.StreamUpstreamServer) bool { xServers := make([]string, 0, len(x)) for _, us := range x { xServers = append(xServers, us.Server) } yServers := make([]string, 0, len(y)) for _, us := range y { yServers = append(yServers, us.Server) } return reflect.DeepEqual(xServers, yServers) } func TestUpstreamServerWithDrain(t *testing.T) { t.Parallel() c, err := client.NewNginxClient(helpers.GetAPIEndpoint()) if err != nil { t.Fatalf("Error connecting to nginx: %v", err) } server := client.UpstreamServer{ ID: 0, Server: "127.0.0.1:9001", MaxConns: &defaultMaxConns, MaxFails: &defaultMaxFails, FailTimeout: defaultFailTimeout, SlowStart: defaultSlowStart, Route: "", Backup: &defaultBackup, Down: &defaultDown, Drain: true, Weight: &defaultWeight, Service: "", } // Get existing upstream servers ctx := context.Background() servers, err := c.GetHTTPServers(ctx, "test-drain") if err != nil { t.Fatalf("Error getting HTTPServers: %v", err) } if len(servers) != 1 { t.Errorf("Too many servers") } servers[0].ID = 0 if !reflect.DeepEqual(server, servers[0]) { t.Errorf("Expected: %v Got: %v", server, servers[0]) } } nginx-plus-go-client-2.3.0/tests/helpers/000077500000000000000000000000001474621132500203105ustar00rootroot00000000000000nginx-plus-go-client-2.3.0/tests/helpers/env_variables.go000066400000000000000000000017061474621132500234630ustar00rootroot00000000000000package helpers import "os" // GetAPIEndpoint returns the api endpoint. // For testing purposes only. The endpoint is set in the Makefile. func GetAPIEndpoint() string { ep := os.Getenv("TEST_API_ENDPOINT") if ep == "" { panic("TEST_API_ENDPOINT env variable is not set or empty") } return ep } // GetAPIEndpointOfHelper returns the api endpoint of the helper. // For testing purposes only. The endpoint is set in the Makefile. func GetAPIEndpointOfHelper() string { ep := os.Getenv("TEST_API_ENDPOINT_OF_HELPER") if ep == "" { panic("TEST_API_ENDPOINT_OF_HELPER env variable is not set or empty") } return ep } // GetStreamAddress returns the address of the unavailable stream server. // For testing purposes only. The address is set in the Makefile. func GetStreamAddress() string { addr := os.Getenv("TEST_UNAVAILABLE_STREAM_ADDRESS") if addr == "" { panic("TEST_UNAVAILABLE_STREAM_ADDRESS env variable is not set or empty") } return addr }