==> pax_global_header <==
comment=98b32a6c3a87fbee5d34c063b9096f416b250897

==> opentelemetry-go-1.21.0/.codespellignore <==
ot
fo
te
collison
consequentially

==> opentelemetry-go-1.21.0/.codespellrc <==
# https://github.com/codespell-project/codespell
[codespell]
builtin = clear,rare,informal
check-filenames =
check-hidden =
ignore-words = .codespellignore
interactive = 1
skip = .git,go.mod,go.sum,semconv,venv,.tools
uri-ignore-words-list = *
write =

==> opentelemetry-go-1.21.0/.gitattributes <==
* text=auto eol=lf
*.{cmd,[cC][mM][dD]} text eol=crlf
*.{bat,[bB][aA][tT]} text eol=crlf

==> opentelemetry-go-1.21.0/.github/ISSUE_TEMPLATE/bug_report.md <==
---
name: Bug report
about: Create a report of invalid behavior to help us improve
title: ''
labels: bug
assignees: ''
---

### Description

A clear and concise description of what the bug is.

### Environment

- OS: [e.g. iOS]
- Architecture: [e.g. x86, i386]
- Go Version: [e.g. 1.15]
- opentelemetry-go version: [e.g. v0.14.0, 3c7face]

### Steps To Reproduce

1. Use the configuration '...'
2. Run '...'
3. See error

### Expected behavior

A clear and concise description of what you expected to happen.

==> opentelemetry-go-1.21.0/.github/ISSUE_TEMPLATE/feature_request.md <==
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''
---

### Problem Statement

A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

### Proposed Solution

A clear and concise description of what you want to happen.

#### Alternatives

A clear and concise description of any alternative solutions or features you've considered.

#### Prior Art

A clear and concise list of any similar and existing solutions from other projects that provide context to possible solutions.

### Additional Context

Add any other context or screenshots about the feature request here.

==> opentelemetry-go-1.21.0/.github/ISSUE_TEMPLATE/version_release.md <==
---
name: Version Release
about: Checklist to follow when shipping a new release.
title: 'Release Checklist' labels: '' assignees: '' --- - [ ] Complete [Milestone](https://github.com/open-telemetry/opentelemetry-go/milestone/) - [ ] Update contrib codebase to support changes about to be released (use a git sha version) - [ ] [Pre-release](https://github.com/open-telemetry/opentelemetry-go/blob/main/RELEASING.md#pre-release) - [ ] [Tag](https://github.com/open-telemetry/opentelemetry-go/blob/main/RELEASING.md#tag) - [ ] [Release](https://github.com/open-telemetry/opentelemetry-go/blob/main/RELEASING.md#release) - [ ] [Check examples](https://github.com/open-telemetry/opentelemetry-go/blob/main/RELEASING.md#verify-examples) - [ ] [Sync with Contrib](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md#upgrade-goopentelemetryiootel-packages) - [ ] [Release contrib](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md#release-process) - [ ] [Sync website docs](https://github.com/open-telemetry/opentelemetry-go/blob/main/RELEASING.md#website-documentation) - [ ] Close the milestone opentelemetry-go-1.21.0/.github/codecov.yaml000066400000000000000000000005571452547353200207610ustar00rootroot00000000000000codecov: require_ci_to_pass: yes ignore: - "exporters/jaeger/internal/gen-go/**/*" - "exporters/jaeger/internal/third_party/**/*" coverage: precision: 1 round: down range: "70...100" status: project: default: target: auto threshold: 0.5% comment: layout: "reach,diff,flags,tree" behavior: default require_changes: yes opentelemetry-go-1.21.0/.github/dependabot.yml000066400000000000000000000130311452547353200212720ustar00rootroot00000000000000# File generated by dbotconf; DO NOT EDIT. version: 2 updates: - package-ecosystem: github-actions directory: / labels: - dependencies - actions - Skip Changelog schedule: interval: weekly day: sunday - package-ecosystem: docker directory: /example/zipkin labels: - dependencies - docker - Skip Changelog schedule: interval: weekly day: sunday - package-ecosystem: gomod directory: / labels: - dependencies - go - Skip Changelog schedule: interval: weekly day: sunday - package-ecosystem: gomod directory: /bridge/opencensus labels: - dependencies - go - Skip Changelog schedule: interval: weekly day: sunday - package-ecosystem: gomod directory: /bridge/opencensus/test labels: - dependencies - go - Skip Changelog schedule: interval: weekly day: sunday - package-ecosystem: gomod directory: /bridge/opentracing labels: - dependencies - go - Skip Changelog schedule: interval: weekly day: sunday - package-ecosystem: gomod directory: /bridge/opentracing/test labels: - dependencies - go - Skip Changelog schedule: interval: weekly day: sunday - package-ecosystem: gomod directory: /example/dice labels: - dependencies - go - Skip Changelog schedule: interval: weekly day: sunday - package-ecosystem: gomod directory: /example/namedtracer labels: - dependencies - go - Skip Changelog schedule: interval: weekly day: sunday - package-ecosystem: gomod directory: /example/opencensus labels: - dependencies - go - Skip Changelog schedule: interval: weekly day: sunday - package-ecosystem: gomod directory: /example/otel-collector labels: - dependencies - go - Skip Changelog schedule: interval: weekly day: sunday - package-ecosystem: gomod directory: /example/passthrough labels: - dependencies - go - Skip Changelog schedule: interval: weekly day: sunday - package-ecosystem: gomod directory: /example/prometheus labels: - dependencies - go - Skip Changelog schedule: interval: weekly day: sunday - 
package-ecosystem: gomod directory: /example/zipkin labels: - dependencies - go - Skip Changelog schedule: interval: weekly day: sunday - package-ecosystem: gomod directory: /exporters/otlp/otlpmetric/otlpmetricgrpc labels: - dependencies - go - Skip Changelog schedule: interval: weekly day: sunday - package-ecosystem: gomod directory: /exporters/otlp/otlpmetric/otlpmetrichttp labels: - dependencies - go - Skip Changelog schedule: interval: weekly day: sunday - package-ecosystem: gomod directory: /exporters/otlp/otlptrace labels: - dependencies - go - Skip Changelog schedule: interval: weekly day: sunday - package-ecosystem: gomod directory: /exporters/otlp/otlptrace/otlptracegrpc labels: - dependencies - go - Skip Changelog schedule: interval: weekly day: sunday - package-ecosystem: gomod directory: /exporters/otlp/otlptrace/otlptracehttp labels: - dependencies - go - Skip Changelog schedule: interval: weekly day: sunday - package-ecosystem: gomod directory: /exporters/prometheus labels: - dependencies - go - Skip Changelog schedule: interval: weekly day: sunday - package-ecosystem: gomod directory: /exporters/stdout/stdoutmetric labels: - dependencies - go - Skip Changelog schedule: interval: weekly day: sunday - package-ecosystem: gomod directory: /exporters/stdout/stdouttrace labels: - dependencies - go - Skip Changelog schedule: interval: weekly day: sunday - package-ecosystem: gomod directory: /exporters/zipkin labels: - dependencies - go - Skip Changelog schedule: interval: weekly day: sunday - package-ecosystem: gomod directory: /internal/tools labels: - dependencies - go - Skip Changelog schedule: interval: weekly day: sunday - package-ecosystem: gomod directory: /metric labels: - dependencies - go - Skip Changelog schedule: interval: weekly day: sunday - package-ecosystem: gomod directory: /schema labels: - dependencies - go - Skip Changelog schedule: interval: weekly day: sunday - package-ecosystem: gomod directory: /sdk labels: - dependencies - go - Skip Changelog schedule: interval: weekly day: sunday - package-ecosystem: gomod directory: /sdk/metric labels: - dependencies - go - Skip Changelog schedule: interval: weekly day: sunday - package-ecosystem: gomod directory: /trace labels: - dependencies - go - Skip Changelog schedule: interval: weekly day: sunday - package-ecosystem: pip directory: / labels: - dependencies - python - Skip Changelog schedule: interval: weekly day: sunday opentelemetry-go-1.21.0/.github/workflows/000077500000000000000000000000001452547353200205015ustar00rootroot00000000000000opentelemetry-go-1.21.0/.github/workflows/benchmark.yml000066400000000000000000000021141452547353200231540ustar00rootroot00000000000000name: Benchmark on: push: tags: - v1.* workflow_dispatch: env: DEFAULT_GO_VERSION: "~1.21.3" jobs: benchmark: name: Benchmarks runs-on: ubuntu-latest timeout-minutes: 15 steps: - uses: actions/checkout@v4 - uses: actions/setup-go@v4 with: go-version: ${{ env.DEFAULT_GO_VERSION }} check-latest: true cache-dependency-path: "**/go.sum" - name: Run benchmarks run: make benchmark | tee output.txt - name: Download previous benchmark data uses: actions/cache@v3 with: path: ./benchmarks key: ${{ runner.os }}-benchmark - name: Store benchmarks result uses: benchmark-action/github-action-benchmark@v1.18.0 with: name: Benchmarks tool: 'go' output-file-path: output.txt external-data-json-path: ./benchmarks/data.json github-token: ${{ secrets.GITHUB_TOKEN }} gh-pages-branch: benchmarks auto-push: true fail-on-alert: false alert-threshold: "400%" 
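
The Benchmark workflow above pipes `make benchmark | tee output.txt` into `benchmark-action/github-action-benchmark` configured with `tool: 'go'`, so what gets stored and graphed is standard `go test -bench` output (lines of the form `BenchmarkName-8   N   x ns/op`). As a rough illustration only — the benchmark below is invented, not from this repository, and it assumes `make benchmark` ultimately runs `go test -bench` across the modules:

```go
package example_test

import (
	"strconv"
	"testing"
)

// BenchmarkItoa stands in for the project's real benchmarks; running it with
// `go test -bench=. -run=^$` prints a result line in exactly the format the
// github-action-benchmark step parses.
func BenchmarkItoa(b *testing.B) {
	for i := 0; i < b.N; i++ {
		_ = strconv.Itoa(i)
	}
}
```

The workflow then compares each result against the cached baseline and alerts when a benchmark regresses past the configured `alert-threshold: "400%"`.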
==> opentelemetry-go-1.21.0/.github/workflows/changelog.yml <==
# This action requires that any PR targeting the main branch should touch at
# least one CHANGELOG file. If a CHANGELOG entry is not required, or if
# performing maintenance on the Changelog, add either \"[chore]\" to the title of
# the pull request or add the \"Skip Changelog\" label to disable this action.

name: changelog

on:
  pull_request:
    types: [opened, synchronize, reopened, labeled, unlabeled]
    branches:
      - main

jobs:
  changelog:
    runs-on: ubuntu-latest
    if: ${{ !contains(github.event.pull_request.labels.*.name, 'dependencies') && !contains(github.event.pull_request.labels.*.name, 'Skip Changelog') && !contains(github.event.pull_request.title, '[chore]')}}

    steps:
      - uses: actions/checkout@v4

      - name: Check for CHANGELOG changes
        run: |
          # Only the latest commit of the feature branch is available
          # automatically. To diff with the base branch, we need to
          # fetch that too (and we only need its latest commit).
          git fetch origin ${{ github.base_ref }} --depth=1
          if [[ $(git diff --name-only FETCH_HEAD | grep CHANGELOG) ]]
          then
            echo "A CHANGELOG was modified. Looks good!"
          else
            echo "No CHANGELOG was modified."
            echo "Please add a CHANGELOG entry, or add the \"Skip Changelog\" label if not required."
            false
          fi

==> opentelemetry-go-1.21.0/.github/workflows/ci.yml <==
name: ci

on:
  push:
    branches:
      - main
  pull_request:

env:
  # Path to where test results will be saved.
  TEST_RESULTS: /tmp/test-results
  # Default version of Go to use by CI workflows. This should be the latest
  # release of Go; developers likely use the latest release in development and
  # we want to catch any bugs (e.g. lint errors, race detection) with this
  # release before they are merged. The Go compatibility guarantees ensure
  # backwards compatibility with the previous two minor releases and we
  # explicitly test our code for these versions so keeping this at prior
  # versions does not add value.
  DEFAULT_GO_VERSION: "~1.21.3"

jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Repo
        uses: actions/checkout@v4
      - name: Install Go
        uses: actions/setup-go@v4
        with:
          go-version: ${{ env.DEFAULT_GO_VERSION }}
          check-latest: true
          cache-dependency-path: "**/go.sum"
      - name: Tools cache
        uses: actions/cache@v3
        env:
          cache-name: go-tools-cache
        with:
          path: .tools
          key: ${{ runner.os }}-${{ env.cache-name }}-${{ hashFiles('./internal/tools/**') }}
      - name: Generate
        run: make generate
      - name: Run linters
        run: make dependabot-check license-check lint vanity-import-check
      - name: Build
        run: make build
      - name: Check clean repository
        run: make check-clean-work-tree

  test-bench:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Repo
        uses: actions/checkout@v4
      - name: Setup Environment
        run: |
          echo "GOPATH=$(go env GOPATH)" >> $GITHUB_ENV
          echo "$(go env GOPATH)/bin" >> $GITHUB_PATH
      - name: Install Go
        uses: actions/setup-go@v4
        with:
          go-version: ${{ env.DEFAULT_GO_VERSION }}
          cache-dependency-path: "**/go.sum"
      - name: Run benchmarks to check functionality
        run: make test-bench

  test-race:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Repo
        uses: actions/checkout@v4
      - name: Install Go
        uses: actions/setup-go@v4
        with:
          go-version: ${{ env.DEFAULT_GO_VERSION }}
          check-latest: true
          cache-dependency-path: "**/go.sum"
      - name: Run tests with race detector
        run: make test-race

  test-coverage:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Repo
        uses: actions/checkout@v4
      - name: Install Go
        uses: actions/setup-go@v4
        with:
          go-version: ${{ env.DEFAULT_GO_VERSION }}
          check-latest: true
          cache-dependency-path: "**/go.sum"
      - name: Run coverage tests
        run: |
          make test-coverage
          mkdir $TEST_RESULTS
          cp coverage.out $TEST_RESULTS
          cp coverage.txt $TEST_RESULTS
          cp coverage.html $TEST_RESULTS
      - name: Upload coverage report
        uses: codecov/codecov-action@v3.1.4
        with:
          file: ./coverage.txt
          fail_ci_if_error: true
          verbose: true
      - name: Store coverage test output
        uses: actions/upload-artifact@v3
        with:
          name: opentelemetry-go-test-output
          path: ${{ env.TEST_RESULTS }}

  compatibility-test:
    strategy:
      matrix:
        go-version: ["~1.21.3", "~1.20.10"]
        os: [ubuntu-latest, macos-latest, windows-latest]
        # GitHub Actions does not support arm* architectures on default
        # runners. It is possible to accomplish this with a self-hosted runner
        # if we want to add this in the future:
        # https://docs.github.com/en/actions/hosting-your-own-runners/using-self-hosted-runners-in-a-workflow
        arch: ["386", amd64]
        exclude:
          # Not a supported Go OS/architecture.
          - os: macos-latest
            arch: "386"
    runs-on: ${{ matrix.os }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Install Go
        uses: actions/setup-go@v4
        with:
          go-version: ${{ matrix.go-version }}
          check-latest: true
          cache-dependency-path: "**/go.sum"
      - name: Run tests
        env:
          GOARCH: ${{ matrix.arch }}
        run: make test-short

  test-compatibility:
    runs-on: ubuntu-latest
    needs: [compatibility-test]
    steps:
      - name: Test if compatibility-test passed
        run: |
          echo ${{ needs.compatibility-test.result }}
          test ${{ needs.compatibility-test.result }} == "success"

==> opentelemetry-go-1.21.0/.github/workflows/codeql-analysis.yml <==
name: "CodeQL Analysis"

on:
  workflow_dispatch:
  schedule:
    # ┌───────────── minute (0 - 59)
    # │ ┌───────────── hour (0 - 23)
    # │ │ ┌───────────── day of the month (1 - 31)
    # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC)
    # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT)
    # │ │ │ │ │
    # │ │ │ │ │
    # │ │ │ │ │
    # * * * * *
    - cron: '30 1 * * *'
  push:
    branches: [ main ]
  pull_request:

jobs:
  CodeQL-Build:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v2
        with:
          languages: go

      - name: Autobuild
        uses: github/codeql-action/autobuild@v2

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v2

==> opentelemetry-go-1.21.0/.github/workflows/codespell.yaml <==
name: codespell
on:
  push:
    branches:
      - main
  pull_request:

jobs:
  codespell:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Repo
        uses: actions/checkout@v4
      - name: Codespell
        run: make codespell

==> opentelemetry-go-1.21.0/.github/workflows/create-dependabot-pr.yml <==
name: dependabot-pr

on:
  workflow_dispatch:

jobs:
  create-pr:
    runs-on: ubuntu-latest
    steps:
      - name: Install Go
        uses: actions/setup-go@v4
        with:
          go-version: "~1.21.3"
          check-latest: true
          cache-dependency-path: "**/go.sum"
      - uses: actions/checkout@v4
      - name: Install zsh
        run: sudo apt-get update; sudo apt-get install zsh
      - name: Run dependabot-pr.sh
        run: ./.github/workflows/scripts/dependabot-pr.sh
        env:
          GITHUB_TOKEN: ${{ secrets.OPENTELEMETRYBOT_GITHUB_TOKEN }}

==> opentelemetry-go-1.21.0/.github/workflows/dependabot.yml <==
name: Dependabot-Tidier

on:
  pull_request:
    types: [ labeled ]

jobs:
  mod_tidier:
    if: ${{ contains(github.event.pull_request.labels.*.name, 'dependencies') }}
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ github.head_ref }}
      - uses: actions/setup-go@v4
        with:
          go-version: "~1.21.3"
          check-latest: true
          cache-dependency-path: "**/go.sum"
      - uses: evantorrie/mott-the-tidier@v1-beta
        id: modtidy
        with:
          gomods: '**/go.mod'
          gomodsum_only: true
      - uses: stefanzweifel/git-auto-commit-action@v5
        id: autocommit
        with:
          commit_message: Auto-fix go.sum changes in dependent modules
      - name: changes
        run: |
          echo "Changes detected: ${{ steps.autocommit.outputs.changes_detected }}"

==> opentelemetry-go-1.21.0/.github/workflows/links-fail-fast.yml <==
name: Links (Fail Fast)

on:
  push:
  pull_request:

jobs:
  check-links:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Link Checker
        uses: lycheeverse/lychee-action@v1.8.0
        with:
          fail: true
==> opentelemetry-go-1.21.0/.github/workflows/links.yml <==
name: Links

on:
  repository_dispatch:
  workflow_dispatch:
  schedule:
    # Everyday at 9:00 AM.
    - cron: "0 9 * * *"

jobs:
  check-links:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Repo
        uses: actions/checkout@v4

      - name: Link Checker
        id: lychee
        uses: lycheeverse/lychee-action@v1.8.0

      - name: Create Issue From File
        if: steps.lychee.outputs.exit_code != 0
        uses: peter-evans/create-issue-from-file@v4
        with:
          title: Link Checker Report
          content-filepath: ./lychee/out.md
          labels: report, bot-generated

==> opentelemetry-go-1.21.0/.github/workflows/markdown-fail-fast.yml <==
name: Markdown (Fail Fast)

on:
  push:
  pull_request:

jobs:
  changedfiles:
    name: changed files
    runs-on: ubuntu-latest
    outputs:
      md: ${{ steps.changes.outputs.md }}
    steps:
      - name: Checkout Repo
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Get changed files
        id: changes
        run: |
          echo "md=$(git diff --name-only --diff-filter=ACMRTUXB origin/${{ github.event.pull_request.base.ref }} ${{ github.event.pull_request.head.sha }} | grep .md$ | xargs)" >> $GITHUB_OUTPUT

  lint:
    name: lint markdown files
    runs-on: ubuntu-latest
    needs: changedfiles
    if: ${{needs.changedfiles.outputs.md}}
    steps:
      - name: Checkout Repo
        uses: actions/checkout@v4
      - name: Run linter
        uses: docker://avtodev/markdown-lint:v1
        with:
          args: ${{needs.changedfiles.outputs.md}}

==> opentelemetry-go-1.21.0/.github/workflows/markdown.yml <==
name: Markdown

on:
  repository_dispatch:
  workflow_dispatch:
  schedule:
    # Everyday at 9:00 AM.
    - cron: "0 9 * * *"

jobs:
  lint-markdown:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Repo
        uses: actions/checkout@v4

      - name: Run linter
        id: markdownlint
        uses: docker://avtodev/markdown-lint:v1
        with:
          config: .markdownlint.yaml
          args: '**/*.md'
          output: ./markdownlint.txt

      - name: Create Issue From File
        if: steps.markdownlint.outputs.exit_code != 0
        uses: peter-evans/create-issue-from-file@v4
        with:
          title: Markdown Lint Report
          content-filepath: ./markdownlint.txt
          labels: report, bot-generated

==> opentelemetry-go-1.21.0/.github/workflows/scripts/dependabot-pr.sh <==
#!/bin/zsh -ex

# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

git config user.name opentelemetrybot
git config user.email 107717825+opentelemetrybot@users.noreply.github.com

BRANCH=dependabot/dependabot-prs/`date +'%Y-%m-%dT%H%M%S'`
git checkout -b $BRANCH

IFS=$'\n'
requests=($( gh pr list --search "author:app/dependabot" --json title --jq '.[].title' ))
message=""
dirs=(`find . -type f -name "go.mod" -exec dirname {} \; | sort | egrep '^./'`)

declare -A mods

for line in $requests; do
    echo $line
    if [[ $line != Bump* ]]; then
        continue
    fi

    module=$(echo $line | cut -f 2 -d " ")
    if [[ $module == go.opentelemetry.io/otel* ]]; then
        continue
    fi
    version=$(echo $line | cut -f 6 -d " ")

    mods[$module]=$version
    message+=$line
    message+=$'\n'
done

for module version in ${(kv)mods}; do
    topdir=`pwd`
    for dir in $dirs; do
        echo "checking $dir"
        cd $dir && if grep -q "$module " go.mod; then go get "$module"@v"$version"; fi
        cd $topdir
    done
done

make go-mod-tidy
make build

git add go.sum go.mod
git add "**/go.sum" "**/go.mod"
git commit -m "dependabot updates `date` $message"
git push origin $BRANCH

gh pr create --title "[chore] dependabot updates `date`" --body "$message"

==> opentelemetry-go-1.21.0/.gitignore <==
.DS_Store
Thumbs.db

.tools/
venv/
.idea/
.vscode/
*.iml
*.so
coverage.*
go.work
go.work.sum
gen/

/example/dice/dice
/example/namedtracer/namedtracer
/example/otel-collector/otel-collector
/example/opencensus/opencensus
/example/passthrough/passthrough
/example/prometheus/prometheus
/example/zipkin/zipkin

==> opentelemetry-go-1.21.0/.gitmodules <==
[submodule "opentelemetry-proto"]
	path = exporters/otlp/internal/opentelemetry-proto
	url = https://github.com/open-telemetry/opentelemetry-proto

==> opentelemetry-go-1.21.0/.golangci.yml <==
# See https://github.com/golangci/golangci-lint#config-file
run:
  issues-exit-code: 1 #Default
  tests: true #Default

linters:
  # Disable everything by default so upgrades do not include new "default
  # enabled" linters.
  disable-all: true
  # Specifically enable linters we want to use.
  enable:
    - depguard
    - errcheck
    - godot
    - gofumpt
    - goimports
    - gosec
    - gosimple
    - govet
    - ineffassign
    - misspell
    - revive
    - staticcheck
    - typecheck
    - unused

issues:
  # Maximum issues count per one linter.
  # Set to 0 to disable.
  # Default: 50
  # Setting to unlimited so the linter only is run once to debug all issues.
  max-issues-per-linter: 0
  # Maximum count of issues with the same text.
  # Set to 0 to disable.
  # Default: 3
  # Setting to unlimited so the linter only is run once to debug all issues.
  max-same-issues: 0
  # Excluding configuration per-path, per-linter, per-text and per-source.
  exclude-rules:
    # TODO: Having appropriate comments for exported objects helps development,
    # even for objects in internal packages. Appropriate comments for all
    # exported objects should be added and this exclusion removed.
    - path: '.*internal/.*'
      text: "exported (method|function|type|const) (.+) should have comment or be unexported"
      linters:
        - revive
    # Yes, they are, but it's okay in a test.
    - path: _test\.go
      text: "exported func.*returns unexported type.*which can be annoying to use"
      linters:
        - revive
    # Example test functions should be treated like main.
    - path: example.*_test\.go
      text: "calls to (.+) only in main[(][)] or init[(][)] functions"
      linters:
        - revive
    # It's okay to not run gosec in a test.
    - path: _test\.go
      linters:
        - gosec
    # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
    # as we commonly use it in tests and examples.
    - text: "G404:"
      linters:
        - gosec
    # Ignoring gosec G402: TLS MinVersion too low
    # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well.
    - text: "G402: TLS MinVersion too low."
linters: - gosec include: # revive exported should have comment or be unexported. - EXC0012 # revive package comment should be of the form ... - EXC0013 linters-settings: depguard: rules: non-tests: files: - "!$test" - "!**/*test/*.go" - "!**/internal/matchers/*.go" deny: - pkg: "testing" - pkg: "github.com/stretchr/testify" - pkg: "crypto/md5" - pkg: "crypto/sha1" - pkg: "crypto/**/pkix" otlp-internal: files: - "!**/exporters/otlp/internal/**/*.go" deny: - pkg: "go.opentelemetry.io/otel/exporters/otlp/internal" desc: Do not use cross-module internal packages. otlptrace-internal: files: - "!**/exporters/otlp/otlptrace/*.go" - "!**/exporters/otlp/otlptrace/internal/**.go" deny: - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal" desc: Do not use cross-module internal packages. otlpmetric-internal: files: - "!**/exporters/otlp/otlpmetric/internal/*.go" - "!**/exporters/otlp/otlpmetric/internal/**/*.go" deny: - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal" desc: Do not use cross-module internal packages. otel-internal: files: - "**/sdk/*.go" - "**/sdk/**/*.go" - "**/exporters/*.go" - "**/exporters/**/*.go" - "**/schema/*.go" - "**/schema/**/*.go" - "**/metric/*.go" - "**/metric/**/*.go" - "**/bridge/*.go" - "**/bridge/**/*.go" - "**/example/*.go" - "**/example/**/*.go" - "**/trace/*.go" - "**/trace/**/*.go" deny: - pkg: "go.opentelemetry.io/otel/internal$" desc: Do not use cross-module internal packages. - pkg: "go.opentelemetry.io/otel/internal/attribute" desc: Do not use cross-module internal packages. - pkg: "go.opentelemetry.io/otel/internal/internaltest" desc: Do not use cross-module internal packages. - pkg: "go.opentelemetry.io/otel/internal/matchers" desc: Do not use cross-module internal packages. godot: exclude: # Exclude links. - '^ *\[[^]]+\]:' # Exclude sentence fragments for lists. - '^[ ]*[-•]' # Exclude sentences prefixing a list. - ':$' goimports: local-prefixes: go.opentelemetry.io misspell: locale: US ignore-words: - cancelled revive: # Sets the default failure confidence. # This means that linting errors with less than 0.8 confidence will be ignored. # Default: 0.8 confidence: 0.01 rules: # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports - name: blank-imports disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr - name: bool-literal-in-expr disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr - name: constant-logical-expr disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument # TODO (#3372) re-enable linter when it is compatible. 
https://github.com/golangci/golangci-lint/issues/3280 - name: context-as-argument disabled: true arguments: allowTypesBefore: "*testing.T" # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type - name: context-keys-type disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit - name: deep-exit disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer - name: defer disabled: false arguments: - ["call-chain", "loop"] # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports - name: dot-imports disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports - name: duplicated-imports disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return - name: early-return disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block - name: empty-block disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines - name: empty-lines disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming - name: error-naming disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return - name: error-return disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings - name: error-strings disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf - name: errorf disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported - name: exported disabled: false arguments: - "sayRepetitiveInsteadOfStutters" # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter - name: flag-parameter disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches - name: identical-branches disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return - name: if-return disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement - name: increment-decrement disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow - name: indent-error-flow disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing - name: import-shadowing disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments - name: package-comments disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range - name: range disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure - name: range-val-in-closure disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address - name: range-val-address disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id - name: redefines-builtin-id disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format - name: string-format disabled: false arguments: - - panic - '/^[^\n]*$/' - must not contain line breaks # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag - name: struct-tag disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else - 
name: superfluous-else disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal - name: time-equal disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming - name: var-naming disabled: false arguments: - ["ID"] # AllowList - ["Otel", "Aws", "Gcp"] # DenyList # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration - name: var-declaration disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion - name: unconditional-recursion disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return - name: unexported-return disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error - name: unhandled-error disabled: false arguments: - "fmt.Fprint" - "fmt.Fprintf" - "fmt.Fprintln" - "fmt.Print" - "fmt.Printf" - "fmt.Println" # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt - name: unnecessary-stmt disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break - name: useless-break disabled: false # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value - name: waitgroup-by-value disabled: false opentelemetry-go-1.21.0/.lycheeignore000066400000000000000000000004521452547353200175630ustar00rootroot00000000000000http://localhost http://jaeger-collector https://github.com/open-telemetry/opentelemetry-go/milestone/ https://github.com/open-telemetry/opentelemetry-go/projects file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual opentelemetry-go-1.21.0/.markdownlint.yaml000066400000000000000000000004651452547353200205640ustar00rootroot00000000000000# Default state for all rules default: true # ul-style MD004: false # hard-tabs MD010: false # line-length MD013: false # no-duplicate-header MD024: siblings_only: true #single-title MD025: false # ol-prefix MD029: style: ordered # no-inline-html MD033: false # fenced-code-language MD040: false opentelemetry-go-1.21.0/CHANGELOG.md000066400000000000000000005777261452547353200167450ustar00rootroot00000000000000# Changelog All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ## [Unreleased] ## [1.21.0/0.44.0] 2023-11-16 ### Removed - Remove the deprecated `go.opentelemetry.io/otel/bridge/opencensus.NewTracer`. (#4706) - Remove the deprecated `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` module. (#4707) - Remove the deprecated `go.opentelemetry.io/otel/example/view` module. (#4708) - Remove the deprecated `go.opentelemetry.io/otel/example/fib` module. (#4723) ### Fixed - Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4719) - Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4719) ## [1.20.0/0.43.0] 2023-11-10 This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. 
See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this. ### Added - Add `go.opentelemetry.io/otel/bridge/opencensus.InstallTraceBridge`, which installs the OpenCensus trace bridge, and replaces `opencensus.NewTracer`. (#4567) - Add scope version to trace and metric bridges in `go.opentelemetry.io/otel/bridge/opencensus`. (#4584) - Add the `go.opentelemetry.io/otel/trace/embedded` package to be embedded in the exported trace API interfaces. (#4620) - Add the `go.opentelemetry.io/otel/trace/noop` package as a default no-op implementation of the trace API. (#4620) - Add context propagation in `go.opentelemetry.io/otel/example/dice`. (#4644) - Add view configuration to `go.opentelemetry.io/otel/example/prometheus`. (#4649) - Add `go.opentelemetry.io/otel/metric.WithExplicitBucketBoundaries`, which allows defining default explicit bucket boundaries when creating histogram instruments. (#4603) - Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4660) - Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4660) - Add Summary, SummaryDataPoint, and QuantileValue to `go.opentelemetry.io/sdk/metric/metricdata`. (#4622) - `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` now supports exemplars from OpenCensus. (#4585) - Add support for `WithExplicitBucketBoundaries` in `go.opentelemetry.io/otel/sdk/metric`. (#4605) - Add support for Summary metrics in `go.opentelemetry.io/otel/bridge/opencensus`. (#4668) ### Deprecated - Deprecate `go.opentelemetry.io/otel/bridge/opencensus.NewTracer` in favor of `opencensus.InstallTraceBridge`. (#4567) - Deprecate `go.opentelemetry.io/otel/example/fib` package is in favor of `go.opentelemetry.io/otel/example/dice`. (#4618) - Deprecate `go.opentelemetry.io/otel/trace.NewNoopTracerProvider`. Use the added `NewTracerProvider` function in `go.opentelemetry.io/otel/trace/noop` instead. (#4620) - Deprecate `go.opentelemetry.io/otel/example/view` package in favor of `go.opentelemetry.io/otel/example/prometheus`. (#4649) - Deprecate `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4693) ### Changed - `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583) - The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type. This extends the `TracerProvider` interface and is is a breaking change for any existing implementation. Implementors need to update their implementations based on what they want the default behavior of the interface to be. See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) - The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type. This extends the `Tracer` interface and is is a breaking change for any existing implementation. Implementors need to update their implementations based on what they want the default behavior of the interface to be. See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) - The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type. 
This extends the `Span` interface and is is a breaking change for any existing implementation. Implementors need to update their implementations based on what they want the default behavior of the interface to be. See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) - `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` does no longer depend on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) - `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` does no longer depend on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) - Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4670) - Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4670) - Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4669) - Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4669) - Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4679) - Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4679) ### Fixed - Fix improper parsing of characters such us `+`, `/` by `Parse` in `go.opentelemetry.io/otel/baggage` as they were rendered as a whitespace. (#4667) - Fix improper parsing of characters such us `+`, `/` passed via `OTEL_RESOURCE_ATTRIBUTES` in `go.opentelemetry.io/otel/sdk/resource` as they were rendered as a whitespace. (#4699) - Fix improper parsing of characters such us `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` as they were rendered as a whitespace. (#4699) - Fix improper parsing of characters such us `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` as they were rendered as a whitespace. (#4699) - Fix improper parsing of characters such us `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlptracegrpc` as they were rendered as a whitespace. (#4699) - Fix improper parsing of characters such us `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlptracehttp` as they were rendered as a whitespace. (#4699) - In `go.opentelemetry.op/otel/exporters/prometheus`, the exporter no longer `Collect`s metrics after `Shutdown` is invoked. (#4648) - Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4695) - Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4695) ## [1.19.0/0.42.0/0.0.7] 2023-09-28 This release contains the first stable release of the OpenTelemetry Go [metric SDK]. Our project stability guarantees now apply to the `go.opentelemetry.io/otel/sdk/metric` package. See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. 
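
The v1.20.0 entries above note that the trace API's `TracerProvider`, `Tracer`, and `Span` interfaces now embed the `go.opentelemetry.io/otel/trace/embedded` types and that implementors must update their code. As a rough, hypothetical sketch of what that looks like (the type names here are invented, not from this repository), a custom tracer embeds the matching `embedded` type, which turns future API additions into compile-time errors rather than silent misbehavior:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/trace"
	"go.opentelemetry.io/otel/trace/embedded"
	"go.opentelemetry.io/otel/trace/noop"
)

// loggingTracer is a hypothetical custom trace.Tracer. Embedding
// embedded.Tracer is what satisfies the interface requirement added in
// v1.20.0; the inner tracer supplies the actual span behavior.
type loggingTracer struct {
	embedded.Tracer

	inner trace.Tracer
}

func (t loggingTracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
	fmt.Println("starting span:", name)
	return t.inner.Start(ctx, name, opts...)
}

func main() {
	// Delegate to the no-op implementation added in the same release.
	var tr trace.Tracer = loggingTracer{inner: noop.NewTracerProvider().Tracer("example")}
	_, span := tr.Start(context.Background(), "demo")
	defer span.End()
}
```

Per the `go.opentelemetry.io/otel/trace` package documentation referenced above, embedding the corresponding type from `trace/noop` instead of `trace/embedded` is the alternative: new API methods then default to no-ops rather than causing compile errors.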
### Added - Add the "Roll the dice" getting started application example in `go.opentelemetry.io/otel/example/dice`. (#4539) - The `WithWriter` and `WithPrettyPrint` options to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set a custom `io.Writer`, and allow displaying the output in human-readable JSON. (#4507) ### Changed - Allow '/' characters in metric instrument names. (#4501) - The exporter in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` does not prettify its output by default anymore. (#4507) - Upgrade `gopkg.io/yaml` from `v2` to `v3` in `go.opentelemetry.io/otel/schema`. (#4535) ### Fixed - In `go.opentelemetry.op/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499) ### Removed - Remove `"go.opentelemetry.io/otel/bridge/opencensus".NewMetricExporter`, which is replaced by `NewMetricProducer`. (#4566) ## [1.19.0-rc.1/0.42.0-rc.1] 2023-09-14 This is a release candidate for the v1.19.0/v0.42.0 release. That release is expected to include the `v1` release of the OpenTelemetry Go metric SDK and will provide stability guarantees of that SDK. See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. ### Changed - Allow '/' characters in metric instrument names. (#4501) ### Fixed - In `go.opentelemetry.op/otel/exporters/prometheus`, don't try to create the prometheus metric on every `Collect` if we know the scope is invalid. (#4499) ## [1.18.0/0.41.0/0.0.6] 2023-09-12 This release drops the compatibility guarantee of [Go 1.19]. ### Added - Add `WithProducer` option in `go.opentelemetry.op/otel/exporters/prometheus` to restore the ability to register producers on the prometheus exporter's manual reader. (#4473) - Add `IgnoreValue` option in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest` to allow ignoring values when comparing metrics. (#4447) ### Changed - Use a `TestingT` interface instead of `*testing.T` struct in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#4483) ### Deprecated - The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` was deprecated in `v0.35.0` (#3541). The deprecation notice format for the function has been corrected to trigger Go documentation and build tooling. (#4470) ### Removed - Removed the deprecated `go.opentelemetry.io/otel/exporters/jaeger` package. (#4467) - Removed the deprecated `go.opentelemetry.io/otel/example/jaeger` package. (#4467) - Removed the deprecated `go.opentelemetry.io/otel/sdk/metric/aggregation` package. (#4468) - Removed the deprecated internal packages in `go.opentelemetry.io/otel/exporters/otlp` and its sub-packages. (#4469) - Dropped guaranteed support for versions of Go less than 1.20. (#4481) ## [1.17.0/0.40.0/0.0.5] 2023-08-28 ### Added - Export the `ManualReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244) - Export the `PeriodicReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244) - Add support for exponential histogram aggregations. A histogram can be configured as an exponential histogram using a view with `"go.opentelemetry.io/otel/sdk/metric".ExponentialHistogram` as the aggregation. (#4245) - Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4272) - Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. 
(#4272) - The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment variable. (#4287) - Add `WithoutCounterSuffixes` option in `go.opentelemetry.io/otel/exporters/prometheus` to disable addition of `_total` suffixes. (#4306) - Add info and debug logging to the metric SDK in `go.opentelemetry.io/otel/sdk/metric`. (#4315) - The `go.opentelemetry.io/otel/semconv/v1.21.0` package. The package contains semantic conventions from the `v1.21.0` version of the OpenTelemetry Semantic Conventions. (#4362) - Accept 201 to 299 HTTP status as success in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4365) - Document the `Temporality` and `Aggregation` methods of the `"go.opentelemetry.io/otel/sdk/metric".Exporter"` need to be concurrent safe. (#4381) - Expand the set of units supported by the Prometheus exporter, and don't add unit suffixes if they are already present in `go.opentelemetry.op/otel/exporters/prometheus` (#4374) - Move the `Aggregation` interface and its implementations from `go.opentelemetry.io/otel/sdk/metric/aggregation` to `go.opentelemetry.io/otel/sdk/metric`. (#4435) - The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` environment variable. (#4437) - Add the `NewAllowKeysFilter` and `NewDenyKeysFilter` functions to `go.opentelemetry.io/otel/attribute` to allow convenient creation of allow-keys and deny-keys filters. (#4444) - Support Go 1.21. (#4463) ### Changed - Starting from `v1.21.0` of semantic conventions, `go.opentelemetry.io/otel/semconv/{version}/httpconv` and `go.opentelemetry.io/otel/semconv/{version}/netconv` packages will no longer be published. (#4145) - Log duplicate instrument conflict at a warning level instead of info in `go.opentelemetry.io/otel/sdk/metric`. (#4202) - Return an error on the creation of new instruments in `go.opentelemetry.io/otel/sdk/metric` if their name doesn't pass regexp validation. (#4210) - `NewManualReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*ManualReader` instead of `Reader`. (#4244) - `NewPeriodicReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*PeriodicReader` instead of `Reader`. (#4244) - Count the Collect time in the `PeriodicReader` timeout in `go.opentelemetry.io/otel/sdk/metric`. (#4221) - The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272) - The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272) - If an attribute set is omitted from an async callback, the previous value will no longer be exported in `go.opentelemetry.io/otel/sdk/metric`. (#4290) - If an attribute set is observed multiple times in an async callback in `go.opentelemetry.io/otel/sdk/metric`, the values will be summed instead of the last observation winning. (#4289) - Allow the explicit bucket histogram aggregation to be used for the up-down counter, observable counter, observable up-down counter, and observable gauge in the `go.opentelemetry.io/otel/sdk/metric` package. (#4332) - Restrict `Meter`s in `go.opentelemetry.io/otel/sdk/metric` to only register and collect instruments it created. 
(#4333) - `PeriodicReader.Shutdown` and `PeriodicReader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric` now apply the periodic reader's timeout to the operation if the user provided context does not contain a deadline. (#4356, #4377) - Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.21.0`. (#4408) - Increase instrument name maximum length from 63 to 255 characters in `go.opentelemetry.io/otel/sdk/metric`. (#4434) - Add `go.opentelemetry.op/otel/sdk/metric.WithProducer` as an `Option` for `"go.opentelemetry.io/otel/sdk/metric".NewManualReader` and `"go.opentelemetry.io/otel/sdk/metric".NewPeriodicReader`. (#4346) ### Removed - Remove `Reader.RegisterProducer` in `go.opentelemetry.io/otel/metric`. Use the added `WithProducer` option instead. (#4346) - Remove `Reader.ForceFlush` in `go.opentelemetry.io/otel/metric`. Notice that `PeriodicReader.ForceFlush` is still available. (#4375) ### Fixed - Correctly format log messages from the `go.opentelemetry.io/otel/exporters/zipkin` exporter. (#4143) - Log an error for calls to `NewView` in `go.opentelemetry.io/otel/sdk/metric` that have empty criteria. (#4307) - Fix `"go.opentelemetry.io/otel/sdk/resource".WithHostID()` to not set an empty `host.id`. (#4317) - Use the instrument identifying fields to cache aggregators and determine duplicate instrument registrations in `go.opentelemetry.io/otel/sdk/metric`. (#4337) - Detect duplicate instruments for case-insensitive names in `go.opentelemetry.io/otel/sdk/metric`. (#4338) - The `ManualReader` will not panic if `AggregationSelector` returns `nil` in `go.opentelemetry.io/otel/sdk/metric`. (#4350) - If a `Reader`'s `AggregationSelector` returns `nil` or `DefaultAggregation` the pipeline will use the default aggregation. (#4350) - Log a suggested view that fixes instrument conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4349) - Fix possible panic, deadlock and race condition in batch span processor in `go.opentelemetry.io/otel/sdk/trace`. (#4353) - Improve context cancellation handling in batch span processor's `ForceFlush` in `go.opentelemetry.io/otel/sdk/trace`. (#4369) - Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` using gotmpl. (#4397, #3846) - Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4404, #3846) - Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4407, #3846) - Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4400, #3846) - Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4401, #3846) - Do not block the metric SDK when OTLP metric exports are blocked in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. 
(#3925, #4395) - Do not append `_total` if the counter already has that suffix for the Prometheus exproter in `go.opentelemetry.io/otel/exporter/prometheus`. (#4373) - Fix resource detection data race in `go.opentelemetry.io/otel/sdk/resource`. (#4409) - Use the first-seen instrument name during instrument name conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4428) ### Deprecated - The `go.opentelemetry.io/otel/exporters/jaeger` package is deprecated. OpenTelemetry dropped support for Jaeger exporter in July 2023. Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` or `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` instead. (#4423) - The `go.opentelemetry.io/otel/example/jaeger` package is deprecated. (#4423) - The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` package is deprecated. (#4420) - The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf` package is deprecated. (#4420) - The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest` package is deprecated. (#4420) - The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform` package is deprecated. (#4420) - The `go.opentelemetry.io/otel/exporters/otlp/internal` package is deprecated. (#4421) - The `go.opentelemetry.io/otel/exporters/otlp/internal/envconfig` package is deprecated. (#4421) - The `go.opentelemetry.io/otel/exporters/otlp/internal/retry` package is deprecated. (#4421) - The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` package is deprecated. (#4425) - The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/envconfig` package is deprecated. (#4425) - The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig` package is deprecated. (#4425) - The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlptracetest` package is deprecated. (#4425) - The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry` package is deprecated. (#4425) - The `go.opentelemetry.io/otel/sdk/metric/aggregation` package is deprecated. Use the aggregation types added to `go.opentelemetry.io/otel/sdk/metric` instead. (#4435) ## [1.16.0/0.39.0] 2023-05-18 This release contains the first stable release of the OpenTelemetry Go [metric API]. Our project stability guarantees now apply to the `go.opentelemetry.io/otel/metric` package. See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. ### Added - The `go.opentelemetry.io/otel/semconv/v1.19.0` package. The package contains semantic conventions from the `v1.19.0` version of the OpenTelemetry specification. (#3848) - The `go.opentelemetry.io/otel/semconv/v1.20.0` package. The package contains semantic conventions from the `v1.20.0` version of the OpenTelemetry specification. (#4078) - The Exponential Histogram data types in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4165) - OTLP metrics exporter now supports the Exponential Histogram Data Type. (#4222) - Fix serialization of `time.Time` zero values in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` packages. (#4271) ### Changed - Use `strings.Cut()` instead of `string.SplitN()` for better readability and memory use. (#4049) - `MeterProvider` returns noop meters once it has been shutdown. (#4154) ### Removed - The deprecated `go.opentelemetry.io/otel/metric/instrument` package is removed. Use `go.opentelemetry.io/otel/metric` instead. 
(#4055) ### Fixed - Fix build for BSD based systems in `go.opentelemetry.io/otel/sdk/resource`. (#4077) ## [1.16.0-rc.1/0.39.0-rc.1] 2023-05-03 This is a release candidate for the v1.16.0/v0.39.0 release. That release is expected to include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API. See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. ### Added - Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#4039) - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`. - Use `GetMeterProivder` for a global `metric.MeterProvider`. - Use `SetMeterProivder` to set the global `metric.MeterProvider`. ### Changed - Move the `go.opentelemetry.io/otel/metric` module to the `stable-v1` module set. This stages the metric API to be released as a stable module. (#4038) ### Removed - The `go.opentelemetry.io/otel/metric/global` package is removed. Use `go.opentelemetry.io/otel` instead. (#4039) ## [1.15.1/0.38.1] 2023-05-02 ### Fixed - Remove unused imports from `sdk/resource/host_id_bsd.go` which caused build failures. (#4040, #4041) ## [1.15.0/0.38.0] 2023-04-27 ### Added - The `go.opentelemetry.io/otel/metric/embedded` package. (#3916) - The `Version` function to `go.opentelemetry.io/otel/sdk` to return the SDK version. (#3949) - Add a `WithNamespace` option to `go.opentelemetry.io/otel/exporters/prometheus` to allow users to prefix metrics with a namespace. (#3970) - The following configuration types were added to `go.opentelemetry.io/otel/metric/instrument` to be used in the configuration of measurement methods. (#3971) - The `AddConfig` used to hold configuration for addition measurements - `NewAddConfig` used to create a new `AddConfig` - `AddOption` used to configure an `AddConfig` - The `RecordConfig` used to hold configuration for recorded measurements - `NewRecordConfig` used to create a new `RecordConfig` - `RecordOption` used to configure a `RecordConfig` - The `ObserveConfig` used to hold configuration for observed measurements - `NewObserveConfig` used to create a new `ObserveConfig` - `ObserveOption` used to configure an `ObserveConfig` - `WithAttributeSet` and `WithAttributes` are added to `go.opentelemetry.io/otel/metric/instrument`. They return an option used during a measurement that defines the attribute Set associated with the measurement. (#3971) - The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` to return the OTLP metrics client version. (#3956) - The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlptrace` to return the OTLP trace client version. (#3956) ### Changed - The `Extrema` in `go.opentelemetry.io/otel/sdk/metric/metricdata` is redefined with a generic argument of `[N int64 | float64]`. (#3870) - Update all exported interfaces from `go.opentelemetry.io/otel/metric` to embed their corresponding interface from `go.opentelemetry.io/otel/metric/embedded`. This adds an implementation requirement to set the interface default behavior for unimplemented methods. (#3916) - Move No-Op implementation from `go.opentelemetry.io/otel/metric` into its own package `go.opentelemetry.io/otel/metric/noop`. (#3941) - `metric.NewNoopMeterProvider` is replaced with `noop.NewMeterProvider` - Add all the methods from `"go.opentelemetry.io/otel/trace".SpanContext` to `bridgeSpanContext` by embedding `otel.SpanContext` in `bridgeSpanContext`. 
(#3966)
- Wrap `UploadMetrics` error in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/` to improve error message when encountering generic grpc errors. (#3974)
- The measurement methods for all instruments in `go.opentelemetry.io/otel/metric/instrument` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971)
  - The `Int64Counter.Add` method now accepts `...AddOption`
  - The `Float64Counter.Add` method now accepts `...AddOption`
  - The `Int64UpDownCounter.Add` method now accepts `...AddOption`
  - The `Float64UpDownCounter.Add` method now accepts `...AddOption`
  - The `Int64Histogram.Record` method now accepts `...RecordOption`
  - The `Float64Histogram.Record` method now accepts `...RecordOption`
  - The `Int64Observer.Observe` method now accepts `...ObserveOption`
  - The `Float64Observer.Observe` method now accepts `...ObserveOption`
- The `Observer` methods in `go.opentelemetry.io/otel/metric` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971)
  - The `Observer.ObserveInt64` method now accepts `...ObserveOption`
  - The `Observer.ObserveFloat64` method now accepts `...ObserveOption`
- Move global metric back to `go.opentelemetry.io/otel/metric/global` from `go.opentelemetry.io/otel`. (#3986)

### Fixed

- `TracerProvider` allows calling `Tracer()` while it's shutting down.
  It used to deadlock. (#3924)
- Use the SDK version for the Telemetry SDK resource detector in `go.opentelemetry.io/otel/sdk/resource`. (#3949)
- Fix a data race in `SpanProcessor` returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace`. (#3951)
- Automatically figure out the default aggregation with `aggregation.Default`. (#3967)

### Deprecated

- The `go.opentelemetry.io/otel/metric/instrument` package is deprecated.
  Use the equivalent types added to `go.opentelemetry.io/otel/metric` instead. (#4018)
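For illustration only, a minimal sketch of the option-based measurement calls described in the 1.15.0 entries above, written against the stable `go.opentelemetry.io/otel/metric` package that replaced the deprecated `go.opentelemetry.io/otel/metric/instrument` package (the meter scope, instrument name, and attribute below are placeholders):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	ctx := context.Background()

	// Obtain a Meter from the globally registered MeterProvider.
	meter := otel.Meter("example/measurement-options")

	// Create a synchronous counter instrument.
	requests, err := meter.Int64Counter(
		"app.requests",
		metric.WithDescription("Number of processed requests."),
	)
	if err != nil {
		panic(err)
	}

	// Attributes are now passed as an AddOption instead of as
	// variadic attribute.KeyValue arguments.
	requests.Add(ctx, 1, metric.WithAttributes(
		attribute.String("route", "/home"),
	))
}
```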
## [1.15.0-rc.2/0.38.0-rc.2] 2023-03-23

This is a release candidate for the v1.15.0/v0.38.0 release.
That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.

### Added

- The `WithHostID` option to `go.opentelemetry.io/otel/sdk/resource`. (#3812)
- The `WithoutTimestamps` option to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set all timestamps to zero. (#3828)
- The new `Exemplar` type is added to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
  Both the `DataPoint` and `HistogramDataPoint` types from that package have a new field of `Exemplars` containing the sampled exemplars for their timeseries. (#3849)
- Configuration for each metric instrument in `go.opentelemetry.io/otel/sdk/metric/instrument`. (#3895)
- The internal logging introduces a warning level verbosity equal to `V(1)`. (#3900)
- Added a log message warning about usage of `SimpleSpanProcessor` in production environments. (#3854)

### Changed

- Optimize memory allocation when creating a new `Set` using `NewSet` or `NewSetWithFiltered` in `go.opentelemetry.io/otel/attribute`. (#3832)
- Optimize memory allocation when creating new metric instruments in `go.opentelemetry.io/otel/sdk/metric`. (#3832)
- Avoid creating new objects on all calls to `WithDeferredSetup` and `SkipContextSetup` in OpenTracing bridge. (#3833)
- The `New` and `Detect` functions from `go.opentelemetry.io/otel/sdk/resource` return errors that wrap underlying errors instead of just containing the underlying error strings. (#3844)
- Both the `Histogram` and `HistogramDataPoint` are redefined with a generic argument of `[N int64 | float64]` in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#3849)
- The metric `Export` interface from `go.opentelemetry.io/otel/sdk/metric` accepts a `*ResourceMetrics` instead of `ResourceMetrics`. (#3853)
- Rename `Asynchronous` to `Observable` in `go.opentelemetry.io/otel/metric/instrument`. (#3892)
- Rename `Int64ObserverOption` to `Int64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895)
- Rename `Float64ObserverOption` to `Float64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895)
- The internal logging changes the verbosity level of info to `V(4)`, the verbosity level of debug to `V(8)`. (#3900)

### Fixed

- `TracerProvider` consistently does not allow registering a `SpanProcessor` after shutdown. (#3845)

### Removed

- The deprecated `go.opentelemetry.io/otel/metric/global` package is removed. (#3829)
- The unneeded `Synchronous` interface in `go.opentelemetry.io/otel/metric/instrument` was removed. (#3892)
- The `Float64ObserverConfig` and `NewFloat64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`.
  Use the added `float64` instrument configuration instead. (#3895)
- The `Int64ObserverConfig` and `NewInt64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`.
  Use the added `int64` instrument configuration instead. (#3895)
- The `NewNoopMeter` function in `go.opentelemetry.io/otel/metric`, use `NewMeterProvider().Meter("")` instead. (#3893)

## [1.15.0-rc.1/0.38.0-rc.1] 2023-03-01

This is a release candidate for the v1.15.0/v0.38.0 release.
That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.

This release drops the compatibility guarantee of [Go 1.18].

### Added

- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#3818)
  - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`.
  - Use `GetMeterProvider` for a global `metric.MeterProvider`.
  - Use `SetMeterProvider` to set the global `metric.MeterProvider`.

### Changed

- Dropped compatibility testing for [Go 1.18].
  The project no longer guarantees support for this version of Go. (#3813)

### Fixed

- Handle empty environment variables as if they were not set. (#3764)
- Clarify that the `httpconv` and `netconv` packages in `go.opentelemetry.io/otel/semconv/*` provide tracing semantic conventions. (#3823)
- Fix race conditions in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic. (#3899)
- Fix sending nil `scopeInfo` to metrics channel in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic in `github.com/prometheus/client_golang/prometheus`. (#3899)

### Deprecated

- The `go.opentelemetry.io/otel/metric/global` package is deprecated.
  Use `go.opentelemetry.io/otel` instead. (#3818)

### Removed

- The deprecated `go.opentelemetry.io/otel/metric/unit` package is removed. (#3814)

## [1.14.0/0.37.0/0.0.4] 2023-02-27

This release is the last to support [Go 1.18].
The next release will require at least [Go 1.19].

### Added

- The `event` type semantic conventions are added to `go.opentelemetry.io/otel/semconv/v1.17.0`.
(#3697) - Support [Go 1.20]. (#3693) - The `go.opentelemetry.io/otel/semconv/v1.18.0` package. The package contains semantic conventions from the `v1.18.0` version of the OpenTelemetry specification. (#3719) - The following `const` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included: - `OtelScopeNameKey` -> `OTelScopeNameKey` - `OtelScopeVersionKey` -> `OTelScopeVersionKey` - `OtelLibraryNameKey` -> `OTelLibraryNameKey` - `OtelLibraryVersionKey` -> `OTelLibraryVersionKey` - `OtelStatusCodeKey` -> `OTelStatusCodeKey` - `OtelStatusDescriptionKey` -> `OTelStatusDescriptionKey` - `OtelStatusCodeOk` -> `OTelStatusCodeOk` - `OtelStatusCodeError` -> `OTelStatusCodeError` - The following `func` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included: - `OtelScopeName` -> `OTelScopeName` - `OtelScopeVersion` -> `OTelScopeVersion` - `OtelLibraryName` -> `OTelLibraryName` - `OtelLibraryVersion` -> `OTelLibraryVersion` - `OtelStatusDescription` -> `OTelStatusDescription` - A `IsSampled` method is added to the `SpanContext` implementation in `go.opentelemetry.io/otel/bridge/opentracing` to expose the span sampled state. See the [README](./bridge/opentracing/README.md) for more information. (#3570) - The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/metric`. (#3738) - The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/trace`. (#3739) - The following environment variables are supported by the periodic `Reader` in `go.opentelemetry.io/otel/sdk/metric`. (#3763) - `OTEL_METRIC_EXPORT_INTERVAL` sets the time between collections and exports. - `OTEL_METRIC_EXPORT_TIMEOUT` sets the timeout an export is attempted. ### Changed - Fall-back to `TextMapCarrier` when it's not `HttpHeader`s in `go.opentelemetry.io/otel/bridge/opentracing`. (#3679) - The `Collect` method of the `"go.opentelemetry.io/otel/sdk/metric".Reader` interface is updated to accept the `metricdata.ResourceMetrics` value the collection will be made into. This change is made to enable memory reuse by SDK users. (#3732) - The `WithUnit` option in `go.opentelemetry.io/otel/sdk/metric/instrument` is updated to accept a `string` for the unit value. (#3776) ### Fixed - Ensure `go.opentelemetry.io/otel` does not use generics. (#3723, #3725) - Multi-reader `MeterProvider`s now export metrics for all readers, instead of just the first reader. (#3720, #3724) - Remove use of deprecated `"math/rand".Seed` in `go.opentelemetry.io/otel/example/prometheus`. (#3733) - Do not silently drop unknown schema data with `Parse` in `go.opentelemetry.io/otel/schema/v1.1`. (#3743) - Data race issue in OTLP exporter retry mechanism. (#3755, #3756) - Wrapping empty errors when exporting in `go.opentelemetry.io/otel/sdk/metric`. (#3698, #3772) - Incorrect "all" and "resource" definition for schema files in `go.opentelemetry.io/otel/schema/v1.1`. (#3777) ### Deprecated - The `go.opentelemetry.io/otel/metric/unit` package is deprecated. Use the equivalent unit string instead. (#3776) - Use `"1"` instead of `unit.Dimensionless` - Use `"By"` instead of `unit.Bytes` - Use `"ms"` instead of `unit.Milliseconds` ## [1.13.0/0.36.0] 2023-02-07 ### Added - Attribute `KeyValue` creations functions to `go.opentelemetry.io/otel/semconv/v1.17.0` for all non-enum semantic conventions. These functions ensure semantic convention type correctness. (#3675) ### Fixed - Removed the `http.target` attribute from being added by `ServerRequest` in the following packages. 
(#3687)
  - `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`
  - `go.opentelemetry.io/otel/semconv/v1.14.0/httpconv`
  - `go.opentelemetry.io/otel/semconv/v1.15.0/httpconv`
  - `go.opentelemetry.io/otel/semconv/v1.16.0/httpconv`
  - `go.opentelemetry.io/otel/semconv/v1.17.0/httpconv`

### Removed

- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is removed. (#3631)
- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is removed. (#3631)
- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is removed. (#3631)
- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncint64` package is removed. (#3631)

## [1.12.0/0.35.0] 2023-01-28

### Added

- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
  This option is used to configure `int64` Observer callbacks during their creation. (#3507)
- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
  This option is used to configure `float64` Observer callbacks during their creation. (#3507)
- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`.
  These additions are used to enable external metric Producers. (#3524)
- The `Callback` function type to `go.opentelemetry.io/otel/metric`.
  This new named function type is registered with a `Meter`. (#3564)
- The `go.opentelemetry.io/otel/semconv/v1.13.0` package.
  The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification. (#3499)
  - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
  - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
  - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
  - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
  - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
  - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
  - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
  - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
  - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`.
  - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`.
- The `go.opentelemetry.io/otel/semconv/v1.14.0` package.
  The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566)
- The `go.opentelemetry.io/otel/semconv/v1.15.0` package.
  The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578)
- The `go.opentelemetry.io/otel/semconv/v1.16.0` package.
  The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579)
- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`.
  These instruments are used as replacements of the deprecated `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages. (#3575, #3586)
  - `Float64ObservableCounter` replaces the `asyncfloat64.Counter`
  - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter`
  - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge`
  - `Int64ObservableCounter` replaces the `asyncint64.Counter`
  - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter`
  - `Int64ObservableGauge` replaces the `asyncint64.Gauge`
  - `Float64Counter` replaces the `syncfloat64.Counter`
  - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter`
  - `Float64Histogram` replaces the `syncfloat64.Histogram`
  - `Int64Counter` replaces the `syncint64.Counter`
  - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter`
  - `Int64Histogram` replaces the `syncint64.Histogram`
- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`.
  This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116)
- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
  This type is used to represent min/max values and still be able to distinguish unset and zero values. (#3487)
- The `go.opentelemetry.io/otel/semconv/v1.17.0` package.
  The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599)
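For illustration only, a minimal sketch of an observable instrument observed by a `Callback` registered with a `Meter`, written against the current stable `go.opentelemetry.io/otel/metric` API rather than the historical `metric/instrument` packages (the meter scope and instrument name are placeholders):

```go
package main

import (
	"context"
	"runtime"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example/observable")

	// Create an asynchronous (observable) gauge instrument.
	goroutines, err := meter.Int64ObservableGauge("process.goroutines")
	if err != nil {
		panic(err)
	}

	// Register a Callback with the Meter; the SDK invokes it on every
	// collection, and the returned Registration can unregister it later.
	reg, err := meter.RegisterCallback(
		func(_ context.Context, o metric.Observer) error {
			o.ObserveInt64(goroutines, int64(runtime.NumGoroutine()))
			return nil
		},
		goroutines,
	)
	if err != nil {
		panic(err)
	}
	defer func() { _ = reg.Unregister() }()
}
```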
### Changed

- Jaeger and Zipkin exporters use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500)
- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and configuration based on the instrument type. (#3507)
  - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`.
  - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`.
  - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`.
  - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`.
- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package.
  This `Registration` can be used to unregister callbacks. (#3522)
- Global error handler uses an atomic value instead of a mutex. (#3543)
- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541)
- Global logger uses an atomic value instead of a mutex. (#3545)
- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. (#3551)
- The `Sampler` returned from `TraceIDRatioBased` `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions.
This fixes random sampling when using ID generators like `xray.IDGenerator` and increasing parity with other language implementations. (#3557) - Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in errors identifying their signal name. Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516) - Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514) - The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. (#3562) - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter` - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter` - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram` - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter` - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter` - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge` - The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed. - The named `Callback` replaces the inline function parameter. (#3564) - `Callback` is required to return an error. (#3576) - `Callback` accepts the added `Observer` parameter added. This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly. (#3584) - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587) - The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions. This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint. Instead it uses the `net.sock.peer` attributes. (#3581) - The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487) ### Fixed - Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549) - The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter. Trying to register a callback with instruments from a different meter will result in an error being returned. (#3584) ### Deprecated - The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated. Use `NewMetricProducer` instead. (#3541) - The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated. Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) - The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated. Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) - The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated. Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) - The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated. Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) - The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated. 
Use `NewTracerProvider` instead. (#3116) ### Removed - The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520) - The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed. Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) - The `Counter` method is replaced by `Meter.Int64ObservableCounter` - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter` - The `Gauge` method is replaced by `Meter.Int64ObservableGauge` - The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed. Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) - The `Counter` method is replaced by `Meter.Float64ObservableCounter` - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter` - The `Gauge` method is replaced by `Meter.Float64ObservableGauge` - The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed. Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) - The `Counter` method is replaced by `Meter.Int64Counter` - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter` - The `Histogram` method is replaced by `Meter.Int64Histogram` - The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed. Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) - The `Counter` method is replaced by `Meter.Float64Counter` - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter` - The `Histogram` method is replaced by `Meter.Float64Histogram` ## [1.11.2/0.34.0] 2022-12-05 ### Added - The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package. This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it. (#3387) - Add Instrumentation Scope and Version as info metric and label in Prometheus exporter. This can be disabled using the `WithoutScopeInfo()` option added to that package.(#3273, #3357) - OTLP exporters now recognize: (#3363) - `OTEL_EXPORTER_OTLP_INSECURE` - `OTEL_EXPORTER_OTLP_TRACES_INSECURE` - `OTEL_EXPORTER_OTLP_METRICS_INSECURE` - `OTEL_EXPORTER_OTLP_CLIENT_KEY` - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY` - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY` - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE` - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE` - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE` - The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric`. These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459) - The `Instrument` and `InstrumentKind` type are added to `go.opentelemetry.io/otel/sdk/metric`. These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459) - The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459) - The `AssertHasAttributes` allows instrument authors to test that datapoints returned have appropriate attributes. (#3487) ### Changed - The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`. 
Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option.
  The views registered with the `MeterProvider` apply to all `Reader`s. (#3387)
- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. (#3260)
- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260)
- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260)
- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260)

### Fixed

- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369)
- Remove comparable requirement for `Reader`s. (#3387)
- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389)
- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398)
- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340)
- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436)
- Re-enabled Attribute Filters in the Metric SDK. (#3396)
- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408)
- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432)
- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440)
- Prevent duplicate Prometheus description, unit, and type. (#3469)
- Prevent a panic when using an incorrect `attribute.Value.As[Type]Slice()`. (#3489)

### Removed

- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486)
- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed.
  Use `otlpmetric[http|grpc].New` directly. (#3486)

### Deprecated

- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated.
  Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3476)
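For illustration only, a minimal sketch of the `Instrument`, `Stream`, `NewView`, and `WithView` replacements referenced above (the instrument names are placeholders, and a `ManualReader` stands in for whatever `Reader` an application actually registers):

```go
package main

import (
	"context"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	reader := sdkmetric.NewManualReader()

	// Rename the instrument "latency" to "request.latency".
	// Views registered on the MeterProvider apply to all of its Readers.
	view := sdkmetric.NewView(
		sdkmetric.Instrument{Name: "latency"},
		sdkmetric.Stream{Name: "request.latency"},
	)

	provider := sdkmetric.NewMeterProvider(
		sdkmetric.WithReader(reader),
		sdkmetric.WithView(view),
	)
	defer func() { _ = provider.Shutdown(context.Background()) }()
}
```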
## [1.11.1/0.33.0] 2022-10-19

### Added

- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation.
  By default, it will register with the default Prometheus registerer.
  A non-default registerer can be used by passing the `WithRegisterer` option. (#3239)
- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341)
- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285)

### Changed

- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error.
  It will return an error if the exporter fails to register with Prometheus. (#3239)

### Fixed

- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963)
- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it.
  This fixes the implementation to be compliant with the W3C specification. (#3226)
- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108, #3252)
- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` no longer return an error when no processor is registered. (#3268)
- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281)
- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlpmetric` exporters. (#3284, #3293)
- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278)
- `UpDownCounters` are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358)
- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup.
  Instead, the exporter is defined as an "unchecked" collector for Prometheus.
  This fixes the `reader is not registered` warning currently emitted on startup. (#3291, #3342)
- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360)
- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names.
  This can be disabled using the `WithoutUnits()` option added to that package. (#3352)

## [1.11.0/0.32.3] 2022-10-12

### Added

- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlptrace/otlptracehttp`). (#3261)

### Changed

- `span.SetStatus` has been updated such that calls that lower the status are now no-ops. (#3214)
- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`.
  This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235)

## [0.32.2] Metric SDK (Alpha) - 2022-10-11

### Added

- Added an example of using metric views to customize instruments. (#3177)
- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetrichttp`). (#3261)

### Changed

- Flush pending measurements with the `PeriodicReader` in the `go.opentelemetry.io/otel/sdk/metric` package when `ForceFlush` or `Shutdown` are called. (#3220)
- Update histogram default bounds to match the requirements of the latest specification. (#3222)
- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer. (#3265)

### Fixed

- Use default view if instrument does not match any registered view of a reader. (#3224, #3237)
- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251)
- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251)
- Log a warning when a conflicting instrument (e.g.
description, unit, data-type) is created instead of returning an error. (#3251) - The OpenCensus bridge no longer sends empty batches of metrics. (#3263) ## [0.32.1] Metric SDK (Alpha) - 2022-09-22 ### Changed - The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting. Invalid characters are replaced with `_`. (#3212) ### Added - The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192) - The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206) ### Fixed - Updated go.mods to point to valid versions of the sdk. (#3216) - Set the `MeterProvider` resource on all exported metric data. (#3218) ## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18 ### Changed - The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification. Please see the package documentation for how the new SDK is initialized and configured. (#3175) - Update the minimum supported go version to go1.18. Removes support for go1.17 (#3179) ### Removed - The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed. A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175) - The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175) - The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175) - The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175) - The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175) - The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175) - The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175) - The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175) - The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175) - The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175) - The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175) - The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed. A replacement package that supports the new metric SDK will be added back in a future release. (#3175) - The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175) - The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. (#3175) - The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175) - The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175) - The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175) - The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175) - The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175) - The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. 
(#3175) - The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175) - The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider`in the new metric SDK. (#3175) - The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider`in the new metric SDK. (#3175) - The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175) ## [1.10.0] - 2022-09-09 ### Added - Support Go 1.19. (#3077) Include compatibility testing and document support. (#3077) - Support the OTLP ExportTracePartialSuccess response; these are passed to the registered error handler. (#3106) - Upgrade go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0 (#3107) ### Changed - Fix misidentification of OpenTelemetry `SpanKind` in OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`). (#3096) - Attempting to start a span with a nil `context` will no longer cause a panic. (#3110) - All exporters will be shutdown even if one reports an error (#3091) - Ensure valid UTF-8 when truncating over-length attribute values. (#3156) ## [1.9.0/0.0.3] - 2022-08-01 ### Added - Add support for Schema Files format 1.1.x (metric "split" transform) with the new `go.opentelemetry.io/otel/schema/v1.1` package. (#2999) - Add the `go.opentelemetry.io/otel/semconv/v1.11.0` package. The package contains semantic conventions from the `v1.11.0` version of the OpenTelemetry specification. (#3009) - Add the `go.opentelemetry.io/otel/semconv/v1.12.0` package. The package contains semantic conventions from the `v1.12.0` version of the OpenTelemetry specification. (#3010) - Add the `http.method` attribute to HTTP server metric from all `go.opentelemetry.io/otel/semconv/*` packages. (#3018) ### Fixed - Invalid warning for context setup being deferred in `go.opentelemetry.io/otel/bridge/opentracing` package. (#3029) ## [1.8.0/0.31.0] - 2022-07-08 ### Added - Add support for `opentracing.TextMap` format in the `Inject` and `Extract` methods of the `"go.opentelemetry.io/otel/bridge/opentracing".BridgeTracer` type. (#2911) ### Changed - The `crosslink` make target has been updated to use the `go.opentelemetry.io/build-tools/crosslink` package. (#2886) - In the `go.opentelemetry.io/otel/sdk/instrumentation` package rename `Library` to `Scope` and alias `Library` as `Scope` (#2976) - Move metric no-op implementation form `nonrecording` to `metric` package. (#2866) ### Removed - Support for go1.16. Support is now only for go1.17 and go1.18 (#2917) ### Deprecated - The `Library` struct in the `go.opentelemetry.io/otel/sdk/instrumentation` package is deprecated. Use the equivalent `Scope` struct instead. (#2977) - The `ReadOnlySpan.InstrumentationLibrary` method from the `go.opentelemetry.io/otel/sdk/trace` package is deprecated. Use the equivalent `ReadOnlySpan.InstrumentationScope` method instead. (#2977) ## [1.7.0/0.30.0] - 2022-04-28 ### Added - Add the `go.opentelemetry.io/otel/semconv/v1.8.0` package. The package contains semantic conventions from the `v1.8.0` version of the OpenTelemetry specification. (#2763) - Add the `go.opentelemetry.io/otel/semconv/v1.9.0` package. The package contains semantic conventions from the `v1.9.0` version of the OpenTelemetry specification. (#2792) - Add the `go.opentelemetry.io/otel/semconv/v1.10.0` package. The package contains semantic conventions from the `v1.10.0` version of the OpenTelemetry specification. 
(#2842) - Added an in-memory exporter to metrictest to aid testing with a full SDK. (#2776) ### Fixed - Globally delegated instruments are unwrapped before delegating asynchronous callbacks. (#2784) - Remove import of `testing` package in non-tests builds of the `go.opentelemetry.io/otel` package. (#2786) ### Changed - The `WithLabelEncoder` option from the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` package is renamed to `WithAttributeEncoder`. (#2790) - The `LabelFilterSelector` interface from `go.opentelemetry.io/otel/sdk/metric/processor/reducer` is renamed to `AttributeFilterSelector`. The method included in the renamed interface also changed from `LabelFilterFor` to `AttributeFilterFor`. (#2790) - The `Metadata.Labels` method from the `go.opentelemetry.io/otel/sdk/metric/export` package is renamed to `Metadata.Attributes`. Consequentially, the `Record` type from the same package also has had the embedded method renamed. (#2790) ### Deprecated - The `Iterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated. Use the equivalent `Iterator.Attribute` method instead. (#2790) - The `Iterator.IndexedLabel` method in the `go.opentelemetry.io/otel/attribute` package is deprecated. Use the equivalent `Iterator.IndexedAttribute` method instead. (#2790) - The `MergeIterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated. Use the equivalent `MergeIterator.Attribute` method instead. (#2790) ### Removed - Removed the `Batch` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864) - Removed the `Measurement` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864) ## [0.29.0] - 2022-04-11 ### Added - The metrics global package was added back into several test files. (#2764) - The `Meter` function is added back to the `go.opentelemetry.io/otel/metric/global` package. This function is a convenience function equivalent to calling `global.MeterProvider().Meter(...)`. (#2750) ### Removed - Removed module the `go.opentelemetry.io/otel/sdk/export/metric`. Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2720) ### Changed - Don't panic anymore when setting a global MeterProvider to itself. (#2749) - Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` from `v0.12.1` to `v0.15.0`. This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibraryMetrics` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeMetrics`. (#2748) ## [1.6.3] - 2022-04-07 ### Fixed - Allow non-comparable global `MeterProvider`, `TracerProvider`, and `TextMapPropagator` types to be set. (#2772, #2773) ## [1.6.2] - 2022-04-06 ### Changed - Don't panic anymore when setting a global TracerProvider or TextMapPropagator to itself. (#2749) - Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from `v0.12.1` to `v0.15.0`. This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibrarySpans` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeSpans`. (#2748) ## [1.6.1] - 2022-03-28 ### Fixed - The `go.opentelemetry.io/otel/schema/*` packages now use the correct schema URL for their `SchemaURL` constant. Instead of using `"https://opentelemetry.io/schemas/v"` they now use the correct URL without a `v` prefix, `"https://opentelemetry.io/schemas/"`. 
(#2743, #2744) ### Security - Upgrade `go.opentelemetry.io/proto/otlp` from `v0.12.0` to `v0.12.1`. This includes an indirect upgrade of `github.com/grpc-ecosystem/grpc-gateway` which resolves [a vulnerability](https://nvd.nist.gov/vuln/detail/CVE-2019-11254) from `gopkg.in/yaml.v2` in version `v2.2.3`. (#2724, #2728) ## [1.6.0/0.28.0] - 2022-03-23 ### ⚠️ Notice ⚠️ This update is a breaking change of the unstable Metrics API. Code instrumented with the `go.opentelemetry.io/otel/metric` will need to be modified. ### Added - Add metrics exponential histogram support. New mapping functions have been made available in `sdk/metric/aggregator/exponential/mapping` for other OpenTelemetry projects to take dependencies on. (#2502) - Add Go 1.18 to our compatibility tests. (#2679) - Allow configuring the Sampler with the `OTEL_TRACES_SAMPLER` and `OTEL_TRACES_SAMPLER_ARG` environment variables. (#2305, #2517) - Add the `metric/global` for obtaining and setting the global `MeterProvider`. (#2660) ### Changed - The metrics API has been significantly changed to match the revised OpenTelemetry specification. High-level changes include: - Synchronous and asynchronous instruments are now handled by independent `InstrumentProvider`s. These `InstrumentProvider`s are managed with a `Meter`. - Synchronous and asynchronous instruments are grouped into their own packages based on value types. - Asynchronous callbacks can now be registered with a `Meter`. Be sure to check out the metric module documentation for more information on how to use the revised API. (#2587, #2660) ### Fixed - Fallback to general attribute limits when span specific ones are not set in the environment. (#2675, #2677) ## [1.5.0] - 2022-03-16 ### Added - Log the Exporters configuration in the TracerProviders message. (#2578) - Added support to configure the span limits with environment variables. The following environment variables are supported. (#2606, #2637) - `OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT` - `OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT` - `OTEL_SPAN_EVENT_COUNT_LIMIT` - `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT` - `OTEL_SPAN_LINK_COUNT_LIMIT` - `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT` If the provided environment variables are invalid (negative), the default values would be used. - Rename the `gc` runtime name to `go` (#2560) - Add resource container ID detection. (#2418) - Add span attribute value length limit. The new `AttributeValueLengthLimit` field is added to the `"go.opentelemetry.io/otel/sdk/trace".SpanLimits` type to configure this limit for a `TracerProvider`. The default limit for this resource is "unlimited". (#2637) - Add the `WithRawSpanLimits` option to `go.opentelemetry.io/otel/sdk/trace`. This option replaces the `WithSpanLimits` option. Zero or negative values will not be changed to the default value like `WithSpanLimits` does. Setting a limit to zero will effectively disable the related resource it limits and setting to a negative value will mean that resource is unlimited. Consequentially, limits should be constructed using `NewSpanLimits` and updated accordingly. (#2637) ### Changed - Drop oldest tracestate `Member` when capacity is reached. (#2592) - Add event and link drop counts to the exported data from the `oltptrace` exporter. (#2601) - Unify path cleaning functionally in the `otlpmetric` and `otlptrace` configuration. (#2639) - Change the debug message from the `sdk/trace.BatchSpanProcessor` to reflect the count is cumulative. (#2640) - Introduce new internal `envconfig` package for OTLP exporters. 
(#2608) - If `http.Request.Host` is empty, fall back to use `URL.Host` when populating `http.host` in the `semconv` packages. (#2661) ### Fixed - Remove the OTLP trace exporter limit of SpanEvents when exporting. (#2616) - Default to port `4318` instead of `4317` for the `otlpmetrichttp` and `otlptracehttp` client. (#2614, #2625) - Unlimited span limits are now supported (negative values). (#2636, #2637) ### Deprecated - Deprecated `"go.opentelemetry.io/otel/sdk/trace".WithSpanLimits`. Use `WithRawSpanLimits` instead. That option allows setting unlimited and zero limits, this option does not. This option will be kept until the next major version incremented release. (#2637) ## [1.4.1] - 2022-02-16 ### Fixed - Fix race condition in reading the dropped spans number for the `BatchSpanProcessor`. (#2615) ## [1.4.0] - 2022-02-11 ### Added - Use `OTEL_EXPORTER_ZIPKIN_ENDPOINT` environment variable to specify zipkin collector endpoint. (#2490) - Log the configuration of `TracerProvider`s, and `Tracer`s for debugging. To enable use a logger with Verbosity (V level) `>=1`. (#2500) - Added support to configure the batch span-processor with environment variables. The following environment variables are used. (#2515) - `OTEL_BSP_SCHEDULE_DELAY` - `OTEL_BSP_EXPORT_TIMEOUT` - `OTEL_BSP_MAX_QUEUE_SIZE`. - `OTEL_BSP_MAX_EXPORT_BATCH_SIZE` ### Changed - Zipkin exporter exports `Resource` attributes in the `Tags` field. (#2589) ### Deprecated - Deprecate module the `go.opentelemetry.io/otel/sdk/export/metric`. Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2382) - Deprecate `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets`. (#2445) ### Fixed - Fixed the instrument kind for noop async instruments to correctly report an implementation. (#2461) - Fix UDP packets overflowing with Jaeger payloads. (#2489, #2512) - Change the `otlpmetric.Client` interface's `UploadMetrics` method to accept a single `ResourceMetrics` instead of a slice of them. (#2491) - Specify explicit buckets in Prometheus example, fixing issue where example only has `+inf` bucket. (#2419, #2493) - W3C baggage will now decode urlescaped values. (#2529) - Baggage members are now only validated once, when calling `NewMember` and not also when adding it to the baggage itself. (#2522) - The order attributes are dropped from spans in the `go.opentelemetry.io/otel/sdk/trace` package when capacity is reached is fixed to be in compliance with the OpenTelemetry specification. Instead of dropping the least-recently-used attribute, the last added attribute is dropped. This drop order still only applies to attributes with unique keys not already contained in the span. If an attribute is added with a key already contained in the span, that attribute is updated to the new value being added. (#2576) ### Removed - Updated `go.opentelemetry.io/proto/otlp` from `v0.11.0` to `v0.12.0`. This version removes a number of deprecated methods. (#2546) - [`Metric.GetIntGauge()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntGauge) - [`Metric.GetIntHistogram()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntHistogram) - [`Metric.GetIntSum()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntSum) ## [1.3.0] - 2021-12-10 ### ⚠️ Notice ⚠️ We have updated the project minimum supported Go version to 1.16 ### Added - Added an internal Logger. This can be used by the SDK and API to provide users with feedback of the internal state. 
To enable verbose logs configure the logger which will print V(1) logs. For debugging information configure to print V(5) logs. (#2343) - Add the `WithRetry` `Option` and the `RetryConfig` type to the `go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp` package to specify retry behavior consistently. (#2425) - Add `SpanStatusFromHTTPStatusCodeAndSpanKind` to all `semconv` packages to return a span status code similar to `SpanStatusFromHTTPStatusCode`, but exclude `4XX` HTTP errors as span errors if the span is of server kind. (#2296) ### Changed - The `"go.opentelemetry.io/otel/exporter/otel/otlptrace/otlptracegrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2329) - The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2425) - The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".RetrySettings` type is renamed to `RetryConfig`. (#2425) - The `go.opentelemetry.io/otel/exporter/otel/*` gRPC exporters now default to using the host's root CA set if none are provided by the user and `WithInsecure` is not specified. (#2432) - Change `resource.Default` to be evaluated the first time it is called, rather than on import. This allows the caller the option to update `OTEL_RESOURCE_ATTRIBUTES` first, such as with `os.Setenv`. (#2371) ### Fixed - The `go.opentelemetry.io/otel/exporter/otel/*` exporters are updated to handle per-signal and universal endpoints according to the OpenTelemetry specification. Any per-signal endpoint set via an `OTEL_EXPORTER_OTLP__ENDPOINT` environment variable is now used without modification of the path. When `OTEL_EXPORTER_OTLP_ENDPOINT` is set, if it contains a path, that path is used as a base path which per-signal paths are appended to. (#2433) - Basic metric controller updated to use sync.Map to avoid blocking calls (#2381) - The `go.opentelemetry.io/otel/exporter/jaeger` correctly sets the `otel.status_code` value to be a string of `ERROR` or `OK` instead of an integer code. (#2439, #2440) ### Deprecated - Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithMaxAttempts` `Option`, use the new `WithRetry` `Option` instead. (#2425) - Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithBackoff` `Option`, use the new `WithRetry` `Option` instead. (#2425) ### Removed - Remove the metric Processor's ability to convert cumulative to delta aggregation temporality. (#2350) - Remove the metric Bound Instruments interface and implementations. (#2399) - Remove the metric MinMaxSumCount kind aggregation and the corresponding OTLP export path. (#2423) - Metric SDK removes the "exact" aggregator for histogram instruments, as it performed a non-standard aggregation for OTLP export (creating repeated Gauge points) and worked its way into a number of confusing examples. 
(#2348) ## [1.2.0] - 2021-11-12 ### Changed - Metric SDK `export.ExportKind`, `export.ExportKindSelector` types have been renamed to `aggregation.Temporality` and `aggregation.TemporalitySelector` respectively to keep in line with current specification and protocol along with built-in selectors (e.g., `aggregation.CumulativeTemporalitySelector`, ...). (#2274) - The Metric `Exporter` interface now requires a `TemporalitySelector` method instead of an `ExportKindSelector`. (#2274) - Metrics API cleanup. The `metric/sdkapi` package has been created to relocate the API-to-SDK interface: - The following interface types simply moved from `metric` to `metric/sdkapi`: `Descriptor`, `MeterImpl`, `InstrumentImpl`, `SyncImpl`, `BoundSyncImpl`, `AsyncImpl`, `AsyncRunner`, `AsyncSingleRunner`, and `AsyncBatchRunner` - The following struct types moved and are replaced with type aliases, since they are exposed to the user: `Observation`, `Measurement`. - The No-op implementations of sync and async instruments are no longer exported, new functions `sdkapi.NewNoopAsyncInstrument()` and `sdkapi.NewNoopSyncInstrument()` are provided instead. (#2271) - Update the SDK `BatchSpanProcessor` to export all queued spans when `ForceFlush` is called. (#2080, #2335) ### Added - Add the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002) - Added a new `schema` module to help parse Schema Files in OTEP 0152 format. (#2267) - Added a new `MapCarrier` to the `go.opentelemetry.io/otel/propagation` package to hold propagated cross-cutting concerns as a `map[string]string` held in memory. (#2334) ## [1.1.0] - 2021-10-27 ### Added - Add the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002) - Add the `go.opentelemetry.io/otel/semconv/v1.7.0` package. The package contains semantic conventions from the `v1.7.0` version of the OpenTelemetry specification. (#2320) - Add the `go.opentelemetry.io/otel/semconv/v1.6.1` package. The package contains semantic conventions from the `v1.6.1` version of the OpenTelemetry specification. (#2321) - Add the `go.opentelemetry.io/otel/semconv/v1.5.0` package. The package contains semantic conventions from the `v1.5.0` version of the OpenTelemetry specification. (#2322) - When upgrading from the `semconv/v1.4.0` package note the following name changes: - `K8SReplicasetUIDKey` -> `K8SReplicaSetUIDKey` - `K8SReplicasetNameKey` -> `K8SReplicaSetNameKey` - `K8SStatefulsetUIDKey` -> `K8SStatefulSetUIDKey` - `k8SStatefulsetNameKey` -> `K8SStatefulSetNameKey` - `K8SDaemonsetUIDKey` -> `K8SDaemonSetUIDKey` - `K8SDaemonsetNameKey` -> `K8SDaemonSetNameKey` ### Changed - Links added to a span will be dropped by the SDK if they contain an invalid span context (#2275). ### Fixed - The `"go.opentelemetry.io/otel/semconv/v1.4.0".HTTPServerAttributesFromHTTPRequest` now correctly only sets the HTTP client IP attribute even if the connection was routed with proxies and there are multiple addresses in the `X-Forwarded-For` header. (#2282, #2284) - The `"go.opentelemetry.io/otel/semconv/v1.4.0".NetAttributesFromHTTPRequest` function correctly handles IPv6 addresses as IP addresses and sets the correct net peer IP instead of the net peer hostname attribute. 
(#2283, #2285) - The simple span processor shutdown method deterministically returns the exporter error status if it simultaneously finishes when the deadline is reached. (#2290, #2289) ## [1.0.1] - 2021-10-01 ### Fixed - json stdout exporter no longer crashes due to concurrency bug. (#2265) ## [Metrics 0.24.0] - 2021-10-01 ### Changed - NoopMeterProvider is now private and NewNoopMeterProvider must be used to obtain a noopMeterProvider. (#2237) - The Metric SDK `Export()` function takes a new two-level reader interface for iterating over results one instrumentation library at a time. (#2197) - The former `"go.opentelemetry.io/otel/sdk/export/metric".CheckpointSet` is renamed `Reader`. - The new interface is named `"go.opentelemetry.io/otel/sdk/export/metric".InstrumentationLibraryReader`. ## [1.0.0] - 2021-09-20 This is the first stable release for the project. This release includes an API and SDK for the tracing signal that will comply with the stability guarantees defined by the projects [versioning policy](./VERSIONING.md). ### Added - OTLP trace exporter now sets the `SchemaURL` field in the exported telemetry if the Tracer has `WithSchemaURL` option. (#2242) ### Fixed - Slice-valued attributes can correctly be used as map keys. (#2223) ### Removed - Removed the `"go.opentelemetry.io/otel/exporters/zipkin".WithSDKOptions` function. (#2248) - Removed the deprecated package `go.opentelemetry.io/otel/oteltest`. (#2234) - Removed the deprecated package `go.opentelemetry.io/otel/bridge/opencensus/utils`. (#2233) - Removed deprecated functions, types, and methods from `go.opentelemetry.io/otel/attribute` package. Use the typed functions and methods added to the package instead. (#2235) - The `Key.Array` method is removed. - The `Array` function is removed. - The `Any` function is removed. - The `ArrayValue` function is removed. - The `AsArray` function is removed. ## [1.0.0-RC3] - 2021-09-02 ### Added - Added `ErrorHandlerFunc` to use a function as an `"go.opentelemetry.io/otel".ErrorHandler`. (#2149) - Added `"go.opentelemetry.io/otel/trace".WithStackTrace` option to add a stack trace when using `span.RecordError` or when panic is handled in `span.End`. (#2163) - Added typed slice attribute types and functionality to the `go.opentelemetry.io/otel/attribute` package to replace the existing array type and functions. (#2162) - `BoolSlice`, `IntSlice`, `Int64Slice`, `Float64Slice`, and `StringSlice` replace the use of the `Array` function in the package. - Added the `go.opentelemetry.io/otel/example/fib` example package. Included is an example application that computes Fibonacci numbers. (#2203) ### Changed - Metric instruments have been renamed to match the (feature-frozen) metric API specification: - ValueRecorder becomes Histogram - ValueObserver becomes Gauge - SumObserver becomes CounterObserver - UpDownSumObserver becomes UpDownCounterObserver The API exported from this project is still considered experimental. (#2202) - Metric SDK/API implementation type `InstrumentKind` moves into `sdkapi` sub-package. (#2091) - The Metrics SDK export record no longer contains a Resource pointer, the SDK `"go.opentelemetry.io/otel/sdk/trace/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. 

### Deprecated

- The `go.opentelemetry.io/otel/bridge/opencensus/utils` package is deprecated. All functionality from this package now exists in the `go.opentelemetry.io/otel/bridge/opencensus` package. The functions from that package should be used instead. (#2166)
- The `"go.opentelemetry.io/otel/attribute".Array` function and the related `ARRAY` value type is deprecated. Use the typed `*Slice` functions and types added to the package instead. (#2162)
- The `"go.opentelemetry.io/otel/attribute".Any` function is deprecated. Use the typed functions instead. (#2181)
- The `go.opentelemetry.io/otel/oteltest` package is deprecated. The `"go.opentelemetry.io/otel/sdk/trace/tracetest".SpanRecorder` can be registered with the default SDK (`go.opentelemetry.io/otel/sdk/trace`) as a `SpanProcessor` and used as a replacement for this deprecated package. (#2188)

### Removed

- Removed metrics test package `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#2105)

### Fixed

- The `fromEnv` detector no longer throws an error when `OTEL_RESOURCE_ATTRIBUTES` environment variable is not set or empty. (#2138)
- Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140)
- The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169)
- Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120)
- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly omit timestamps. (#2195)
- Fixed typos in resources.go. (#2201)

## [1.0.0-RC2] - 2021-07-26

### Added

- Added `WithOSDescription` resource configuration option to set OS (Operating System) description resource attribute (`os.description`). (#1840)
- Added `WithOS` resource configuration option to set all OS (Operating System) resource attributes at once. (#1840)
- Added the `WithRetry` option to the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package. This option is a replacement for the removed `WithMaxAttempts` and `WithBackoff` options. (#2095)
- Added API `LinkFromContext` to return Link which encapsulates SpanContext from provided context and also encapsulates attributes. (#2115)
- Added a new `Link` type under the SDK `otel/sdk/trace` package that counts the number of attributes that were dropped for surpassing the `AttributePerLinkCountLimit` configured in the Span's `SpanLimits`. This new type replaces the equal-named API `Link` type found in the `otel/trace` package for most usages within the SDK. For example, instances of this type are now returned by the `Links()` function of `ReadOnlySpan`s provided in places like the `OnEnd` function of `SpanProcessor` implementations. (#2118)
- Added the `SpanRecorder` type to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package. This type can be used with the default SDK as a `SpanProcessor` during testing. (#2132)
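A minimal sketch of using the `SpanRecorder` from #2132 as a `SpanProcessor` in a test; the test name, tracer name, and assertion are illustrative assumptions.

```go
package example_test

import (
	"context"
	"testing"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/sdk/trace/tracetest"
)

func TestSpansAreRecorded(t *testing.T) {
	// Register the SpanRecorder as an ordinary SpanProcessor of the default SDK.
	sr := tracetest.NewSpanRecorder()
	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr))

	_, span := tp.Tracer("test").Start(context.Background(), "operation")
	span.End()

	if got := len(sr.Ended()); got != 1 {
		t.Fatalf("expected 1 ended span, got %d", got)
	}
}
```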

### Changed

- The `SpanModels` function is now exported from the `go.opentelemetry.io/otel/exporters/zipkin` package to convert OpenTelemetry spans into Zipkin model spans. (#2027)
- Rename the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".RetrySettings` to `RetryConfig`. (#2095)

### Deprecated

- The `TextMapCarrier` and `TextMapPropagator` from the `go.opentelemetry.io/otel/oteltest` package and their associated creation functions (`TextMapCarrier`, `NewTextMapPropagator`) are deprecated. (#2114)
- The `Harness` type from the `go.opentelemetry.io/otel/oteltest` package and its associated creation function, `NewHarness` are deprecated and will be removed in the next release. (#2123)
- The `TraceStateFromKeyValues` function from the `go.opentelemetry.io/otel/oteltest` package is deprecated. Use the `trace.ParseTraceState` function instead. (#2122)

### Removed

- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/jaeger`. (#2020)
- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/zipkin`. (#2020)
- Removed the `"go.opentelemetry.io/otel/sdk/resource".WithBuiltinDetectors` function. The explicit `With*` options for every built-in detector should be used instead. (#2026, #2097)
- Removed the `WithMaxAttempts` and `WithBackoff` options from the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package. The retry logic of the package has been updated to match the `otlptracegrpc` package and accordingly a `WithRetry` option is added that should be used instead. (#2095)
- Removed `DroppedAttributeCount` field from `otel/trace.Link` struct. (#2118)

### Fixed

- When using WithNewRoot, don't use the parent context for making sampling decisions. (#2032)
- `oteltest.Tracer` now creates a valid `SpanContext` when using `WithNewRoot`. (#2073)
- OS type detector now sets the correct `dragonflybsd` value for DragonFly BSD. (#2092)
- The OTel span status is correctly transformed into the OTLP status in the `go.opentelemetry.io/otel/exporters/otlp/otlptrace` package. This fix will by default set the status to `Unset` if it is not explicitly set to `Ok` or `Error`. (#2099, #2102)
- The `Inject` method for the `"go.opentelemetry.io/otel/propagation".TraceContext` type no longer injects empty `tracestate` values. (#2108)
- Use `6831` as default Jaeger agent port instead of `6832`. (#2131)

## [Experimental Metrics v0.22.0] - 2021-07-19

### Added

- Adds HTTP support for OTLP metrics exporter. (#2022)

### Removed

- Removed the deprecated package `go.opentelemetry.io/otel/exporters/metric/prometheus`. (#2020)

## [1.0.0-RC1] / 0.21.0 - 2021-06-18

With this release we are introducing a split in module versions.
The tracing API and SDK are entering the `v1.0.0` Release Candidate phase with `v1.0.0-RC1` while the experimental metrics API and SDK continue with `v0.x` releases at `v0.21.0`.
Modules at major version 1 or greater will not depend on modules with major version 0.

### Added

- Adds `otlpgrpc.WithRetry` option for configuring the retry policy for transient errors on the otlp/gRPC exporter.
  (#1832)
  - The following status codes are defined as transient errors:
    | gRPC Status Code | Description        |
    | ---------------- | ------------------ |
    | 1                | Cancelled          |
    | 4                | Deadline Exceeded  |
    | 8                | Resource Exhausted |
    | 10               | Aborted            |
    | 11               | Out of Range       |
    | 14               | Unavailable        |
    | 15               | Data Loss          |
- Added `Status` type to the `go.opentelemetry.io/otel/sdk/trace` package to represent the status of a span. (#1874)
- Added `SpanStub` type and its associated functions to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package. This type can be used as a testing replacement for the `SpanSnapshot` that was removed from the `go.opentelemetry.io/otel/sdk/trace` package. (#1873)
- Adds support for scheme in `OTEL_EXPORTER_OTLP_ENDPOINT` according to the spec. (#1886)
- Adds `trace.WithSchemaURL` option for configuring the tracer with a Schema URL. (#1889)
- Added an example of using OpenTelemetry Go as a trace context forwarder. (#1912)
- `ParseTraceState` is added to the `go.opentelemetry.io/otel/trace` package. It can be used to decode a `TraceState` from a `tracestate` header string value. (#1937)
- Added `Len` method to the `TraceState` type in the `go.opentelemetry.io/otel/trace` package. This method returns the number of list-members the `TraceState` holds. (#1937)
- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace` that defines a trace exporter that uses an `otlptrace.Client` to send data. Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` implementing a gRPC `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to setup and install an `otlptrace.Exporter` in tracing. (#1922)
- Added `Baggage`, `Member`, and `Property` types to the `go.opentelemetry.io/otel/baggage` package along with their related functions. (#1967) (A usage sketch follows at the end of this section.)
- Added `ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext` functions to the `go.opentelemetry.io/otel/baggage` package. These functions replace the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions from that package and directly work with the new `Baggage` type. (#1967)
- The `OTEL_SERVICE_NAME` environment variable is the preferred source for `service.name`, used by the environment resource detector if a service name is present both there and in `OTEL_RESOURCE_ATTRIBUTES`. (#1969)
- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` implementing an HTTP `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to setup and install an `otlptrace.Exporter` in tracing. (#1963)
- Changes `go.opentelemetry.io/otel/sdk/resource.NewWithAttributes` to require a schema URL. The old function is still available as `resource.NewSchemaless`. This is a breaking change. (#1938)
- Several builtin resource detectors now correctly populate the schema URL. (#1938)
- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` that defines a metrics exporter that uses an `otlpmetric.Client` to send data.
- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` implementing a gRPC `otlpmetric.Client` and offers convenience functions, `New` and `NewUnstarted`, to create an `otlpmetric.Exporter`. (#1991)
- Added `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter. (#2005)
- Added `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` exporter. (#2005)
- Added a `TracerProvider()` method to the `"go.opentelemetry.io/otel/trace".Span` interface. This can be used to obtain a `TracerProvider` from a given span that utilizes the same trace processing pipeline. (#2009)
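The `Baggage`, `Member`, and context helper functions added in #1967 fit together roughly as in the sketch below; the `user.id` member and its value are made-up examples.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// Build a Baggage from Members and attach it to a context.
	m, err := baggage.NewMember("user.id", "42")
	if err != nil {
		log.Fatal(err)
	}
	bag, err := baggage.New(m)
	if err != nil {
		log.Fatal(err)
	}

	ctx := baggage.ContextWithBaggage(context.Background(), bag)

	// Later, read the value back from the context.
	fmt.Println(baggage.FromContext(ctx).Member("user.id").Value()) // 42
}
```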

### Changed

- Make `NewSplitDriver` from `go.opentelemetry.io/otel/exporters/otlp` take variadic arguments instead of a `SplitConfig` item. `NewSplitDriver` now automatically implements an internal `noopDriver` for `SplitConfig` fields that are not initialized. (#1798)
- `resource.New()` now creates a Resource without builtin detectors. Previous behavior is now achieved by using `WithBuiltinDetectors` Option. (#1810)
- Move the `Event` type from the `go.opentelemetry.io/otel` package to the `go.opentelemetry.io/otel/sdk/trace` package. (#1846)
- CI builds validate against last two versions of Go, dropping 1.14 and adding 1.16. (#1865)
- BatchSpanProcessor now reports export failures when calling `ForceFlush()` method. (#1860)
- `Set.Encoded(Encoder)` no longer caches the result of an encoding. (#1855)
- Renamed `CloudZoneKey` to `CloudAvailabilityZoneKey` in Resource semantic conventions according to spec. (#1871)
- The `StatusCode` and `StatusMessage` methods of the `ReadOnlySpan` interface and the `Span` produced by the `go.opentelemetry.io/otel/sdk/trace` package have been replaced with a single `Status` method. This method returns the status of a span using the new `Status` type. (#1874)
- Updated `ExportSpans` method of the `SpanExporter` interface type to accept `ReadOnlySpan`s instead of the removed `SpanSnapshot`. This brings the export interface into compliance with the specification in that it now accepts an explicitly immutable type instead of just an implied one. (#1873)
- Unembed `SpanContext` in `Link`. (#1877)
- Generate Semantic conventions from the specification YAML. (#1891)
- Spans created by the global `Tracer` obtained from `go.opentelemetry.io/otel`, prior to a functioning `TracerProvider` being set, now propagate the span context from their parent if one exists. (#1901)
- The `"go.opentelemetry.io/otel".Tracer` function now accepts tracer options. (#1902)
- Move the `go.opentelemetry.io/otel/unit` package to `go.opentelemetry.io/otel/metric/unit`. (#1903)
- Changed `go.opentelemetry.io/otel/trace.TracerConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
- Changed `go.opentelemetry.io/otel/trace.SpanConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
- Changed `span.End()` to only accept Options that are allowed at `End()`. (#1921)
- Changed `go.opentelemetry.io/otel/metric.InstrumentConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
- Changed `go.opentelemetry.io/otel/metric.MeterConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
- Refactored option types according to the contribution style guide. (#1882)
- Move the `go.opentelemetry.io/otel/trace.TraceStateFromKeyValues` function to the `go.opentelemetry.io/otel/oteltest` package. This function is preserved for testing purposes where it may be useful to create a `TraceState` from `attribute.KeyValue`s, but it is not intended for production use. The new `ParseTraceState` function should be used to create a `TraceState`. (#1931)
- Updated `MarshalJSON` method of the `go.opentelemetry.io/otel/trace.TraceState` type to marshal the type into the string representation of the `TraceState`. (#1931)
- The `TraceState.Delete` method from the `go.opentelemetry.io/otel/trace` package no longer returns an error in addition to a `TraceState`. (#1931)
- Updated `Get` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
- Updated `Insert` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a pair of `string`s instead of an `attribute.KeyValue` type. (#1931) (A usage sketch follows at the end of this section.)
- Updated `Delete` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/stdout` package. (#1985)
- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/metric/prometheus` package. (#1985)
- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1985)
- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1985)
- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
- Renamed `NewUnstartedExporter` to `NewUnstarted` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
- The `go.opentelemetry.io/otel/semconv` package has been moved to `go.opentelemetry.io/otel/semconv/v1.4.0` to allow for multiple [telemetry schema](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md) versions to be used concurrently. (#1987)
- Metrics test helpers in `go.opentelemetry.io/otel/oteltest` have been moved to `go.opentelemetry.io/otel/metric/metrictest`. (#1988)
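The `ParseTraceState` addition (#1937) and the string-based `TraceState` methods changed in #1931 combine as in this sketch; the vendor keys and values are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	// Decode a tracestate header value into a TraceState.
	ts, err := trace.ParseTraceState("vendor1=opaque,vendor2=value")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(ts.Len())          // 2
	fmt.Println(ts.Get("vendor1")) // opaque

	// Insert returns a new TraceState; the receiver is unchanged.
	ts2, err := ts.Insert("vendor3", "added")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ts2.String())
}
```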

### Deprecated

- The `go.opentelemetry.io/otel/exporters/metric/prometheus` is deprecated, use `go.opentelemetry.io/otel/exporters/prometheus` instead. (#1993)
- The `go.opentelemetry.io/otel/exporters/trace/jaeger` is deprecated, use `go.opentelemetry.io/otel/exporters/jaeger` instead. (#1993)
- The `go.opentelemetry.io/otel/exporters/trace/zipkin` is deprecated, use `go.opentelemetry.io/otel/exporters/zipkin` instead. (#1993)

### Removed

- Removed `resource.WithoutBuiltin()`. Use `resource.New()`. (#1810)
- Unexported types `resource.FromEnv`, `resource.Host`, and `resource.TelemetrySDK`. Use the corresponding `With*()` options to use them individually. (#1810)
- Removed the `Tracer` and `IsRecording` methods from the `ReadOnlySpan` in the `go.opentelemetry.io/otel/sdk/trace`. The `Tracer` method is not required to be included in this interface and given the mutable nature of the tracer that is associated with a span, this method is not appropriate. The `IsRecording` method returns if the span is recording or not. A read-only span value does not need to know if updates to it will be recorded or not. By definition, it cannot be updated so there is no point in communicating if an update is recorded. (#1873)
- Removed the `SpanSnapshot` type from the `go.opentelemetry.io/otel/sdk/trace` package. The use of this type has been replaced with the use of the explicitly immutable `ReadOnlySpan` type. When a concrete representation of a read-only span is needed for testing, the newly added `SpanStub` in the `go.opentelemetry.io/otel/sdk/trace/tracetest` package should be used. (#1873)
- Removed the `Tracer` method from the `Span` interface in the `go.opentelemetry.io/otel/trace` package.
  Using the same tracer that created a span introduces the error where an instrumentation library's `Tracer` is used by other code instead of their own.
  The `"go.opentelemetry.io/otel".Tracer` function or a `TracerProvider` should be used to acquire a library-specific `Tracer` instead. (#1900)
  - The `TracerProvider()` method on the `Span` interface may also be used to obtain a `TracerProvider` using the same trace processing pipeline. (#2009)
- The `http.url` attribute generated by `HTTPClientAttributesFromHTTPRequest` will no longer include username or password information. (#1919)
- Removed `IsEmpty` method of the `TraceState` type in the `go.opentelemetry.io/otel/trace` package in favor of using the added `TraceState.Len` method. (#1931)
- Removed `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions in the `go.opentelemetry.io/otel/baggage` package. Handling of baggage is now done using the added `Baggage` type and related context functions (`ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext`) in that package. (#1967)
- The `InstallNewPipeline` and `NewExportPipeline` creation functions in all the exporters (prometheus, otlp, stdout, jaeger, and zipkin) have been removed. These functions were deemed premature attempts to provide convenience that did not achieve this aim. (#1985)
- The `go.opentelemetry.io/otel/exporters/otlp` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace` instead. (#1990)
- The `go.opentelemetry.io/otel/exporters/stdout` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` or `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` instead. (#2005)

### Fixed

- Only report errors from the `"go.opentelemetry.io/otel/sdk/resource".Environment` function when they are not `nil`. (#1850, #1851)
- The `Shutdown` method of the simple `SpanProcessor` in the `go.opentelemetry.io/otel/sdk/trace` package now honors the context deadline or cancellation. (#1616, #1856)
- BatchSpanProcessor now drops span batches that failed to be exported. (#1860)
- Use `http://localhost:14268/api/traces` as default Jaeger collector endpoint instead of `http://localhost:14250`. (#1898)
- Allow trailing and leading whitespace in the parsing of a `tracestate` header. (#1931)
- Add logic to determine if the channel is closed to fix Jaeger exporter test panic with close closed channel. (#1870, #1973)
- Avoid transport security when OTLP endpoint is a Unix socket. (#2001)

### Security

## [0.20.0] - 2021-04-23

### Added

- The OTLP exporter now has two new convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install the exporter in tracing and metrics pipelines. (#1373)
- Adds semantic conventions for exceptions. (#1492)
- Added Jaeger Environment variables: `OTEL_EXPORTER_JAEGER_AGENT_HOST`, `OTEL_EXPORTER_JAEGER_AGENT_PORT`. These environment variables can be used to override Jaeger agent hostname and port. (#1752)
- Option `ExportTimeout` was added to batch span processor. (#1755)
- `trace.TraceFlags` is now a defined type over `byte` and `WithSampled(bool) TraceFlags` and `IsSampled() bool` methods have been added to it. (#1770)
- The `Event` and `Link` struct types from the `go.opentelemetry.io/otel` package now include a `DroppedAttributeCount` field to record the number of attributes that were not recorded due to configured limits being reached. (#1771)
- The Jaeger exporter now reports dropped attributes for a Span event in the exported log.
  (#1771)
- Adds test to check BatchSpanProcessor ignores `OnEnd` and `ForceFlush` post `Shutdown`. (#1772)
- Extract resource attributes from the `OTEL_RESOURCE_ATTRIBUTES` environment variable and merge them with the `resource.Default` resource as well as resources provided to the `TracerProvider` and metric `Controller`. (#1785)
- Added `WithOSType` resource configuration option to set OS (Operating System) type resource attribute (`os.type`). (#1788)
- Added `WithProcess*` resource configuration options to set Process resource attributes. (#1788)
  - `process.pid`
  - `process.executable.name`
  - `process.executable.path`
  - `process.command_args`
  - `process.owner`
  - `process.runtime.name`
  - `process.runtime.version`
  - `process.runtime.description`
- Adds `k8s.node.name` and `k8s.node.uid` attribute keys to the `semconv` package. (#1789)
- Added support for configuring OTLP/HTTP and OTLP/gRPC Endpoints, TLS Certificates, Headers, Compression and Timeout via Environment Variables. (#1758, #1769 and #1811)
  - `OTEL_EXPORTER_OTLP_ENDPOINT`
  - `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT`
  - `OTEL_EXPORTER_OTLP_METRICS_ENDPOINT`
  - `OTEL_EXPORTER_OTLP_HEADERS`
  - `OTEL_EXPORTER_OTLP_TRACES_HEADERS`
  - `OTEL_EXPORTER_OTLP_METRICS_HEADERS`
  - `OTEL_EXPORTER_OTLP_COMPRESSION`
  - `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION`
  - `OTEL_EXPORTER_OTLP_METRICS_COMPRESSION`
  - `OTEL_EXPORTER_OTLP_TIMEOUT`
  - `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT`
  - `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT`
  - `OTEL_EXPORTER_OTLP_CERTIFICATE`
  - `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE`
  - `OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE`
- Adds `otlpgrpc.WithTimeout` option for configuring timeout to the otlp/gRPC exporter. (#1821)
- Adds `jaeger.WithMaxPacketSize` option for configuring maximum UDP packet size used when connecting to the Jaeger agent. (#1853)

### Fixed

- The `Span.IsRecording` implementation from `go.opentelemetry.io/otel/sdk/trace` always returns false when not being sampled. (#1750)
- The Jaeger exporter now correctly sets tags for the Span status code and message. This means it uses the correct tag keys (`"otel.status_code"`, `"otel.status_description"`) and does not set the status message as a tag unless it is set on the span. (#1761)
- The Jaeger exporter now correctly records Span event's names using the `"event"` key for a tag. Additionally, this tag is overridden, as specified in the OTel specification, if the event contains an attribute with that key. (#1768)
- Zipkin Exporter: Ensure mapping between OTel and Zipkin span data complies with the specification. (#1688)
- Fixed typo for default service name in Jaeger Exporter. (#1797)
- Fix flaky OTLP for the reconnection of the client connection. (#1527, #1814)
- Fix Jaeger exporter dropping of span batches that exceed the UDP packet size limit. Instead, the exporter now splits the batch into smaller sendable batches. (#1828)

### Changed

- Span `RecordError` now records an `exception` event to comply with the semantic convention specification. (#1492)
- Jaeger exporter was updated to use thrift v0.14.1. (#1712)
- Migrate from using internally built and maintained version of the OTLP to the one hosted at `go.opentelemetry.io/proto/otlp`. (#1713)
- Migrate from using `github.com/gogo/protobuf` to `google.golang.org/protobuf` to match `go.opentelemetry.io/proto/otlp`. (#1713)
- The storage of a local or remote Span in a `context.Context` using its SpanContext is unified to store just the current Span. The Span's SpanContext can now self-identify as being remote or not.
  This means that `"go.opentelemetry.io/otel/trace".ContextWithRemoteSpanContext` will now overwrite any existing current Span, not just existing remote Spans, and make it the current Span in a `context.Context`. (#1731)
- Improve OTLP/gRPC exporter connection errors. (#1737)
- Information about a parent span context in a `"go.opentelemetry.io/otel/export/trace".SpanSnapshot` is unified in a new `Parent` field. The existing `ParentSpanID` and `HasRemoteParent` fields are removed in favor of this. (#1748)
- The `ParentContext` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is updated to hold a `context.Context` containing the parent span. This changes it to make `SamplingParameters` conform with the OpenTelemetry specification. (#1749)
- Updated Jaeger Environment Variables: `JAEGER_ENDPOINT`, `JAEGER_USER`, `JAEGER_PASSWORD` to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, `OTEL_EXPORTER_JAEGER_PASSWORD` in compliance with OTel specification. (#1752)
- Modify `BatchSpanProcessor.ForceFlush` to abort after timeout/cancellation. (#1757)
- The `DroppedAttributeCount` field of the `Span` in the `go.opentelemetry.io/otel` package now only represents the number of attributes dropped for the span itself. It no longer is a conglomerate of itself, events, and link attributes that have been dropped. (#1771)
- Make `ExportSpans` in Jaeger Exporter honor context deadline. (#1773)
- Modify Zipkin Exporter default service name, use default resource's serviceName instead of empty. (#1777)
- The `go.opentelemetry.io/otel/sdk/export/trace` package is merged into the `go.opentelemetry.io/otel/sdk/trace` package. (#1778)
- The prometheus.InstallNewPipeline example is moved from comment to example test (#1796)
- The convenience functions for the stdout exporter have been updated to return the `TracerProvider` implementation and enable the shutdown of the exporter. (#1800)
- Replace the flush function returned from the Jaeger exporter's convenience creation functions (`InstallNewPipeline` and `NewExportPipeline`) with the `TracerProvider` implementation they create. This enables the caller to shutdown and flush using the related `TracerProvider` methods. (#1822)
- Updated the Jaeger exporter to have a default endpoint, `http://localhost:14250`, for the collector. (#1824)
- Changed the function `WithCollectorEndpoint` in the Jaeger exporter to no longer accept an endpoint as an argument. The endpoint can be passed with the `CollectorEndpointOption` using the `WithEndpoint` function or by setting the `OTEL_EXPORTER_JAEGER_ENDPOINT` environment variable value appropriately. (#1824)
- The Jaeger exporter no longer batches exported spans itself, instead it relies on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
- The Jaeger exporter creation functions (`NewRawExporter`, `NewExportPipeline`, and `InstallNewPipeline`) no longer accept the removed `Option` type as a variadic argument. (#1830)

### Removed

- Removed Jaeger Environment variables: `JAEGER_SERVICE_NAME`, `JAEGER_DISABLED`, `JAEGER_TAGS`. These environment variables will no longer be used to override values of the Jaeger exporter. (#1752)
- No longer set the links for a `Span` in `go.opentelemetry.io/otel/sdk/trace` that is configured to be a new root. This is unspecified behavior that the OpenTelemetry community plans to standardize in the future. To prevent backwards incompatible changes when it is specified, these links are removed.
  (#1726)
- Setting error status while recording error with Span from oteltest package. (#1729)
- The concept of a remote and local Span stored in a context is unified to just the current Span. Because of this, `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed. Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span. If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731)
- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed. This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749)
- The `trace.FlagsDebug` and `trace.FlagsDeferred` constants have been removed and will be localized to the B3 propagator. (#1770)
- Remove `Process` configuration, `WithProcessFromEnv` and `ProcessFromEnv`, and type from the Jaeger exporter package. The information that could be configured in the `Process` struct should be configured in a `Resource` instead. (#1776, #1804)
- Remove the `WithDisabled` option from the Jaeger exporter. To disable the exporter unregister it from the `TracerProvider` or use a no-operation `TracerProvider`. (#1806)
- Removed the functions `CollectorEndpointFromEnv` and `WithCollectorEndpointOptionFromEnv` from the Jaeger exporter. These functions for retrieving specific environment variable values are redundant of other internal functions and are not intended for end user use. (#1824)
- Removed the Jaeger exporter `WithSDKOptions` `Option`. This option was used to set SDK options for the exporter creation convenience functions. These functions are provided as a way to easily setup or install the exporter with what are deemed reasonable SDK settings for common use cases. If the SDK needs to be configured differently, the `NewRawExporter` function and direct setup of the SDK with the desired settings should be used. (#1825)
- The `WithBufferMaxCount` and `WithBatchMaxCount` `Option`s from the Jaeger exporter are removed. The exporter no longer batches exports, instead relying on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
- The Jaeger exporter `Option` type is removed. The type is no longer used by the exporter to configure anything. All the previous configurations these options provided were duplicates of SDK configuration. They have been removed in favor of using the SDK configuration, focusing the exporter configuration on only the endpoints it will send telemetry to. (#1830)

## [0.19.0] - 2021-03-18

### Added

- Added `Marshaler` config option to `otlphttp` to enable otlp over json or protobufs. (#1586)
- A `ForceFlush` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to flush all registered `SpanProcessor`s. (#1608)
- Added `WithSampler` and `WithSpanLimits` to tracer provider. (#1633, #1702)
- `"go.opentelemetry.io/otel/trace".SpanContext` now has a `remote` property, and `IsRemote()` predicate, that is true when the `SpanContext` has been extracted from remote context data. (#1701)
- A `Valid` method to the `"go.opentelemetry.io/otel/attribute".KeyValue` type. (#1703)

### Changed

- `trace.SpanContext` is now immutable and has no exported fields. (#1573)
  - `trace.NewSpanContext()` can be used in conjunction with the `trace.SpanContextConfig` struct to initialize a new `SpanContext` where all values are known.
- Update the `ForceFlush` method signature of the `"go.opentelemetry.io/otel/sdk/trace".SpanProcessor` to accept a `context.Context` and return an error. (#1608)
- Update the `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to return an error on shutdown failure. (#1608)
- The SimpleSpanProcessor will now shut down the enclosed `SpanExporter` and gracefully ignore subsequent calls to `OnEnd` after `Shutdown` is called. (#1612)
- `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithPusher` is replaced with `WithExporter` to provide consistent naming across the project. (#1656)
- Added non-empty string check for trace `Attribute` keys. (#1659)
- Add `description` to SpanStatus only when `StatusCode` is set to error. (#1662)
- Jaeger exporter falls back to `resource.Default`'s `service.name` if the exported Span does not have one. (#1673)
- Jaeger exporter populates Jaeger's Span Process from Resource. (#1673)
- Renamed the `LabelSet` method of `"go.opentelemetry.io/otel/sdk/resource".Resource` to `Set`. (#1692)
- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1693)
- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1693)

### Removed

- Removed `serviceName` parameter from Zipkin exporter and uses resource instead. (#1549)
- Removed `WithConfig` from tracer provider to avoid overriding configuration. (#1633)
- Removed the exported `SimpleSpanProcessor` and `BatchSpanProcessor` structs. These are now returned as a SpanProcessor interface from their respective constructors. (#1638)
- Removed `WithRecord()` from `trace.SpanOption` when creating a span. (#1660)
- Removed setting status to `Error` while recording an error as a span event in `RecordError`. (#1663)
- Removed `jaeger.WithProcess` configuration option. (#1673)
- Removed `ApplyConfig` method from `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` and the now unneeded `Config` struct. (#1693)

### Fixed

- Jaeger Exporter: Ensure mapping between OTEL and Jaeger span data complies with the specification. (#1626)
- `SamplingResult.TraceState` is correctly propagated to a newly created span's `SpanContext`. (#1655)
- The `otel-collector` example now correctly flushes metric events prior to shutting down the exporter. (#1678)
- Do not set span status message in `SpanStatusFromHTTPStatusCode` if it can be inferred from `http.status_code`. (#1681)
- Synchronization issues in global trace delegate implementation. (#1686)
- Reduced excess memory usage by global `TracerProvider`. (#1687)

## [0.18.0] - 2021-03-03

### Added

- Added `resource.Default()` for use with meter and tracer providers. (#1507)
- `AttributePerEventCountLimit` and `AttributePerLinkCountLimit` for `SpanLimits`. (#1535)
- Added `Keys()` method to `propagation.TextMapCarrier` and `propagation.HeaderCarrier` to adapt `http.Header` to this interface. (#1544)
- Added `code` attributes to `go.opentelemetry.io/otel/semconv` package. (#1558)
- Compatibility testing suite in the CI system for the following systems.
  (#1567)
  | OS      | Go Version | Architecture |
  | ------- | ---------- | ------------ |
  | Ubuntu  | 1.15       | amd64        |
  | Ubuntu  | 1.14       | amd64        |
  | Ubuntu  | 1.15       | 386          |
  | Ubuntu  | 1.14       | 386          |
  | MacOS   | 1.15       | amd64        |
  | MacOS   | 1.14       | amd64        |
  | Windows | 1.15       | amd64        |
  | Windows | 1.14       | amd64        |
  | Windows | 1.15       | 386          |
  | Windows | 1.14       | 386          |

### Changed

- Replaced interface `oteltest.SpanRecorder` with its existing implementation `StandardSpanRecorder`. (#1542)
- Default span limit values to 128. (#1535)
- Rename `MaxEventsPerSpan`, `MaxAttributesPerSpan` and `MaxLinksPerSpan` to `EventCountLimit`, `AttributeCountLimit` and `LinkCountLimit`, and move these fields into `SpanLimits`. (#1535)
- Renamed the `otel/label` package to `otel/attribute`. (#1541)
- Vendor the Jaeger exporter's dependency on Apache Thrift. (#1551)
- Parallelize the CI linting and testing. (#1567)
- Stagger timestamps in exact aggregator tests. (#1569)
- Changed all examples to use `WithBatchTimeout(5 * time.Second)` rather than `WithBatchTimeout(5)`. (#1621)
- Prevent end-users from implementing some interfaces (#1575)

  ```
  "otel/exporters/otlp/otlphttp".Option
  "otel/exporters/stdout".Option
  "otel/oteltest".Option
  "otel/trace".TracerOption
  "otel/trace".SpanOption
  "otel/trace".EventOption
  "otel/trace".LifeCycleOption
  "otel/trace".InstrumentationOption
  "otel/sdk/resource".Option
  "otel/sdk/trace".ParentBasedSamplerOption
  "otel/sdk/trace".ReadOnlySpan
  "otel/sdk/trace".ReadWriteSpan
  ```

### Removed

- Removed attempt to resample spans upon changing the span name with `span.SetName()`. (#1545)
- The `test-benchmark` is no longer a dependency of the `precommit` make target. (#1567)
- Removed the `test-386` make target. This was replaced with a full compatibility testing suite (i.e. multi OS/arch) in the CI system. (#1567)

### Fixed

- The sequential timing check of timestamps in the stdout exporter is now setup explicitly to be sequential (#1571). (#1572)
- Windows build of Jaeger tests now compiles with OS specific functions (#1576). (#1577)
- The sequential timing check of timestamps of go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue is now setup explicitly to be sequential (#1578). (#1579)
- Validate tracestate header keys with vendors according to the W3C TraceContext specification (#1475). (#1581)
- The OTLP exporter includes related labels for translations of a GaugeArray (#1563). (#1570)

## [0.17.0] - 2021-02-12

### Changed

- Rename project default branch from `master` to `main`. (#1505)
- Reverse order in which `Resource` attributes are merged, per change in spec. (#1501)
- Add tooling to maintain "replace" directives in go.mod files automatically. (#1528)
- Create new modules: otel/metric, otel/trace, otel/oteltest, otel/sdk/export/metric, otel/sdk/metric (#1528)
- Move metric-related public global APIs from otel to otel/metric/global. (#1528)

### Fixed

- Fixed otlpgrpc reconnection issue.
- The example code in the README.md of `go.opentelemetry.io/otel/exporters/otlp` is moved to a compiled example test and used the new `WithAddress` instead of `WithEndpoint`. (#1513)
- The otel-collector example now uses the default OTLP receiver port of the collector.

## [0.16.0] - 2021-01-13

### Added

- Add the `ReadOnlySpan` and `ReadWriteSpan` interfaces to provide better control for accessing span data. (#1360)
- `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369)
- Added documentation about the project's versioning policy.
  (#1388)
- Added `NewSplitDriver` for OTLP exporter that allows sending traces and metrics to different endpoints. (#1418)
- Added codeql workflow to GitHub Actions (#1428)
- Added Gosec workflow to GitHub Actions (#1429)
- Add new HTTP driver for OTLP exporter in `exporters/otlp/otlphttp`. Currently it only supports the binary protobuf payloads. (#1420)
- Add an OpenCensus exporter bridge. (#1444)

### Changed

- Rename `internal/testing` to `internal/internaltest`. (#1449)
- Rename `export.SpanData` to `export.SpanSnapshot` and use it only for exporting spans. (#1360)
- Store the parent's full `SpanContext` rather than just its span ID in the `span` struct. (#1360)
- Improve span duration accuracy. (#1360)
- Migrated CI/CD from CircleCI to GitHub Actions (#1382)
- Remove duplicate checkout from GitHub Actions workflow (#1407)
- Metric `array` aggregator renamed `exact` to match its `aggregation.Kind` (#1412)
- Metric `exact` aggregator includes per-point timestamps (#1412)
- Metric stdout exporter uses MinMaxSumCount aggregator for ValueRecorder instruments (#1412)
- `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369)
- Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369)
- Unify endpoint API related to the OTel exporter. (#1401)
- Optimize metric histogram aggregator to re-use its slice of buckets. (#1435)
- Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (#1430)
- Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434)
- `SamplingResult` now passed a `Tracestate` from the parent `SpanContext` (#1432)
- Moved gRPC driver for OTLP exporter to `exporters/otlp/otlpgrpc`. (#1420)
- The `TraceContext` propagator now correctly propagates `TraceState` through the `SpanContext`. (#1447)
- Metric Push and Pull Controller components are combined into a single "basic" Controller:
  - `WithExporter()` and `Start()` to configure Push behavior
  - `Start()` is optional; use `Collect()` and `ForEach()` for Pull behavior
  - `Start()` and `Stop()` accept Context. (#1378)
- The `Event` type is moved from the `otel/sdk/export/trace` package to the `otel/trace` API package. (#1452)

### Removed

- Remove `errUninitializedSpan` as its only usage is now obsolete. (#1360)
- Remove Metric export functionality related to quantiles and summary data points: this is not specified (#1412)
- Remove DDSketch metric aggregator; our intention is to re-introduce this as an option of the histogram aggregator after [new OTLP histogram data types](https://github.com/open-telemetry/opentelemetry-proto/pull/226) are released (#1412)

### Fixed

- `BatchSpanProcessor.Shutdown()` will now shutdown underlying `export.SpanExporter`. (#1443)

## [0.15.0] - 2020-12-10

### Added

- The `WithIDGenerator` `TracerProviderOption` is added to the `go.opentelemetry.io/otel/trace` package to configure an `IDGenerator` for the `TracerProvider`. (#1363)

### Changed

- The Zipkin exporter now uses the Span status code to determine. (#1328)
- `NewExporter` and `Start` functions in `go.opentelemetry.io/otel/exporters/otlp` now receive `context.Context` as a first parameter. (#1357)
- Move the OpenCensus example into `example` directory. (#1359)
- Moved the SDK's `internal.IDGenerator` interface into the `sdk/trace` package to enable support for externally-defined ID generators.
  (#1363)
- Bump `github.com/google/go-cmp` from 0.5.3 to 0.5.4 (#1374)
- Bump `github.com/golangci/golangci-lint` in `/internal/tools` (#1375)

### Fixed

- Metric SDK `SumObserver` and `UpDownSumObserver` instruments correctness fixes. (#1381)

## [0.14.0] - 2020-11-19

### Added

- An `EventOption` and the related `NewEventConfig` function are added to the `go.opentelemetry.io/otel` package to configure Span events. (#1254)
- A `TextMapPropagator` and associated `TextMapCarrier` are added to the `go.opentelemetry.io/otel/oteltest` package to test `TextMap` type propagators and their use. (#1259)
- `SpanContextFromContext` returns `SpanContext` from context. (#1255)
- `TraceState` has been added to `SpanContext`. (#1340)
- `DeploymentEnvironmentKey` added to `go.opentelemetry.io/otel/semconv` package. (#1323)
- Add an OpenCensus to OpenTelemetry tracing bridge. (#1305)
- Add a parent context argument to `SpanProcessor.OnStart` to follow the specification. (#1333)
- Add missing tests for `sdk/trace/attributes_map.go`. (#1337)

### Changed

- Move the `go.opentelemetry.io/otel/api/trace` package into `go.opentelemetry.io/otel/trace` with the following changes. (#1229) (#1307)
  - `ID` has been renamed to `TraceID`.
  - `IDFromHex` has been renamed to `TraceIDFromHex`.
  - `EmptySpanContext` is removed.
- Move the `go.opentelemetry.io/otel/api/trace/tracetest` package into `go.opentelemetry.io/otel/oteltest`. (#1229)
- OTLP Exporter updates:
  - supports OTLP v0.6.0 (#1230, #1354)
  - supports configurable aggregation temporality (default: Cumulative, optional: Stateless). (#1296)
- The Sampler is now called on local child spans. (#1233)
- The `Kind` type from the `go.opentelemetry.io/otel/api/metric` package was renamed to `InstrumentKind` to more specifically describe what it is and avoid semantic ambiguity. (#1240)
- The `MetricKind` method of the `Descriptor` type in the `go.opentelemetry.io/otel/api/metric` package was renamed to `Descriptor.InstrumentKind`. This matches the returned type and fixes misuse of the term metric. (#1240)
- Move test harness from the `go.opentelemetry.io/otel/api/apitest` package into `go.opentelemetry.io/otel/oteltest`. (#1241)
- Move the `go.opentelemetry.io/otel/api/metric/metrictest` package into `go.opentelemetry.io/otel/oteltest` as part of #964. (#1252)
- Move the `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric` as part of #1303. (#1321)
- Move the `go.opentelemetry.io/otel/api/metric/registry` package into `go.opentelemetry.io/otel/metric/registry` as a part of #1303. (#1316)
- Move the `Number` type (together with related functions) from `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric/number` as a part of #1303. (#1316)
- The function signature of the Span `AddEvent` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required name and a variable number of `EventOption`s. (#1254) (A usage sketch follows at the end of this section.)
- The function signature of the Span `RecordError` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required error value and a variable number of `EventOption`s. (#1254)
- Move the `go.opentelemetry.io/otel/api/global` package to `go.opentelemetry.io/otel`. (#1262) (#1330)
- Move the `Version` function from `go.opentelemetry.io/otel/sdk` to `go.opentelemetry.io/otel`. (#1330)
- Rename correlation context header from `"otcorrelations"` to `"baggage"` to match the OpenTelemetry specification. (#1267)
- Fix `Code.UnmarshalJSON` to work with valid JSON only. (#1276)
- The `resource.New()` method changes signature to support builtin attributes and functional options, including `telemetry.sdk.*` and `host.name` semantic conventions; the former method is renamed `resource.NewWithAttributes`. (#1235)
- The Prometheus exporter now exports non-monotonic counters (i.e. `UpDownCounter`s) as gauges. (#1210)
- Correct the `Span.End` method documentation in the `otel` API to state updates are not allowed on a span after it has ended. (#1310)
- Updated span collection limits for attribute, event and link counts to 1000. (#1318)
- Renamed `semconv.HTTPUrlKey` to `semconv.HTTPURLKey`. (#1338)
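The new `AddEvent` and `RecordError` call shapes (#1254) look roughly like the sketch below. Treat this as an illustrative approximation: the import paths reflect the present-day module layout rather than the `api/`-prefixed packages of the 0.14.x era, and the event name and attribute are made up.

```go
package main

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func handle(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "handle")
	defer span.End()

	// AddEvent takes a name plus EventOptions; RecordError takes the error
	// plus EventOptions. Neither takes a context anymore.
	span.AddEvent("cache miss", trace.WithAttributes(attribute.String("key", "user:42")))
	span.RecordError(errors.New("lookup failed"))
}
```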

### Removed

- The `ErrInvalidHexID`, `ErrInvalidTraceIDLength`, `ErrInvalidSpanIDLength`, or `ErrNilSpanID` from the `go.opentelemetry.io/otel` package are unexported now. (#1243)
- The `AddEventWithTimestamp` method on the `Span` interface in `go.opentelemetry.io/otel` is removed due to its redundancy. It is replaced by using the `AddEvent` method with a `WithTimestamp` option. (#1254)
- The `MockSpan` and `MockTracer` types are removed from `go.opentelemetry.io/otel/oteltest`. `Tracer` and `Span` from the same module should be used in their place instead. (#1306)
- `WorkerCount` option is removed from `go.opentelemetry.io/otel/exporters/otlp`. (#1350)
- Remove the following label types: INT32, UINT32, UINT64 and FLOAT32. (#1314)

### Fixed

- Rename `MergeItererator` to `MergeIterator` in the `go.opentelemetry.io/otel/label` package. (#1244)
- The `go.opentelemetry.io/otel/api/global` package's global TextMapPropagator now delegates functionality to a globally set delegate for all previously returned propagators. (#1258)
- Fix condition in `label.Any`. (#1299)
- Fix global `TracerProvider` to pass options to its configured provider. (#1329)
- Fix missing handler for `ExactKind` aggregator in OTLP metrics transformer (#1309)

## [0.13.0] - 2020-10-08

### Added

- OTLP Metric exporter supports Histogram aggregation. (#1209)
- The `Code` struct from the `go.opentelemetry.io/otel/codes` package now supports JSON marshaling and unmarshaling as well as implements the `Stringer` interface. (#1214)
- A Baggage API to implement the OpenTelemetry specification. (#1217)
- Add Shutdown method to sdk/trace/provider, shutdown processors in the order they were registered. (#1227)

### Changed

- Set default propagator to no-op propagator. (#1184)
- The `HTTPSupplier`, `HTTPExtractor`, `HTTPInjector`, and `HTTPPropagator` from the `go.opentelemetry.io/otel/api/propagation` package were replaced with unified `TextMapCarrier` and `TextMapPropagator` in the `go.opentelemetry.io/otel/propagation` package. (#1212) (#1325)
- The `New` function from the `go.opentelemetry.io/otel/api/propagation` package was replaced with `NewCompositeTextMapPropagator` in the `go.opentelemetry.io/otel` package. (#1212)
- The status codes of the `go.opentelemetry.io/otel/codes` package have been updated to match the latest OpenTelemetry specification. They now are `Unset`, `Error`, and `Ok`. They no longer track the gRPC codes. (#1214)
- The `StatusCode` field of the `SpanData` struct in the `go.opentelemetry.io/otel/sdk/export/trace` package now uses the codes package from this package instead of the gRPC project. (#1214)
- Move the `go.opentelemetry.io/otel/api/baggage` package into `go.opentelemetry.io/otel/baggage`.
  (#1217) (#1325)
- A `Shutdown` method of `SpanProcessor` and all its implementations receives a context and returns an error. (#1264)

### Fixed

- Copies of data from arrays and slices passed to `go.opentelemetry.io/otel/label.ArrayValue()` are now used in the returned `Value` instead of using the mutable data itself. (#1226)

### Removed

- The `ExtractHTTP` and `InjectHTTP` functions from the `go.opentelemetry.io/otel/api/propagation` package were removed. (#1212)
- The `Propagators` interface from the `go.opentelemetry.io/otel/api/propagation` package was removed to conform to the OpenTelemetry specification. The explicit `TextMapPropagator` type can be used in its place as this is the `Propagator` type the specification defines. (#1212)
- The `SetAttribute` method of the `Span` from the `go.opentelemetry.io/otel/api/trace` package was removed given its redundancy with the `SetAttributes` method. (#1216)
- The internal implementation of Baggage storage is removed in favor of using the new Baggage API functionality. (#1217)
- Remove duplicate hostname key `HostHostNameKey` in Resource semantic conventions. (#1219)
- Nested array/slice support has been removed. (#1226)

## [0.12.0] - 2020-09-24

### Added

- A `SpanConfigure` function in `go.opentelemetry.io/otel/api/trace` to create a new `SpanConfig` from `SpanOption`s. (#1108)
- In the `go.opentelemetry.io/otel/api/trace` package, `NewTracerConfig` was added to construct new `TracerConfig`s. This addition was made to conform with our project option conventions. (#1155)
- Instrumentation library information was added to the Zipkin exporter. (#1119)
- The `SpanProcessor` interface now has a `ForceFlush()` method. (#1166)
- More semantic conventions for k8s as resource attributes. (#1167)

### Changed

- Add reconnecting udp connection type to Jaeger exporter. This change adds a new optional implementation of the udp conn interface used to detect changes to an agent's host dns record. It then adopts the new destination address to ensure the exporter doesn't get stuck. This change was ported from jaegertracing/jaeger-client-go#520. (#1063)
- Replace `StartOption` and `EndOption` in `go.opentelemetry.io/otel/api/trace` with `SpanOption`. This change is matched by replacing the `StartConfig` and `EndConfig` with a unified `SpanConfig`. (#1108)
- Replace the `LinkedTo` span option in `go.opentelemetry.io/otel/api/trace` with `WithLinks`. This is more consistent with our other option patterns, i.e. passing the item to be configured directly instead of its component parts, and provides a cleaner function signature. (#1108)
- The `go.opentelemetry.io/otel/api/trace` `TracerOption` was changed to an interface to conform to project option conventions. (#1109)
- Move the `B3` and `TraceContext` from within the `go.opentelemetry.io/otel/api/trace` package to their own `go.opentelemetry.io/otel/propagators` package. This removal of the propagators is reflective of the OpenTelemetry specification for these propagators as well as cleans up the `go.opentelemetry.io/otel/api/trace` API. (#1118)
- Rename Jaeger tags used for instrumentation library information to reflect changes in OpenTelemetry specification. (#1119)
- Rename `ProbabilitySampler` to `TraceIDRatioBased` and change semantics to ignore parent span sampling status. (#1115)
- Move `tools` package under `internal`. (#1141)
- Move `go.opentelemetry.io/otel/api/correlation` package to `go.opentelemetry.io/otel/api/baggage`.
  (#1142)
  The `correlation.CorrelationContext` propagator has been renamed `baggage.Baggage`. Other exported functions and types are unchanged.
- Rename `ParentOrElse` sampler to `ParentBased` and allow setting samplers depending on parent span. (#1153) (A usage sketch follows at the end of this section.)
- In the `go.opentelemetry.io/otel/api/trace` package, `SpanConfigure` was renamed to `NewSpanConfig`. (#1155)
- Change `dependabot.yml` to add a `Skip Changelog` label to dependabot-sourced PRs. (#1161)
- The [configuration style guide](https://github.com/open-telemetry/opentelemetry-go/blob/master/CONTRIBUTING.md#config) has been updated to recommend the use of `newConfig()` instead of `configure()`. (#1163)
- The `otlp.Config` type has been unexported and changed to `otlp.config`, along with its initializer. (#1163)
- Ensure exported interface types include parameter names and update the Style Guide to reflect this styling rule. (#1172)
- Don't consider unset environment variable for resource detection to be an error. (#1170)
- Rename `go.opentelemetry.io/otel/api/metric.ConfigureInstrument` to `NewInstrumentConfig` and `go.opentelemetry.io/otel/api/metric.ConfigureMeter` to `NewMeterConfig`.
- ValueObserver instruments use LastValue aggregator by default. (#1165)
- OTLP Metric exporter supports LastValue aggregation. (#1165)
- Move the `go.opentelemetry.io/otel/api/unit` package to `go.opentelemetry.io/otel/unit`. (#1185)
- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
- Rename `NoopProvider` to `NoopMeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/metrictest` package. (#1190)
- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
- Rename `NoopProvider` to `NoopTracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
- Rename `WrapperProvider` to `WrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
- Rename `NewWrapperProvider` to `NewWrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
- Rename `Provider` method of the pull controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/pull` package. (#1190)
- Rename `Provider` method of the push controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/push` package. (#1190)
- Rename `ProviderOptions` to `TracerProviderConfig` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
- Rename `ProviderOption` to `TracerProviderOption` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
- Renamed `SamplingDecision` values to comply with OpenTelemetry specification change. (#1192)
- Renamed Zipkin attribute names from `ot.status_code & ot.status_description` to `otel.status_code & otel.status_description`. (#1201)
- The default SDK now invokes registered `SpanProcessor`s in the order they were registered with the `TracerProvider`. (#1195)
- Add test of spans being processed by the `SpanProcessor`s in the order they were registered. (#1203)
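The renamed `TraceIDRatioBased` (#1115) and `ParentBased` (#1153) samplers compose as sketched below. Note this uses the current `go.opentelemetry.io/otel/sdk/trace` package names, which postdate the 0.12.0 layout, and the 10% ratio is an arbitrary choice.

```go
package main

import (
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func newTracerProvider() *sdktrace.TracerProvider {
	// Sample 10% of root traces; child spans follow their parent's decision.
	sampler := sdktrace.ParentBased(sdktrace.TraceIDRatioBased(0.1))
	return sdktrace.NewTracerProvider(sdktrace.WithSampler(sampler))
}
```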

### Removed

- Remove the B3 propagator from `go.opentelemetry.io/otel/propagators`. It is now located in the `go.opentelemetry.io/contrib/propagators/` module. (#1191)
- Remove the semantic convention for HTTP status text, `HTTPStatusTextKey` from package `go.opentelemetry.io/otel/semconv`. (#1194)

### Fixed

- Zipkin example no longer mentions `ParentSampler`, corrected to `ParentBased`. (#1171)
- Fix missing shutdown processor in otel-collector example. (#1186)
- Fix missing shutdown processor in basic and namedtracer examples. (#1197)

## [0.11.0] - 2020-08-24

### Added

- Support for exporting array-valued attributes via OTLP. (#992)
- `Noop` and `InMemory` `SpanBatcher` implementations to help with testing integrations. (#994)
- Support for filtering metric label sets. (#1047)
- A dimensionality-reducing metric Processor. (#1057)
- Integration tests for more OTel Collector Attribute types. (#1062)
- A new `WithSpanProcessor` `ProviderOption` is added to the `go.opentelemetry.io/otel/sdk/trace` package to create a `Provider` and automatically register the `SpanProcessor`. (#1078)

### Changed

- Rename `sdk/metric/processor/test` to `sdk/metric/processor/processortest`. (#1049)
- Rename `sdk/metric/controller/test` to `sdk/metric/controller/controllertest`. (#1049)
- Rename `api/testharness` to `api/apitest`. (#1049)
- Rename `api/trace/testtrace` to `api/trace/tracetest`. (#1049)
- Change Metric Processor to merge multiple observations. (#1024)
- The `go.opentelemetry.io/otel/bridge/opentracing` bridge package has been made into its own module. This removes the package dependencies of this bridge from the rest of the OpenTelemetry based project. (#1038)
- Renamed `go.opentelemetry.io/otel/api/standard` package to `go.opentelemetry.io/otel/semconv` to avoid the ambiguous and generic name `standard` and better describe the package as containing OpenTelemetry semantic conventions. (#1016)
- The environment variable used for resource detection has been changed from `OTEL_RESOURCE_LABELS` to `OTEL_RESOURCE_ATTRIBUTES`. (#1042)
- Replace `WithSyncer` with `WithBatcher` in examples. (#1044)
- Replace the `google.golang.org/grpc/codes` dependency in the API with an equivalent `go.opentelemetry.io/otel/codes` package. (#1046)
- Merge the `go.opentelemetry.io/otel/api/label` and `go.opentelemetry.io/otel/api/kv` into the new `go.opentelemetry.io/otel/label` package. (#1060)
- Unify Callback Function Naming. Rename `*Callback` to `*Func`. (#1061)
- CI builds validate against last two versions of Go, dropping 1.13 and adding 1.15. (#1064)
- The `go.opentelemetry.io/otel/sdk/export/trace` interfaces `SpanSyncer` and `SpanBatcher` have been replaced with a specification compliant `Exporter` interface. This interface still supports the export of `SpanData`, but only as a slice. Implementations are also now required to return any error from `ExportSpans` if one occurs as well as implement a `Shutdown` method for exporter clean-up. (#1078)
- The `go.opentelemetry.io/otel/sdk/trace` `NewBatchSpanProcessor` function no longer returns an error.
If a `nil` exporter is passed as an argument to this function, instead of it returning an error, it now returns a `BatchSpanProcessor` that handles the export of `SpanData` by not taking any action. (#1078) - The `go.opentelemetry.io/otel/sdk/trace` `NewProvider` function to create a `Provider` no longer returns an error; it now returns only a `*Provider`. This change is related to `NewBatchSpanProcessor` not returning an error which was the only error this function would return. (#1078) ### Removed - Duplicate, unused API sampler interface. (#999) Use the [`Sampler` interface](https://github.com/open-telemetry/opentelemetry-go/blob/v0.11.0/sdk/trace/sampling.go) provided by the SDK instead. - The `grpctrace` instrumentation was moved to the `go.opentelemetry.io/contrib` repository and out of this repository. This move includes moving the `grpc` example to `go.opentelemetry.io/contrib` as well. (#1027) - The `WithSpan` method of the `Tracer` interface. The functionality this method provided was limited compared to what a user can provide themselves. It was removed with the understanding that if there is sufficient user need it can be added back based on actual user usage. (#1043) - The `RegisterSpanProcessor` and `UnregisterSpanProcessor` functions. These were holdovers from an approach prior to the TracerProvider design. They were not used anymore. (#1077) - The `oterror` package. (#1026) - The `othttp` and `httptrace` instrumentations were moved to `go.opentelemetry.io/contrib`. (#1032) ### Fixed - The `semconv.HTTPServerMetricAttributesFromHTTPRequest()` function no longer generates the high-cardinality `http.request.content.length` label. (#1031) - Correct instrumentation version tag in Jaeger exporter. (#1037) - The SDK span will now set an error event if the `End` method is called during a panic (i.e. it was deferred). (#1043) - Move internally generated protobuf code from the `go.opentelemetry.io/otel` to the OTLP exporter to reduce dependency overhead. (#1050) - The `otel-collector` example referenced outdated collector processors. (#1006) ## [0.10.0] - 2020-07-29 This release migrates the default OpenTelemetry SDK into its own Go module, decoupling the SDK from the API and reducing dependencies for instrumentation packages. ### Added - The Zipkin exporter now has `NewExportPipeline` and `InstallNewPipeline` constructor functions to match the common pattern. These functions build a new exporter with default SDK options and register the exporter with the `global` package respectively. (#944) - Add propagator option for gRPC instrumentation. (#986) - The `testtrace` package now tracks the `trace.SpanKind` for each span. (#987) ### Changed - Replace the `RegisterGlobal` `Option` in the Jaeger exporter with an `InstallNewPipeline` constructor function. This matches the other exporter constructor patterns and will register a new exporter after building it with default configuration. (#944) - The trace (`go.opentelemetry.io/otel/exporters/trace/stdout`) and metric (`go.opentelemetry.io/otel/exporters/metric/stdout`) `stdout` exporters are now merged into a single exporter at `go.opentelemetry.io/otel/exporters/stdout`. This new exporter was made into its own Go module to follow the pattern of all exporters and decouple it from the `go.opentelemetry.io/otel` module. (#956, #963) - Move the `go.opentelemetry.io/otel/exporters/test` test package to `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. 
(#962) - The `go.opentelemetry.io/otel/api/kv/value` package was merged into the parent `go.opentelemetry.io/otel/api/kv` package. (#968) - `value.Bool` was replaced with `kv.BoolValue`. - `value.Int64` was replaced with `kv.Int64Value`. - `value.Uint64` was replaced with `kv.Uint64Value`. - `value.Float64` was replaced with `kv.Float64Value`. - `value.Int32` was replaced with `kv.Int32Value`. - `value.Uint32` was replaced with `kv.Uint32Value`. - `value.Float32` was replaced with `kv.Float32Value`. - `value.String` was replaced with `kv.StringValue`. - `value.Int` was replaced with `kv.IntValue`. - `value.Uint` was replaced with `kv.UintValue`. - `value.Array` was replaced with `kv.ArrayValue`. - Rename `Infer` to `Any` in the `go.opentelemetry.io/otel/api/kv` package. (#972) - Change `othttp` to use the `httpsnoop` package to wrap the `ResponseWriter` so that optional interfaces (`http.Hijacker`, `http.Flusher`, etc.) that are implemented by the original `ResponseWriter`are also implemented by the wrapped `ResponseWriter`. (#979) - Rename `go.opentelemetry.io/otel/sdk/metric/aggregator/test` package to `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest`. (#980) - Make the SDK into its own Go module called `go.opentelemetry.io/otel/sdk`. (#985) - Changed the default trace `Sampler` from `AlwaysOn` to `ParentOrElse(AlwaysOn)`. (#989) ### Removed - The `IndexedAttribute` function from the `go.opentelemetry.io/otel/api/label` package was removed in favor of `IndexedLabel` which it was synonymous with. (#970) ### Fixed - Bump github.com/golangci/golangci-lint from 1.28.3 to 1.29.0 in /tools. (#953) - Bump github.com/google/go-cmp from 0.5.0 to 0.5.1. (#957) - Use `global.Handle` for span export errors in the OTLP exporter. (#946) - Correct Go language formatting in the README documentation. (#961) - Remove default SDK dependencies from the `go.opentelemetry.io/otel/api` package. (#977) - Remove default SDK dependencies from the `go.opentelemetry.io/otel/instrumentation` package. (#983) - Move documented examples for `go.opentelemetry.io/otel/instrumentation/grpctrace` interceptors into Go example tests. (#984) ## [0.9.0] - 2020-07-20 ### Added - A new Resource Detector interface is included to allow resources to be automatically detected and included. (#939) - A Detector to automatically detect resources from an environment variable. (#939) - Github action to generate protobuf Go bindings locally in `internal/opentelemetry-proto-gen`. (#938) - OTLP .proto files from `open-telemetry/opentelemetry-proto` imported as a git submodule under `internal/opentelemetry-proto`. References to `github.com/open-telemetry/opentelemetry-proto` changed to `go.opentelemetry.io/otel/internal/opentelemetry-proto-gen`. (#942) ### Changed - Non-nil value `struct`s for key-value pairs will be marshalled using JSON rather than `Sprintf`. (#948) ### Removed - Removed dependency on `github.com/open-telemetry/opentelemetry-collector`. (#943) ## [0.8.0] - 2020-07-09 ### Added - The `B3Encoding` type to represent the B3 encoding(s) the B3 propagator can inject. A value for HTTP supported encodings (Multiple Header: `MultipleHeader`, Single Header: `SingleHeader`) are included. (#882) - The `FlagsDeferred` trace flag to indicate if the trace sampling decision has been deferred. (#882) - The `FlagsDebug` trace flag to indicate if the trace is a debug trace. (#882) - Add `peer.service` semantic attribute. (#898) - Add database-specific semantic attributes. 
(#899) - Add semantic convention for `faas.coldstart` and `container.id`. (#909) - Add HTTP content size semantic conventions. (#905) - Include `http.request_content_length` in HTTP request basic attributes. (#905) - Add semantic conventions for operating system process resource attribute keys. (#919) - The Jaeger exporter now has a `WithBatchMaxCount` option to specify the maximum number of spans sent in a batch. (#931) ### Changed - Update `CONTRIBUTING.md` to ask for updates to `CHANGELOG.md` with each pull request. (#879) - Use lowercase header names for B3 Multiple Headers. (#881) - The B3 propagator `SingleHeader` field has been replaced with `InjectEncoding`. This new field can be set to combinations of the `B3Encoding` bitmasks and will inject trace information in these encodings. If no encoding is set, the propagator will default to `MultipleHeader` encoding. (#882) - The B3 propagator now extracts from either HTTP encoding of B3 (Single Header or Multiple Header) based on what is contained in the header. Preference is given to Single Header encoding with Multiple Header being the fallback if Single Header is not found or is invalid. This behavior change is made to dynamically support all correctly encoded traces received instead of having to guess the expected encoding prior to receiving. (#882) - Extend semantic conventions for RPC. (#900) - To match constant naming conventions in the `api/standard` package, the `FaaS*` key names are appended with a suffix of `Key`. (#920) - `"api/standard".FaaSName` -> `FaaSNameKey` - `"api/standard".FaaSID` -> `FaaSIDKey` - `"api/standard".FaaSVersion` -> `FaaSVersionKey` - `"api/standard".FaaSInstance` -> `FaaSInstanceKey` ### Removed - The `FlagsUnused` trace flag is removed. The purpose of this flag was to act as the inverse of `FlagsSampled`; the inverse of `FlagsSampled` is used instead. (#882) - The B3 header constants (`B3SingleHeader`, `B3DebugFlagHeader`, `B3TraceIDHeader`, `B3SpanIDHeader`, `B3SampledHeader`, `B3ParentSpanIDHeader`) are removed. If B3 header keys are needed [the authoritative OpenZipkin package constants](https://pkg.go.dev/github.com/openzipkin/zipkin-go@v0.2.2/propagation/b3?tab=doc#pkg-constants) should be used instead. (#882) ### Fixed - The B3 Single Header name is now correctly `b3` instead of the previous `X-B3`. (#881) - The B3 propagator now correctly supports sampling-only values (`b3: 0`, `b3: 1`, or `b3: d`) for a Single B3 Header. (#882) - The B3 propagator now propagates the debug flag. This removes the behavior of changing the debug flag into a set sampling bit. Instead, this now follows the B3 specification and omits the `X-B3-Sampling` header. (#882) - The B3 propagator now tracks "unset" sampling state (meaning "defer the decision") and does not set the `X-B3-Sampling` header when injecting. (#882) - Bump github.com/itchyny/gojq from 0.10.3 to 0.10.4 in /tools. (#883) - Bump github.com/opentracing/opentracing-go from v1.1.1-0.20190913142402-a7454ce5950e to v1.2.0. (#885) - The tracing time conversion for OTLP spans is now correctly set to `UnixNano`. (#896) - Ensure span status is not set to `Unknown` when no HTTP status code is provided as it is assumed to be `200 OK`. (#908) - Ensure `httptrace.clientTracer` closes `http.headers` span. (#912) - Prometheus exporter will not apply stale updates or forget inactive metrics. (#903) - Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905) - Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. 
(#901, #913) - Update otel-collector example to use the v0.5.0 collector. (#915) - The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922) - The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922) - The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists. This is in accordance with OpenTelemetry semantic conventions. (#922) - Correlation Context extractor will no longer insert an empty map into the returned context when no valid values are extracted. (#923) - Bump google.golang.org/api from 0.28.0 to 0.29.0 in /exporters/trace/jaeger. (#925) - Bump github.com/itchyny/gojq from 0.10.4 to 0.11.0 in /tools. (#926) - Bump github.com/golangci/golangci-lint from 1.28.1 to 1.28.2 in /tools. (#930) ## [0.7.0] - 2020-06-26 This release implements the v0.5.0 version of the OpenTelemetry specification. ### Added - The othttp instrumentation now includes default metrics. (#861) - This CHANGELOG file to track all changes in the project going forward. - Support for array type attributes. (#798) - Apply transitive dependabot go.mod dependency updates as part of a new automatic GitHub workflow. (#844) - Timestamps are now passed to exporters for each export. (#835) - Add new `Accumulation` type to metric SDK to transport telemetry from `Accumulator`s to `Processor`s. This replaces the prior `Record` `struct` use for this purpose. (#835) - New dependabot integration to automate package upgrades. (#814) - `Meter` and `Tracer` implementations accept instrumentation version as an optional argument. This instrumentation version is passed on to exporters. (#811) (#805) (#802) - The OTLP exporter includes the instrumentation version in telemetry it exports. (#811) - Environment variables for Jaeger exporter are supported. (#796) - New `aggregation.Kind` in the export metric API. (#808) - New example that uses OTLP and the collector. (#790) - Handle errors in the span `SetName` during span initialization. (#791) - Default service config to enable retries for retry-able failed requests in the OTLP exporter and an option to override this default. (#777) - New `go.opentelemetry.io/otel/api/oterror` package to uniformly support error handling and definitions for the project. (#778) - New `global` default implementation of the `go.opentelemetry.io/otel/api/oterror.Handler` interface to be used to handle errors prior to a user-defined `Handler`. There is also functionality for the user to register their `Handler` as well as a convenience function `Handle` to handle an error with this global `Handler`. (#778) - Options to specify propagators for httptrace and grpctrace instrumentation. (#784) - The required `application/json` header for the Zipkin exporter is included in all exports. (#774) - Integrate HTTP semantics helpers from the contrib repository into the `api/standard` package. #769 ### Changed - Rename `Integrator` to `Processor` in the metric SDK. (#863) - Rename `AggregationSelector` to `AggregatorSelector`. (#859) - Rename `SynchronizedCopy` to `SynchronizedMove`. (#858) - Rename `simple` integrator to `basic` integrator. (#857) - Merge otlp collector examples. (#841) - Change the metric SDK to support cumulative, delta, and pass-through exporters directly. With these changes, cumulative and delta specific exporters are able to request the correct kind of aggregation from the SDK. 
(#840) - The `Aggregator.Checkpoint` API is renamed to `SynchronizedCopy` and adds an argument, a different `Aggregator` into which the copy is stored. (#812) - The `export.Aggregator` contract is that `Update()` and `SynchronizedCopy()` are synchronized with each other. All the aggregation interfaces (`Sum`, `LastValue`, ...) are not meant to be synchronized, as the caller is expected to synchronize aggregators at a higher level after the `Accumulator`. Some of the `Aggregators` used unnecessary locking and that has been cleaned up. (#812) - Use of `metric.Number` was replaced by `int64` now that we use `sync.Mutex` in the `MinMaxSumCount` and `Histogram` `Aggregators`. (#812) - Replace `AlwaysParentSample` with `ParentSample(fallback)` to match the OpenTelemetry v0.5.0 specification. (#810) - Rename `sdk/export/metric/aggregator` to `sdk/export/metric/aggregation`. #808 - Send configured headers with every request in the OTLP exporter, instead of just on connection creation. (#806) - Update error handling for any one off error handlers, replacing, instead, with the `global.Handle` function. (#791) - Rename `plugin` directory to `instrumentation` to match the OpenTelemetry specification. (#779) - Makes the argument order to Histogram and DDSketch `New()` consistent. (#781) ### Removed - `Uint64NumberKind` and related functions from the API. (#864) - Context arguments from `Aggregator.Checkpoint` and `Integrator.Process` as they were unused. (#803) - `SpanID` is no longer included in parameters for sampling decision to match the OpenTelemetry specification. (#775) ### Fixed - Upgrade OTLP exporter to opentelemetry-proto matching the opentelemetry-collector v0.4.0 release. (#866) - Allow changes to `go.sum` and `go.mod` when running dependabot tidy-up. (#871) - Bump github.com/stretchr/testify from 1.4.0 to 1.6.1. (#824) - Bump github.com/prometheus/client_golang from 1.7.0 to 1.7.1 in /exporters/metric/prometheus. (#867) - Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/jaeger. (#853) - Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/zipkin. (#854) - Bumps github.com/golang/protobuf from 1.3.2 to 1.4.2 (#848) - Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/otlp (#817) - Bump github.com/golangci/golangci-lint from 1.25.1 to 1.27.0 in /tools (#828) - Bump github.com/prometheus/client_golang from 1.5.0 to 1.7.0 in /exporters/metric/prometheus (#838) - Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/jaeger (#829) - Bump github.com/benbjohnson/clock from 1.0.0 to 1.0.3 (#815) - Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/zipkin (#823) - Bump github.com/itchyny/gojq from 0.10.1 to 0.10.3 in /tools (#830) - Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/metric/prometheus (#822) - Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/zipkin (#820) - Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/jaeger (#831) - Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 (#836) - Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/trace/jaeger (#837) - Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/otlp (#839) - Bump google.golang.org/api from 0.20.0 to 0.28.0 in /exporters/trace/jaeger (#843) - Set span status from HTTP status code in the othttp instrumentation. (#832) - Fixed typo in push controller comment. (#834) - The `Aggregator` testing has been updated and cleaned. 
(#812) - `metric.Number(0)` expressions are replaced by `0` where possible. (#812) - Fixed `global` `handler_test.go` test failure. #804 - Fixed `BatchSpanProcessor.Shutdown` to wait until all spans are processed. (#766) - Fixed OTLP example's accidental early close of exporter. (#807) - Ensure zipkin exporter reads and closes response body. (#788) - Update instrumentation to use `api/standard` keys instead of custom keys. (#782) - Clean up tools and RELEASING documentation. (#762) ## [0.6.0] - 2020-05-21 ### Added - Support for `Resource`s in the prometheus exporter. (#757) - New pull controller. (#751) - New `UpDownSumObserver` instrument. (#750) - OpenTelemetry collector demo. (#711) - New `SumObserver` instrument. (#747) - New `UpDownCounter` instrument. (#745) - New timeout `Option` and configuration function `WithTimeout` to the push controller. (#742) - New `api/standards` package to implement semantic conventions and standard key-value generation. (#731) ### Changed - Rename `Register*` functions in the metric API to `New*` for all `Observer` instruments. (#761) - Use `[]float64` for histogram boundaries, not `[]metric.Number`. (#758) - Change OTLP example to use exporter as a trace `Syncer` instead of as an unneeded `Batcher`. (#756) - Replace `WithResourceAttributes()` with `WithResource()` in the trace SDK. (#754) - The prometheus exporter now uses the new pull controller. (#751) - Rename `ScheduleDelayMillis` to `BatchTimeout` in the trace `BatchSpanProcessor`.(#752) - Support use of synchronous instruments in asynchronous callbacks (#725) - Move `Resource` from the `Export` method parameter into the metric export `Record`. (#739) - Rename `Observer` instrument to `ValueObserver`. (#734) - The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738) - Replace `Measure` instrument by `ValueRecorder` instrument. (#732) - Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. (#727) ### Fixed - Ensure gRPC `ClientStream` override methods do not panic in grpctrace package. (#755) - Disable parts of `BatchSpanProcessor` test until a fix is found. (#743) - Fix `string` case in `kv` `Infer` function. (#746) - Fix panic in grpctrace client interceptors. (#740) - Refactor the `api/metrics` push controller and add `CheckpointSet` synchronization. (#737) - Rewrite span batch process queue batching logic. (#719) - Remove the push controller named Meter map. (#738) - Fix Histogram aggregator initial state (fix #735). (#736) - Ensure golang alpine image is running `golang-1.14` for examples. (#733) - Added test for grpctrace `UnaryInterceptorClient`. (#695) - Rearrange `api/metric` code layout. (#724) ## [0.5.0] - 2020-05-13 ### Added - Batch `Observer` callback support. (#717) - Alias `api` types to root package of project. (#696) - Create basic `othttp.Transport` for simple client instrumentation. (#678) - `SetAttribute(string, interface{})` to the trace API. (#674) - Jaeger exporter option that allows user to specify custom http client. (#671) - `Stringer` and `Infer` methods to `key`s. (#662) ### Changed - Rename `NewKey` in the `kv` package to just `Key`. (#721) - Move `core` and `key` to `kv` package. (#720) - Make the metric API `Meter` a `struct` so the abstract `MeterImpl` can be passed and simplify implementation. (#709) - Rename SDK `Batcher` to `Integrator` to match draft OpenTelemetry SDK specification. 
(#710) - Rename SDK `Ungrouped` integrator to `simple.Integrator` to match draft OpenTelemetry SDK specification. (#710) - Rename SDK `SDK` `struct` to `Accumulator` to match draft OpenTelemetry SDK specification. (#710) - Move `Number` from `core` to `api/metric` package. (#706) - Move `SpanContext` from `core` to `trace` package. (#692) - Change traceparent header from `Traceparent` to `traceparent` to implement the W3C specification. (#681) ### Fixed - Update tooling to run generators in all submodules. (#705) - gRPC interceptor regexp to match methods without a service name. (#683) - Use a `const` for padding 64-bit B3 trace IDs. (#701) - Update `mockZipkin` listen address from `:0` to `127.0.0.1:0`. (#700) - Left-pad 64-bit B3 trace IDs with zero. (#698) - Propagate at least the first W3C tracestate header. (#694) - Remove internal `StateLocker` implementation. (#688) - Increase instance size CI system uses. (#690) - Add a `key` benchmark and use reflection in `key.Infer()`. (#679) - Fix internal `global` test by using `global.Meter` with `RecordBatch()`. (#680) - Reimplement histogram using mutex instead of `StateLocker`. (#669) - Switch `MinMaxSumCount` to a mutex lock implementation instead of `StateLocker`. (#667) - Update documentation to not include any references to `WithKeys`. (#672) - Correct misspelling. (#668) - Fix clobbering of the span context if extraction fails. (#656) - Bump `golangci-lint` and work around the corrupting bug. (#666) (#670) ## [0.4.3] - 2020-04-24 ### Added - `Dockerfile` and `docker-compose.yml` to run example code. (#635) - New `grpctrace` package that provides gRPC client and server interceptors for both unary and stream connections. (#621) - New `api/label` package, providing common label set implementation. (#651) - Support for JSON marshaling of `Resources`. (#654) - `TraceID` and `SpanID` implementations for `Stringer` interface. (#642) - `RemoteAddrKey` in the othttp plugin to include the HTTP client address in top-level spans. (#627) - `WithSpanFormatter` option to the othttp plugin. (#617) - Updated README to include section for compatible libraries and include reference to the contrib repository. (#612) - The prometheus exporter now supports exporting histograms. (#601) - A `String` method to the `Resource` to return a hashable identifier for a now unique resource. (#613) - An `Iter` method to the `Resource` to return an array `AttributeIterator`. (#613) - An `Equal` method to the `Resource` to test the equivalence of resources. (#613) - An iterable structure (`AttributeIterator`) for `Resource` attributes. ### Changed - The Zipkin exporter's `NewExporter` now requires a `serviceName` argument to ensure this needed value is provided. (#644) - Pass `Resources` through the metrics export pipeline. (#659) ### Removed - `WithKeys` option from the metric API. (#639) ### Fixed - Use the `label.Set.Equivalent` value instead of an encoding in the batcher. (#658) - Correct typo `trace.Exporter` to `trace.SpanSyncer` in comments. (#653) - Use type names for return values in jaeger exporter. (#648) - Increase the visibility of the `api/key` package by updating comments and fixing usages locally. (#650) - `Checkpoint` only after `Update`; Keep records in the `sync.Map` longer. (#647) - Do not cache `reflect.ValueOf()` in metric Labels. (#649) - Batch metrics exported from the OTLP exporter based on `Resource` and labels. (#626) - Add error wrapping to the prometheus exporter. 
(#631) - Update the OTLP exporter batching of traces to use a unique `string` representation of an associated `Resource` as the batching key. (#623) - Update OTLP `SpanData` transform to only include the `ParentSpanID` if one exists. (#614) - Update `Resource` internal representation to uniquely and reliably identify resources. (#613) - Check return value from `CheckpointSet.ForEach` in prometheus exporter. (#622) - Ensure spans created by httptrace client tracer reflect operation structure. (#618) - Create a new recorder rather than reuse when multiple observations in same epoch for asynchronous instruments. #610 - The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. (#611) ## [0.4.2] - 2020-03-31 ### Fixed - Fix `pre_release.sh` to update version in `sdk/opentelemetry.go`. (#607) - Fix time conversion from internal to OTLP in OTLP exporter. (#606) ## [0.4.1] - 2020-03-31 ### Fixed - Update `tag.sh` to create signed tags. (#604) ## [0.4.0] - 2020-03-30 ### Added - New API package `api/metric/registry` that exposes a `MeterImpl` wrapper for use by SDKs to generate unique instruments. (#580) - Script to verify examples after a new release. (#579) ### Removed - The dogstatsd exporter due to lack of support. This additionally removes support for statsd. (#591) - `LabelSet` from the metric API. This is replaced by a `[]core.KeyValue` slice. (#595) - `Labels` from the metric API's `Meter` interface. (#595) ### Changed - The metric `export.Labels` became an interface which the SDK implements and the `export` package provides a simple, immutable implementation of this interface intended for testing purposes. (#574) - Renamed `internal/metric.Meter` to `MeterImpl`. (#580) - Renamed `api/global/internal.obsImpl` to `asyncImpl`. (#580) ### Fixed - Corrected missing return in mock span. (#582) - Update License header for all source files to match CNCF guidelines and include a test to ensure it is present. (#586) (#596) - Update to v0.3.0 of the OTLP in the OTLP exporter. (#588) - Update pre-release script to be compatible between GNU and BSD based systems. (#592) - Add a `RecordBatch` benchmark. (#594) - Moved span transforms of the OTLP exporter to the internal package. (#593) - Build both go-1.13 and go-1.14 in circleci to test for all supported versions of Go. (#569) - Removed unneeded allocation on empty labels in OLTP exporter. (#597) - Update `BatchedSpanProcessor` to process the queue until no data but respect max batch size. (#599) - Update project documentation godoc.org links to pkg.go.dev. (#602) ## [0.3.0] - 2020-03-21 This is a first official beta release, which provides almost fully complete metrics, tracing, and context propagation functionality. There is still a possibility of breaking changes. ### Added - Add `Observer` metric instrument. (#474) - Add global `Propagators` functionality to enable deferred initialization for propagators registered before the first Meter SDK is installed. (#494) - Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459) - The zipkin trace exporter. (#495) - The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545) - Add `StatusMessage` field to the trace `Span`. (#524) - Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation. (#525) - The `Resource` type was added to the SDK. 
(#528) - The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538) - The metric API now defines a generic `MeterImpl` interface to support general purpose `Meter` construction. Additionally, `SyncImpl` and `AsyncImpl` are added to support general purpose instrument construction. (#560) - A metric `Kind` is added to represent the `MeasureKind`, `ObserverKind`, and `CounterKind`. (#560) - Scripts to better automate the release process. (#576) ### Changed - Default to using `AlwaysSampler` instead of `ProbabilitySampler` to match OpenTelemetry specification. (#506) - Renamed `AlwaysSampleSampler` to `AlwaysOnSampler` in the trace API. (#511) - Renamed `NeverSampleSampler` to `AlwaysOffSampler` in the trace API. (#511) - The `Status` field of the `Span` was changed to `StatusCode` to disambiguate with the added `StatusMessage`. (#524) - Updated the trace `Sampler` interface to conform to the OpenTelemetry specification. (#531) - Rename metric API `Options` to `Config`. (#541) - Rename metric `Counter` aggregator to be `Sum`. (#541) - Unify metric options into `Option` from instrument specific options. (#541) - The trace API's `TraceProvider` now supports `Resource`s. (#545) - Correct error in zipkin module name. (#548) - The jaeger trace exporter now supports `Resource`s. (#551) - Metric SDK now supports `Resource`s. The `WithResource` option was added to configure a `Resource` on creation and the `Resource` method was added to the metric `Descriptor` to return the associated `Resource`. (#552) - Replace `ErrNoLastValue` and `ErrEmptyDataSet` by `ErrNoData` in the metric SDK. (#557) - The stdout trace exporter now supports `Resource`s. (#558) - The metric `Descriptor` is now included at the API instead of the SDK. (#560) - Replace `Ordered` with an iterator in `export.Labels`. (#567) ### Removed - The vendor specific Stackdriver exporter. It is now hosted on 3rd party vendor infrastructure. (#452) - The `Unregister` method for metric observers as it is not in the OpenTelemetry specification. (#560) - `GetDescriptor` from the metric SDK. (#575) - The `Gauge` instrument from the metric API. (#537) ### Fixed - Make histogram aggregator checkpoint consistent. (#438) - Update README with import instructions and how to build and test. (#505) - The default label encoding was updated to be unique. (#508) - Use `NewRoot` in the othttp plugin for public endpoints. (#513) - Fix data race in `BatchedSpanProcessor`. (#518) - Skip test-386 for Mac OS 10.15.x (Catalina and upwards). #521 - Use a variable-size array to represent ordered labels in maps. (#523) - Update the OTLP protobuf and update changed import path. (#532) - Use `StateLocker` implementation in `MinMaxSumCount`. (#546) - Eliminate goroutine leak in histogram stress test. (#547) - Update OTLP exporter with latest protobuf. (#550) - Add filters to the othttp plugin. (#556) - Provide an implementation of the `Header*` filters that do not depend on Go 1.14. (#565) - Encode labels once during checkpoint. The checkpoint function is executed in a single thread so we can do the encoding lazily before passing the encoded version of labels to the exporter. This is a cheap and quick way to avoid encoding the labels on every collection interval. (#572) - Run coverage over all packages in `COVERAGE_MOD_DIR`. (#573) ## [0.2.3] - 2020-03-04 ### Added - `RecordError` method on `Span`s in the trace API to simplify adding error events to spans. 
(#473) - Configurable push frequency for exporters setup pipeline. (#504) ### Changed - Rename the `exporter` directory to `exporters`. The `go.opentelemetry.io/otel/exporter/trace/jaeger` package was mistakenly released with a `v1.0.0` tag instead of `v0.1.0`. This resulted in all subsequent releases not becoming the default latest. A consequence of this was that all `go get`s pulled in the incompatible `v0.1.0` release of that package when pulling in more recent packages from other otel packages. Renaming the `exporter` directory to `exporters` fixes this issue by renaming the package and therefore clearing any existing dependency tags. Consequentially, this action also renames *all* exporter packages. (#502) ### Removed - The `CorrelationContextHeader` constant in the `correlation` package is no longer exported. (#503) ## [0.2.2] - 2020-02-27 ### Added - `HTTPSupplier` interface in the propagation API to specify methods to retrieve and store a single value for a key to be associated with a carrier. (#467) - `HTTPExtractor` interface in the propagation API to extract information from an `HTTPSupplier` into a context. (#467) - `HTTPInjector` interface in the propagation API to inject information into an `HTTPSupplier.` (#467) - `Config` and configuring `Option` to the propagator API. (#467) - `Propagators` interface in the propagation API to contain the set of injectors and extractors for all supported carrier formats. (#467) - `HTTPPropagator` interface in the propagation API to inject and extract from an `HTTPSupplier.` (#467) - `WithInjectors` and `WithExtractors` functions to the propagator API to configure injectors and extractors to use. (#467) - `ExtractHTTP` and `InjectHTTP` functions to apply configured HTTP extractors and injectors to a passed context. (#467) - Histogram aggregator. (#433) - `DefaultPropagator` function and have it return `trace.TraceContext` as the default context propagator. (#456) - `AlwaysParentSample` sampler to the trace API. (#455) - `WithNewRoot` option function to the trace API to specify the created span should be considered a root span. (#451) ### Changed - Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481) - Renamed `FromContext` to `MapFromContext` in the correlation package. (#481) - Move correlation context propagation to correlation package. (#479) - Do not default to putting remote span context into links. (#480) - `Tracer.WithSpan` updated to accept `StartOptions`. (#472) - Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432) - Renamed the `export` package to `metric` to match directory structure. (#432) - Rename the `api/distributedcontext` package to `api/correlation`. (#444) - Rename the `api/propagators` package to `api/propagation`. (#444) - Move the propagators from the `propagators` package into the `trace` API package. (#444) - Update `Float64Gauge`, `Int64Gauge`, `Float64Counter`, `Int64Counter`, `Float64Measure`, and `Int64Measure` metric methods to use value receivers instead of pointers. (#462) - Moved all dependencies of tools package to a tools directory. (#466) ### Removed - Binary propagators. (#467) - NOOP propagator. (#467) ### Fixed - Upgraded `github.com/golangci/golangci-lint` from `v1.21.0` to `v1.23.6` in `tools/`. (#492) - Fix a possible nil-dereference crash (#478) - Correct comments for `InstallNewPipeline` in the stdout exporter. (#483) - Correct comments for `InstallNewPipeline` in the dogstatsd exporter. 
(#484) - Correct comments for `InstallNewPipeline` in the prometheus exporter. (#482) - Initialize `onError` based on `Config` in prometheus exporter. (#486) - Correct module name in prometheus exporter README. (#475) - Removed tracer name prefix from span names. (#430) - Fix `aggregator_test.go` import package comment. (#431) - Improved detail in stdout exporter. (#436) - Fix a dependency issue (generate target should depend on stringer, not lint target) in Makefile. (#442) - Reorders the Makefile targets within `precommit` target so we generate files and build the code before doing linting, so we can get much nicer errors about syntax errors from the compiler. (#442) - Reword function documentation in gRPC plugin. (#446) - Send the `span.kind` tag to Jaeger from the jaeger exporter. (#441) - Fix `metadataSupplier` in the jaeger exporter to overwrite the header if existing instead of appending to it. (#441) - Upgraded to Go 1.13 in CI. (#465) - Correct opentelemetry.io URL in trace SDK documentation. (#464) - Refactored reference counting logic in SDK determination of stale records. (#468) - Add call to `runtime.Gosched` in instrument `acquireHandle` logic to not block the collector. (#469) ## [0.2.1.1] - 2020-01-13 ### Fixed - Use stateful batcher on Prometheus exporter fixing regression introduced in #395. (#428) ## [0.2.1] - 2020-01-08 ### Added - Global meter forwarding implementation. This enables deferred initialization for metric instruments registered before the first Meter SDK is installed. (#392) - Global trace forwarding implementation. This enables deferred initialization for tracers registered before the first Trace SDK is installed. (#406) - Standardize export pipeline creation in all exporters. (#395) - A testing, organization, and comments for 64-bit field alignment. (#418) - Script to tag all modules in the project. (#414) ### Changed - Renamed `propagation` package to `propagators`. (#362) - Renamed `B3Propagator` propagator to `B3`. (#362) - Renamed `TextFormatPropagator` propagator to `TextFormat`. (#362) - Renamed `BinaryPropagator` propagator to `Binary`. (#362) - Renamed `BinaryFormatPropagator` propagator to `BinaryFormat`. (#362) - Renamed `NoopTextFormatPropagator` propagator to `NoopTextFormat`. (#362) - Renamed `TraceContextPropagator` propagator to `TraceContext`. (#362) - Renamed `SpanOption` to `StartOption` in the trace API. (#369) - Renamed `StartOptions` to `StartConfig` in the trace API. (#369) - Renamed `EndOptions` to `EndConfig` in the trace API. (#369) - `Number` now has a pointer receiver for its methods. (#375) - Renamed `CurrentSpan` to `SpanFromContext` in the trace API. (#379) - Renamed `SetCurrentSpan` to `ContextWithSpan` in the trace API. (#379) - Renamed `Message` in Event to `Name` in the trace API. (#389) - Prometheus exporter no longer aggregates metrics, instead it only exports them. (#385) - Renamed `HandleImpl` to `BoundInstrumentImpl` in the metric API. (#400) - Renamed `Float64CounterHandle` to `Float64CounterBoundInstrument` in the metric API. (#400) - Renamed `Int64CounterHandle` to `Int64CounterBoundInstrument` in the metric API. (#400) - Renamed `Float64GaugeHandle` to `Float64GaugeBoundInstrument` in the metric API. (#400) - Renamed `Int64GaugeHandle` to `Int64GaugeBoundInstrument` in the metric API. (#400) - Renamed `Float64MeasureHandle` to `Float64MeasureBoundInstrument` in the metric API. (#400) - Renamed `Int64MeasureHandle` to `Int64MeasureBoundInstrument` in the metric API. 
(#400) - Renamed `Release` method for bound instruments in the metric API to `Unbind`. (#400) - Renamed `AcquireHandle` method for bound instruments in the metric API to `Bind`. (#400) - Renamed the `File` option in the stdout exporter to `Writer`. (#404) - Renamed all `Options` to `Config` for all metric exports where this wasn't already the case. ### Fixed - Aggregator import path corrected. (#421) - Correct links in README. (#368) - The README was updated to match latest code changes in its examples. (#374) - Don't capitalize error statements. (#375) - Fix ignored errors. (#375) - Fix ambiguous variable naming. (#375) - Removed unnecessary type casting. (#375) - Use named parameters. (#375) - Updated release schedule. (#378) - Correct http-stackdriver example module name. (#394) - Removed the `http.request` span in `httptrace` package. (#397) - Add comments in the metrics SDK. (#399) - Initialize checkpoint when creating ddsketch aggregator to prevent panic when merging into an empty one. (#402) (#403) - Add documentation of compatible exporters in the README. (#405) - Typo fix. (#408) - Simplify span check logic in SDK tracer implementation. (#419) ## [0.2.0] - 2019-12-03 ### Added - Unary gRPC tracing example. (#351) - Prometheus exporter. (#334) - Dogstatsd metrics exporter. (#326) ### Changed - Rename `MaxSumCount` aggregation to `MinMaxSumCount` and add the `Min` interface for this aggregation. (#352) - Rename `GetMeter` to `Meter`. (#357) - Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355) - Rename `HTTPB3Propagator` to `B3Propagator`. (#355) - Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355) - Move `/global` package to `/api/global`. (#356) - Rename `GetTracer` to `Tracer`. (#347) ### Removed - `SetAttribute` from the `Span` interface in the trace API. (#361) - `AddLink` from the `Span` interface in the trace API. (#349) - `Link` from the `Span` interface in the trace API. (#349) ### Fixed - Exclude example directories from coverage report. (#365) - Lint make target now implements automatic fixes with `golangci-lint` before a second run to report the remaining issues. (#360) - Drop `GO111MODULE` environment variable in Makefile as Go 1.13 is the project specified minimum version and this environment variable is not needed for that version of Go. (#359) - Run the race checker for all tests. (#354) - Redundant commands in the Makefile are removed. (#354) - Split the `generate` and `lint` targets of the Makefile. (#354) - Renames `circle-ci` target to more generic `ci` in Makefile. (#354) - Add example Prometheus binary to gitignore. (#358) - Support negative numbers with the `MaxSumCount`. (#335) - Resolve race conditions in `push_test.go` identified in #339. (#340) - Use `/usr/bin/env bash` as a shebang in scripts rather than `/bin/bash`. (#336) - Trace benchmark now tests both `AlwaysSample` and `NeverSample`. Previously it was testing `AlwaysSample` twice. (#325) - Trace benchmark now uses a `[]byte` for `TraceID` to fix failing test. (#325) - Added a trace benchmark to test variadic functions in `setAttribute` vs `setAttributes`. (#325) - The `defaultkeys` batcher was only using the encoded label set as its map key while building a checkpoint. This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly. This was corrected. (#333) ## [0.1.2] - 2019-11-18 ### Fixed - Optimized the `simplelru` map for attributes to reduce the number of allocations. 
(#328) - Removed unnecessary unslicing of parameters that are already a slice. (#324) ## [0.1.1] - 2019-11-18 This release contains a Metrics SDK with stdout exporter and supports basic aggregations such as counter, gauges, array, maxsumcount, and ddsketch. ### Added - Metrics stdout export pipeline. (#265) - Array aggregation for raw measure metrics. (#282) - The `core.Value` type now has a `MarshalJSON` method. (#281) ### Removed - `WithService`, `WithResources`, and `WithComponent` methods of tracers. (#314) - Prefix slash in `Tracer.Start()` for the Jaeger example. (#292) ### Changed - Allocation in LabelSet construction to reduce GC overhead. (#318) - `trace.WithAttributes` to append values instead of replacing. (#315) - Use a formula for tolerance in sampling tests. (#298) - Move export types into trace and metric-specific sub-directories. (#289) - `SpanKind` back to being based on an `int` type. (#288) ### Fixed - URL to OpenTelemetry website in README. (#323) - Name of othttp default tracer. (#321) - `ExportSpans` for the stackdriver exporter now handles `nil` context. (#294) - CI modules cache to correctly restore/save from/to the cache. (#316) - Fix metric SDK race condition between `LoadOrStore` and the assignment `rec.recorder = i.meter.exporter.AggregatorFor(rec)`. (#293) - README now reflects the new code structure introduced with these changes. (#291) - Make the basic example work. (#279) ## [0.1.0] - 2019-11-04 This is the first release of the open-telemetry go library. It contains the API and SDK for tracing and metrics. ### Added - Initial OpenTelemetry trace and metric API prototypes. - Initial OpenTelemetry trace, metric, and export SDK packages. - A wireframe bridge to support compatibility with OpenTracing. - Example code for a basic, http-stackdriver, http, jaeger, and named tracer setup. - Exporters for Jaeger, Stackdriver, and stdout. - Propagators for binary, B3, and trace-context protocols. - Project information and guidelines in the form of a README and CONTRIBUTING. - Tools to build the project and a Makefile to automate the process. - Apache-2.0 license. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. 
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.21.0...HEAD [1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0 [1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0 [1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0 [1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1 [1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0 [1.17.0/0.40.0/0.0.5]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.17.0 [1.16.0/0.39.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0 [1.16.0-rc.1/0.39.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0-rc.1 [1.15.1/0.38.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.1 [1.15.0/0.38.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0 [1.15.0-rc.2/0.38.0-rc.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.2 [1.15.0-rc.1/0.38.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.1 [1.14.0/0.37.0/0.0.4]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.14.0 [1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0 [1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0 [1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2 [1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1 [1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0 [0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2 [0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1 [0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0 [1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0 [1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0 [1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0 [1.7.0/0.30.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.7.0 [0.29.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.29.0 [1.6.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.3 [1.6.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.2 [1.6.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.1 [1.6.0/0.28.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.0 [1.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.5.0 [1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1 [1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0 [1.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0 [1.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.2.0 [1.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.1.0 [1.0.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.1 [Metrics 0.24.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.24.0 [1.0.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0 
[1.0.0-RC3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC3 [1.0.0-RC2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC2 [Experimental Metrics v0.22.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.22.0 [1.0.0-RC1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC1 [0.20.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.20.0 [0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.19.0 [0.18.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.18.0 [0.17.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.17.0 [0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.16.0 [0.15.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.15.0 [0.14.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.14.0 [0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.13.0 [0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.12.0 [0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.11.0 [0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.10.0 [0.9.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.9.0 [0.8.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.8.0 [0.7.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.7.0 [0.6.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.6.0 [0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.5.0 [0.4.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.3 [0.4.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.2 [0.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.1 [0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.0 [0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.3.0 [0.2.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.3 [0.2.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.2 [0.2.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1.1 [0.2.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1 [0.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.0 [0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2 [0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 [0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 [Go 1.20]: https://go.dev/doc/go1.20 [Go 1.19]: https://go.dev/doc/go1.19 [Go 1.18]: https://go.dev/doc/go1.18 [metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric [metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric [trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace opentelemetry-go-1.21.0/CODEOWNERS000066400000000000000000000010251452547353200164750ustar00rootroot00000000000000##################################################### # # List of approvers for this repository # ##################################################### # # Learn about membership in OpenTelemetry community: # https://github.com/open-telemetry/community/blob/main/community-membership.md # # # Learn about CODEOWNERS file format: # https://help.github.com/en/articles/about-code-owners # * @MrAlias @Aneurysm9 
@evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu CODEOWNERS @MrAlias @MadVikingGod @pellared opentelemetry-go-1.21.0/CONTRIBUTING.md000066400000000000000000000477121452547353200173470ustar00rootroot00000000000000# Contributing to opentelemetry-go The Go special interest group (SIG) meets regularly. See the OpenTelemetry [community](https://github.com/open-telemetry/community#golang-sdk) repo for information on this and other language SIGs. See the [public meeting notes](https://docs.google.com/document/d/1E5e7Ld0NuU1iVvf-42tOBpu2VBBLYnh73GJuITGJTTU/edit) for a summary description of past meetings. To request edit access, join the meeting or get in touch on [Slack](https://cloud-native.slack.com/archives/C01NPAXACKT). ## Development You can view and edit the source code by cloning this repository: ```sh git clone https://github.com/open-telemetry/opentelemetry-go.git ``` Run `make test` to run the tests instead of `go test`. There are some generated files checked into the repo. To make sure that the generated files are up-to-date, run `make` (or `make precommit` - the `precommit` target is the default). The `precommit` target also fixes the formatting of the code and checks the status of the go module files. Additionally, there is a `codespell` target that checks for common typos in the code. It is not run by default, but you can run it manually with `make codespell`. It will set up a virtual environment in `venv` and install `codespell` there. If after running `make precommit` the output of `git status` contains `nothing to commit, working tree clean` then it means that everything is up-to-date and properly formatted. ## Pull Requests ### How to Send Pull Requests Everyone is welcome to contribute code to `opentelemetry-go` via GitHub pull requests (PRs). To create a new PR, fork the project in GitHub and clone the upstream repo: ```sh go get -d go.opentelemetry.io/otel ``` (This may print some warning about "build constraints exclude all Go files", just ignore it.) This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You can alternatively use `git` directly with: ```sh git clone https://github.com/open-telemetry/opentelemetry-go ``` (Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name - that name is a kind of a redirector to GitHub that `go get` can understand, but `git` does not.) This would put the project in the `opentelemetry-go` directory in the current working directory. Enter the newly created directory and add your fork as a new remote: ```sh git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go ``` Check out a new branch, make modifications, run linters and tests, update `CHANGELOG.md`, and push the branch to your fork: ```sh git checkout -b <YOUR_BRANCH_NAME> # edit files # update changelog make precommit git add -p git commit git push <YOUR_FORK> <YOUR_BRANCH_NAME> ``` Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull request ID to the entry you added to `CHANGELOG.md`. Avoid rebasing and force-pushing to your branch to facilitate reviewing the pull request. Rewriting Git history makes it difficult to keep track of iterations during code review. All pull requests are squashed to a single commit upon merge to `main`. ### How to Receive Comments * If the PR is not ready for review, please put `[WIP]` in the title, tag it as `work-in-progress`, or mark it as [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/). * Make sure CLA is signed and CI is clear. 
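The Development section above lists the relevant Make targets individually; as a rough, non-authoritative sketch, a typical local verification loop before opening a PR might look like the following (the target names are the ones documented above, the ordering is only illustrative):

```sh
# Format, lint, regenerate checked-in files, and run the default checks.
make precommit

# Everything is up to date when the working tree is clean.
git status   # expect: "nothing to commit, working tree clean"

# Optional: spell-check the code (sets up and uses the ./venv virtual environment).
make codespell

# Run the test suite.
make test
```
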
### How to Get PRs Merged A PR is considered **ready to merge** when: * It has received two qualified approvals[^1]. This is not enforced through automation, but needs to be validated by the maintainer merging. * The qualified approvals need to be from [Approver]s/[Maintainer]s affiliated with different companies. Two qualified approvals from [Approver]s or [Maintainer]s affiliated with the same company count as a single qualified approval. * PRs introducing changes that have already been discussed and consensus reached only need one qualified approval. The discussion and resolution need to be linked to the PR. * Trivial changes[^2] only need one qualified approval. * All feedback has been addressed. * All PR comments and suggestions are resolved. * All GitHub Pull Request reviews with a status of "Request changes" have been addressed. Another review by the objecting reviewer with a different status can be submitted to clear the original review, or the review can be dismissed by a [Maintainer] when the issues from the original review have been addressed. * Any comments or reviews that cannot be resolved between the PR author and reviewers can be submitted to the community [Approver]s and [Maintainer]s during the weekly SIG meeting. If consensus is reached among the [Approver]s and [Maintainer]s during the SIG meeting, the objections to the PR may be dismissed or resolved or the PR closed by a [Maintainer]. * Any substantive changes to the PR require existing Approval reviews be cleared unless the approver explicitly states that their approval persists across changes. This includes changes resulting from other feedback. [Approver]s and [Maintainer]s can help in clearing reviews and they should be consulted if there are any questions. * The PR branch is up to date with the base branch it is merging into. * To ensure this does not block the PR, it should be configured to allow maintainers to update it. * It has been open for review for at least one working day. This gives people reasonable time to review. * Trivial changes[^2] do not have to wait for one day and may be merged with a single [Maintainer]'s approval. * All required GitHub workflows have succeeded. * An urgent fix can take exception as long as it has been actively communicated among [Maintainer]s. Any [Maintainer] can merge the PR once the above criteria have been met. [^1]: A qualified approval is a GitHub Pull Request review with "Approve" status from an OpenTelemetry Go [Approver] or [Maintainer]. [^2]: Trivial changes include: typo corrections, cosmetic non-substantive changes, documentation corrections or updates, dependency updates, etc. ## Design Choices As with other OpenTelemetry clients, opentelemetry-go follows the [OpenTelemetry Specification](https://opentelemetry.io/docs/specs/otel). It's especially valuable to read through the [library guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines). ### Focus on Capabilities, Not Structure Compliance OpenTelemetry is an evolving specification, one where the desires and use cases are clear, but the methods to satisfy those use cases are not. As such, contributions should provide functionality and behavior that conforms to the specification, but the interface and structure are flexible. It is preferable to have contributions follow the idioms of the language rather than conform to specific API names or argument patterns in the spec. For a deeper discussion, see [this](https://github.com/open-telemetry/opentelemetry-specification/issues/165). 
## Documentation Each (non-internal, non-test) package must be documented using [Go Doc Comments](https://go.dev/doc/comment), preferably in a `doc.go` file. Prefer using [Examples](https://pkg.go.dev/testing#hdr-Examples) instead of putting code snippets in Go doc comments. In some cases, you can even create [Testable Examples](https://go.dev/blog/examples). You can install and run a "local Go Doc site" in the following way: ```sh go install golang.org/x/pkgsite/cmd/pkgsite@latest pkgsite ``` [`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric) is an example of a very well-documented package. ## Style Guide One of the primary goals of this project is that it is actually used by developers. With this goal in mind the project strives to build user-friendly and idiomatic Go code adhering to the Go community's best practices. For a non-comprehensive but foundational overview of these best practices the [Effective Go](https://golang.org/doc/effective_go.html) documentation is an excellent starting place. As a convenience for developers building this project the `make precommit` will format, lint, validate, and in some cases fix the changes you plan to submit. This check will need to pass for your changes to be able to be merged. In addition to idiomatic Go, the project has adopted certain standards for implementations of common patterns. These standards should be followed as a default, and if they are not followed documentation needs to be included as to the reasons why. ### Configuration When creating an instantiation function for a complex `type T struct`, it is useful to allow variable number of options to be applied. However, the strong type system of Go restricts the function design options. There are a few ways to solve this problem, but we have landed on the following design. #### `config` Configuration should be held in a `struct` named `config`, or prefixed with specific type name this Configuration applies to if there are multiple `config` in the package. This type must contain configuration options. ```go // config contains configuration options for a thing. type config struct { // options ... } ``` In general the `config` type will not need to be used externally to the package and should be unexported. If, however, it is expected that the user will likely want to build custom options for the configuration, the `config` should be exported. Please, include in the documentation for the `config` how the user can extend the configuration. It is important that internal `config` are not shared across package boundaries. Meaning a `config` from one package should not be directly used by another. The one exception is the API packages. The configs from the base API, eg. `go.opentelemetry.io/otel/trace.TracerConfig` and `go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed by the SDK therefore it is expected that these are exported. When a config is exported we want to maintain forward and backward compatibility, to achieve this no fields should be exported but should instead be accessed by methods. Optionally, it is common to include a `newConfig` function (with the same naming scheme). This function wraps any defaults setting and looping over all options to create a configured `config`. ```go // newConfig returns an appropriately configured config. func newConfig(options ...Option) config { // Set default values for config. 
config := config{/* […] */} for _, option := range options { config = option.apply(config) } // Perform any validation here. return config } ``` If validation of the `config` options is also performed this can return an error as well that is expected to be handled by the instantiation function or propagated to the user. Given the design goal of not having the user need to work with the `config`, the `newConfig` function should also be unexported. #### `Option` To set the value of the options a `config` contains, a corresponding `Option` interface type should be used. ```go type Option interface { apply(config) config } ``` Having `apply` unexported makes sure that it will not be used externally. Moreover, the interface becomes sealed so the user cannot easily implement the interface on its own. The `apply` method should return a modified version of the passed config. This approach, instead of passing a pointer, is used to prevent the config from being allocated to the heap. The name of the interface should be prefixed in the same way the corresponding `config` is (if at all). #### Options All user configurable options for a `config` must have a related unexported implementation of the `Option` interface and an exported configuration function that wraps this implementation. The wrapping function name should be prefixed with `With*` (or in the special case of a boolean options `Without*`) and should have the following function signature. ```go func With*(…) Option { … } ``` ##### `bool` Options ```go type defaultFalseOption bool func (o defaultFalseOption) apply(c config) config { c.Bool = bool(o) return c } // WithOption sets a T to have an option included. func WithOption() Option { return defaultFalseOption(true) } ``` ```go type defaultTrueOption bool func (o defaultTrueOption) apply(c config) config { c.Bool = bool(o) return c } // WithoutOption sets a T to have Bool option excluded. func WithoutOption() Option { return defaultTrueOption(false) } ``` ##### Declared Type Options ```go type myTypeOption struct { MyType MyType } func (o myTypeOption) apply(c config) config { c.MyType = o.MyType return c } // WithMyType sets T to have include MyType. func WithMyType(t MyType) Option { return myTypeOption{t} } ``` ##### Functional Options ```go type optionFunc func(config) config func (fn optionFunc) apply(c config) config { return fn(c) } // WithMyType sets t as MyType. func WithMyType(t MyType) Option { return optionFunc(func(c config) config { c.MyType = t return c }) } ``` #### Instantiation Using this configuration pattern to configure instantiation with a `NewT` function. ```go func NewT(options ...Option) T {…} ``` Any required parameters can be declared before the variadic `options`. #### Dealing with Overlap Sometimes there are multiple complex `struct` that share common configuration and also have distinct configuration. To avoid repeated portions of `config`s, a common `config` can be used with the union of options being handled with the `Option` interface. For example. ```go // config holds options for all animals. type config struct { Weight float64 Color string MaxAltitude float64 } // DogOption apply Dog specific options. type DogOption interface { applyDog(config) config } // BirdOption apply Bird specific options. type BirdOption interface { applyBird(config) config } // Option apply options for all animals. 
type Option interface { BirdOption DogOption } type weightOption float64 func (o weightOption) applyDog(c config) config { c.Weight = float64(o) return c } func (o weightOption) applyBird(c config) config { c.Weight = float64(o) return c } func WithWeight(w float64) Option { return weightOption(w) } type furColorOption string func (o furColorOption) applyDog(c config) config { c.Color = string(o) return c } func WithFurColor(c string) DogOption { return furColorOption(c) } type maxAltitudeOption float64 func (o maxAltitudeOption) applyBird(c config) config { c.MaxAltitude = float64(o) return c } func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) } func NewDog(name string, o ...DogOption) Dog {…} func NewBird(name string, o ...BirdOption) Bird {…} ``` ### Interfaces To allow other developers to better comprehend the code, it is important to ensure it is sufficiently documented. One simple measure that contributes to this aim is self-documenting by naming method parameters. Therefore, where appropriate, methods of every exported interface type should have their parameters appropriately named. #### Interface Stability All exported stable interfaces that include the following warning in their documentation are allowed to be extended with additional methods. > Warning: methods may be added to this interface in minor releases. These interfaces are defined by the OpenTelemetry specification and will be updated as the specification evolves. Otherwise, stable interfaces MUST NOT be modified. #### How to Change Specification Interfaces When an API change must be made, we will update the SDK with the new method one release before the API change. This will allow the SDK one version before the API change to work seamlessly with the new API. If an incompatible version of the SDK is used with the new API the application will fail to compile. #### How Not to Change Specification Interfaces We have explored using a v2 of the API to change interfaces and found that there was no way to introduce a v2 and have it work seamlessly with the v1 of the API. Problems happened with libraries that upgraded to v2 when an application did not, and would not produce any telemetry. More detail of the approaches considered and their limitations can be found in the [Use a V2 API to evolve interfaces](https://github.com/open-telemetry/opentelemetry-go/issues/3920) issue. #### How to Change Other Interfaces If new functionality is needed for an interface that cannot be changed it MUST be added by including an additional interface. That added interface can be a simple interface for the specific functionality that you want to add or it can be a super-set of the original interface. For example, if you wanted to a `Close` method to the `Exporter` interface: ```go type Exporter interface { Export() } ``` A new interface, `Closer`, can be added: ```go type Closer interface { Close() } ``` Code that is passed the `Exporter` interface can now check to see if the passed value also satisfies the new interface. E.g. ```go func caller(e Exporter) { /* ... */ if c, ok := e.(Closer); ok { c.Close() } /* ... */ } ``` Alternatively, a new type that is the super-set of an `Exporter` can be created. ```go type ClosingExporter struct { Exporter Close() } ``` This new type can be used similar to the simple interface above in that a passed `Exporter` type can be asserted to satisfy the `ClosingExporter` type and the `Close` method called. 
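For example, a minimal sketch of that assertion, assuming the value passed as an `Exporter` is in fact a `*ClosingExporter` (mirroring the `Closer` example above):

```go
func caller(e Exporter) {
	/* ... */
	if ce, ok := e.(*ClosingExporter); ok {
		// The concrete super-set type provides the added functionality.
		ce.Close()
	}
	/* ... */
}
```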
This super-set approach can be useful if there is explicit behavior that needs to be coupled with the original type and passed as a unified type to a new function, but, because of this coupling, it also limits the applicability of the added functionality. If there exist other interfaces where this functionality should be added, each one will need their own super-set interfaces and will duplicate the pattern. For this reason, the simple targeted interface that defines the specific functionality should be preferred. ### Testing The tests should never leak goroutines. Use the term `ConcurrentSafe` in the test name when it aims to verify the absence of race conditions. ### Internal packages The use of internal packages should be scoped to a single module. A sub-module should never import from a parent internal package. This creates a coupling between the two modules where a user can upgrade the parent without the child and if the internal package API has changed it will fail to upgrade[^3]. There are two known exceptions to this rule: - `go.opentelemetry.io/otel/internal/global` - This package manages global state for all of opentelemetry-go. It needs to be a single package in order to ensure the uniqueness of the global state. - `go.opentelemetry.io/otel/internal/baggage` - This package provides values in a `context.Context` that need to be recognized by `go.opentelemetry.io/otel/baggage` and `go.opentelemetry.io/otel/bridge/opentracing` but remain private. If you have duplicate code in multiple modules, make that code into a Go template stored in `go.opentelemetry.io/otel/internal/shared` and use [gotmpl] to render the templates in the desired locations. See [#4404] for an example of this. [^3]: https://github.com/open-telemetry/opentelemetry-go/issues/3548 ## Approvers and Maintainers ### Approvers - [Evan Torrie](https://github.com/evantorrie), Verizon Media - [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics - [David Ashpole](https://github.com/dashpole), Google - [Chester Cheung](https://github.com/hanyuancheung), Tencent - [Damien Mathieu](https://github.com/dmathieu), Elastic - [Anthony Mirabella](https://github.com/Aneurysm9), AWS ### Maintainers - [Aaron Clawson](https://github.com/MadVikingGod), LightStep - [Robert Pająk](https://github.com/pellared), Splunk - [Tyler Yahn](https://github.com/MrAlias), Splunk ### Emeritus - [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep - [Josh MacDonald](https://github.com/jmacd), LightStep ### Become an Approver or a Maintainer See the [community membership document in OpenTelemetry community repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). [Approver]: #approvers [Maintainer]: #maintainers [gotmpl]: https://pkg.go.dev/go.opentelemetry.io/build-tools/gotmpl [#4404]: https://github.com/open-telemetry/opentelemetry-go/pull/4404 opentelemetry-go-1.21.0/LICENSE000066400000000000000000000261351452547353200161200ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. opentelemetry-go-1.21.0/Makefile000066400000000000000000000250521452547353200165500ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. TOOLS_MOD_DIR := ./internal/tools ALL_DOCS := $(shell find . -name '*.md' -type f | sort) ALL_GO_MOD_DIRS := $(shell find . 
-type f -name 'go.mod' -exec dirname {} \; | sort) OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS)) ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort) GO = go TIMEOUT = 60 .DEFAULT_GOAL := precommit .PHONY: precommit ci precommit: generate dependabot-generate license-check misspell go-mod-tidy golangci-lint-fix test-default ci: generate dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage # Tools TOOLS = $(CURDIR)/.tools $(TOOLS): @mkdir -p $@ $(TOOLS)/%: | $(TOOLS) cd $(TOOLS_MOD_DIR) && \ $(GO) build -o $@ $(PACKAGE) MULTIMOD = $(TOOLS)/multimod $(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod SEMCONVGEN = $(TOOLS)/semconvgen $(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen CROSSLINK = $(TOOLS)/crosslink $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink SEMCONVKIT = $(TOOLS)/semconvkit $(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit DBOTCONF = $(TOOLS)/dbotconf $(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf GOLANGCI_LINT = $(TOOLS)/golangci-lint $(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint MISSPELL = $(TOOLS)/misspell $(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell GOCOVMERGE = $(TOOLS)/gocovmerge $(TOOLS)/gocovmerge: PACKAGE=github.com/wadey/gocovmerge STRINGER = $(TOOLS)/stringer $(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer PORTO = $(TOOLS)/porto $(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto GOJQ = $(TOOLS)/gojq $(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq GOTMPL = $(TOOLS)/gotmpl $(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl GORELEASE = $(TOOLS)/gorelease $(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease GOVULNCHECK = $(TOOLS)/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck .PHONY: tools tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) # Virtualized python tools via docker # The directory where the virtual environment is created. VENVDIR := venv # The directory where the python tools are installed. PYTOOLS := $(VENVDIR)/bin # The pip executable in the virtual environment. PIP := $(PYTOOLS)/pip # The directory in the docker image where the current directory is mounted. WORKDIR := /workdir # The python image to use for the virtual environment. PYTHONIMAGE := python:3.11.3-slim-bullseye # Run the python image with the current directory mounted. DOCKERPY := docker run --rm -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE) # Create a virtual environment for Python tools. $(PYTOOLS): # The `--upgrade` flag is needed to ensure that the virtual environment is # created with the latest pip version. @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip" # Install python packages into the virtual environment. $(PYTOOLS)/%: | $(PYTOOLS) @$(DOCKERPY) $(PIP) install -r requirements.txt CODESPELL = $(PYTOOLS)/codespell $(CODESPELL): PACKAGE=codespell # Generate .PHONY: generate generate: go-generate vanity-import-fix .PHONY: go-generate go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%) go-generate/%: DIR=$* go-generate/%: | $(STRINGER) $(GOTMPL) @echo "$(GO) generate $(DIR)/..." 
\ && cd $(DIR) \ && PATH="$(TOOLS):$${PATH}" $(GO) generate ./... .PHONY: vanity-import-fix vanity-import-fix: | $(PORTO) @$(PORTO) --include-internal -w . # Generate go.work file for local development. .PHONY: go-work go-work: | $(CROSSLINK) $(CROSSLINK) work --root=$(shell pwd) # Build .PHONY: build build: $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%) build/%: DIR=$* build/%: @echo "$(GO) build $(DIR)/..." \ && cd $(DIR) \ && $(GO) build ./... build-tests/%: DIR=$* build-tests/%: @echo "$(GO) build tests $(DIR)/..." \ && cd $(DIR) \ && $(GO) list ./... \ | grep -v third_party \ | xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null # Tests TEST_TARGETS := test-default test-bench test-short test-verbose test-race .PHONY: $(TEST_TARGETS) test test-default test-race: ARGS=-race test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. test-short: ARGS=-short test-verbose: ARGS=-v -race $(TEST_TARGETS): test test: $(OTEL_GO_MOD_DIRS:%=test/%) test/%: DIR=$* test/%: @echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $(DIR)/..." \ && cd $(DIR) \ && $(GO) list ./... \ | grep -v third_party \ | xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS) COVERAGE_MODE = atomic COVERAGE_PROFILE = coverage.out .PHONY: test-coverage test-coverage: | $(GOCOVMERGE) @set -e; \ printf "" > coverage.txt; \ for dir in $(ALL_COVERAGE_MOD_DIRS); do \ echo "$(GO) test -coverpkg=go.opentelemetry.io/otel/... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \ (cd "$${dir}" && \ $(GO) list ./... \ | grep -v third_party \ | grep -v 'semconv/v.*' \ | xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \ $(GO) tool cover -html=coverage.out -o coverage.html); \ done; \ $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt # Adding a directory will include all benchmarks in that direcotry if a filter is not specified. BENCHMARK_TARGETS := sdk/trace .PHONY: benchmark benchmark: $(BENCHMARK_TARGETS:%=benchmark/%) BENCHMARK_FILTER = . # You can override the filter for a particular directory by adding a rule here. benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample benchmark/%: @echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \ && cd $* \ $(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter)) .PHONY: golangci-lint golangci-lint-fix golangci-lint-fix: ARGS=--fix golangci-lint-fix: golangci-lint golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%) golangci-lint/%: DIR=$* golangci-lint/%: | $(GOLANGCI_LINT) @echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \ && cd $(DIR) \ && $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS) .PHONY: crosslink crosslink: | $(CROSSLINK) @echo "Updating intra-repository dependencies in all go modules" \ && $(CROSSLINK) --root=$(shell pwd) --prune .PHONY: go-mod-tidy go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%) go-mod-tidy/%: DIR=$* go-mod-tidy/%: | crosslink @echo "$(GO) mod tidy in $(DIR)" \ && cd $(DIR) \ && $(GO) mod tidy -compat=1.20 .PHONY: lint-modules lint-modules: go-mod-tidy .PHONY: lint lint: misspell lint-modules golangci-lint govulncheck .PHONY: vanity-import-check vanity-import-check: | $(PORTO) @$(PORTO) --include-internal -l . 
|| ( echo "(run: make vanity-import-fix)"; exit 1 ) .PHONY: misspell misspell: | $(MISSPELL) @$(MISSPELL) -w $(ALL_DOCS) .PHONY: govulncheck govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%) govulncheck/%: DIR=$* govulncheck/%: | $(GOVULNCHECK) @echo "govulncheck ./... in $(DIR)" \ && cd $(DIR) \ && $(GOVULNCHECK) ./... .PHONY: codespell codespell: | $(CODESPELL) @$(DOCKERPY) $(CODESPELL) .PHONY: license-check license-check: @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \ awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=4 { found=1; next } END { if (!found) print FILENAME }' $$f; \ done); \ if [ -n "$${licRes}" ]; then \ echo "license header checking failed:"; echo "$${licRes}"; \ exit 1; \ fi DEPENDABOT_CONFIG = .github/dependabot.yml .PHONY: dependabot-check dependabot-check: | $(DBOTCONF) @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || ( echo "(run: make dependabot-generate)"; exit 1 ) .PHONY: dependabot-generate dependabot-generate: | $(DBOTCONF) @$(DBOTCONF) generate > $(DEPENDABOT_CONFIG) .PHONY: check-clean-work-tree check-clean-work-tree: @if ! git diff --quiet; then \ echo; \ echo 'Working tree is not clean, did you forget to run "make precommit"?'; \ echo; \ git status; \ exit 1; \ fi SEMCONVPKG ?= "semconv/" .PHONY: semconv-generate semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT) [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." 
--only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" .PHONY: gorelease gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%) gorelease/%: DIR=$* gorelease/%:| $(GORELEASE) @echo "gorelease in $(DIR):" \ && cd $(DIR) \ && $(GORELEASE) \ || echo "" .PHONY: prerelease prerelease: | $(MULTIMOD) @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) $(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET} COMMIT ?= "HEAD" .PHONY: add-tags add-tags: | $(MULTIMOD) @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} .PHONY: lint-markdown lint-markdown: docker run -v "$(CURDIR):$(WORKDIR)" docker://avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md opentelemetry-go-1.21.0/README.md000066400000000000000000000120151452547353200163620ustar00rootroot00000000000000# OpenTelemetry-Go [![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain) [![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main) [![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel) [![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) [![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT) OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/). It provides a set of APIs to directly measure performance and behavior of your software and send this data to observability platforms. ## Project Status | Signal | Status | |---------|------------| | Traces | Stable | | Metrics | Stable | | Logs | Design [1] | - [1]: Currently the logs signal development is in a design phase ([#4696](https://github.com/open-telemetry/opentelemetry-go/issues/4696)). No Logs Pull Requests are currently being accepted. Progress and status specific to this repository is tracked in our [project boards](https://github.com/open-telemetry/opentelemetry-go/projects) and [milestones](https://github.com/open-telemetry/opentelemetry-go/milestones). Project versioning information and stability guarantees can be found in the [versioning documentation](VERSIONING.md). ### Compatibility OpenTelemetry-Go ensures compatibility with the current supported versions of the [Go language](https://golang.org/doc/devel/release#policy): > Each major Go release is supported until there are two newer major releases. > For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release. For versions of Go that are no longer supported upstream, opentelemetry-go will stop ensuring compatibility with these versions in the following manner: - A minor release of opentelemetry-go will be made to add support for the new supported release of Go. - The following minor release of opentelemetry-go will remove compatibility testing for the oldest (now archived upstream) version of Go. This, and future, releases of opentelemetry-go may include features only supported by the currently supported versions of Go. 
Currently, this project supports the following environments. | OS | Go Version | Architecture | |---------|------------|--------------| | Ubuntu | 1.21 | amd64 | | Ubuntu | 1.20 | amd64 | | Ubuntu | 1.21 | 386 | | Ubuntu | 1.20 | 386 | | MacOS | 1.21 | amd64 | | MacOS | 1.20 | amd64 | | Windows | 1.21 | amd64 | | Windows | 1.20 | amd64 | | Windows | 1.21 | 386 | | Windows | 1.20 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. ## Getting Started You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/go/getting-started/). OpenTelemetry's goal is to provide a single set of APIs to capture distributed traces and metrics from your application and send them to an observability platform. This project allows you to do just that for applications written in Go. There are two steps to this process: instrument your application, and configure an exporter. ### Instrumentation To start capturing distributed traces and metric events from your application it first needs to be instrumented. The easiest way to do this is by using an instrumentation library for your code. Be sure to check out [the officially supported instrumentation libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation). If you need to extend the telemetry an instrumentation library provides or want to build your own instrumentation for your application directly you will need to use the [Go otel](https://pkg.go.dev/go.opentelemetry.io/otel) package. The included [examples](./example/) are a good way to see some practical uses of this process. ### Export Now that your application is instrumented to collect telemetry, it needs an export pipeline to send that telemetry to an observability platform. All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters). | Exporter | Metrics | Traces | |---------------------------------------|:-------:|:------:| | [OTLP](./exporters/otlp/) | ✓ | ✓ | | [Prometheus](./exporters/prometheus/) | ✓ | | | [stdout](./exporters/stdout/) | ✓ | ✓ | | [Zipkin](./exporters/zipkin/) | | ✓ | ## Contributing See the [contributing documentation](CONTRIBUTING.md). opentelemetry-go-1.21.0/RELEASING.md000066400000000000000000000131641452547353200167440ustar00rootroot00000000000000# Release Process ## Semantic Convention Generation New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated. The `semconv-generate` make target is used for this. 1. Checkout a local copy of the [OpenTelemetry Semantic Conventions] to the desired release tag. 2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest` 3. Run the `make semconv-generate ...` target from this repository. For example, ```sh export TAG="v1.21.0" # Change to the release version you are generating. export OTEL_SEMCONV_REPO="/absolute/path/to/opentelemetry/semantic-conventions" docker pull otel/semconvgen:latest make semconv-generate # Uses the exported TAG and OTEL_SEMCONV_REPO. ``` This should create a new sub-package of [`semconv`](./semconv). Ensure things look correct before submitting a pull request to include the addition. ## Breaking changes validation You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes done in the public API. 
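For example, a typical check might look like the following; the module directory and version tags are illustrative only, and the direct invocation assumes `gorelease` is installed locally:

```sh
# Run the repository-wide check via the make target.
make gorelease

# Or run gorelease directly in a single module against a chosen base release.
cd metric
gorelease -base=v1.20.0 -version=v1.21.0
```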
You can check/report problems with `gorelease` [here](https://golang.org/issues/26420). ## Pre-Release First, decide which module sets will be released and update their versions in `versions.yaml`. Commit this change to a new branch. Update go.mod for submodules to depend on the new release which will happen in the next step. 1. Run the `prerelease` make target. It creates a branch `prerelease__` that will contain all release changes. ``` make prerelease MODSET= ``` 2. Verify the changes. ``` git diff ...prerelease__ ``` This should have changed the version for all modules to be ``. If these changes look correct, merge them into your pre-release branch: ```go git merge prerelease__ ``` 3. Update the [Changelog](./CHANGELOG.md). - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand. To verify this, you can look directly at the commits since the ``. ``` git --no-pager log --pretty=oneline "..HEAD" ``` - Move all the `Unreleased` changes into a new section following the title scheme (`[] - `). - Update all the appropriate links at the bottom. 4. Push the changes to upstream and create a Pull Request on GitHub. Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description. ## Tag Once the Pull Request with all the version changes has been approved and merged it is time to tag the merged commit. ***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step! Failure to do so will leave things in a broken state. As long as you do not change `versions.yaml` between pre-release and this step, things should be fine. ***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189). It is critical you make sure the version you push upstream is correct. [Failure to do so will lead to minor emergencies and tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331). 1. For each module set that will be released, run the `add-tags` make target using the `` of the commit on the main branch for the merged Pull Request. ``` make add-tags MODSET= COMMIT= ``` It should only be necessary to provide an explicit `COMMIT` value if the current `HEAD` of your working directory is not the correct commit. 2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`). Make sure you push all sub-modules as well. ``` git push upstream git push upstream ... ``` ## Release Finally create a Release for the new `` on GitHub. The release body should include all the release notes from the Changelog for this release. ## Verify Examples After releasing verify that examples build outside of the repository. ``` ./verify_examples.sh ``` The script copies examples into a different directory removes any `replace` declarations in `go.mod` and builds them. This ensures they build with the published release, not the local copy. ## Post-Release ### Contrib Repository Once verified be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md) that uses this release. ### Website Documentation Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/instrumentation/go]. Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate. 
[OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions [Go instrumentation documentation]: https://opentelemetry.io/docs/instrumentation/go/ [content/en/docs/instrumentation/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/instrumentation/go ### Demo Repository Bump the dependencies in the following Go services: - [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice) - [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice) - [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice) opentelemetry-go-1.21.0/VERSIONING.md000066400000000000000000000253541452547353200171220ustar00rootroot00000000000000# Versioning This document describes the versioning policy for this repository. This policy is designed so the following goals can be achieved. **Users are provided a codebase of value that is stable and secure.** ## Policy * Versioning of this project will be idiomatic of a Go project using [Go modules](https://github.com/golang/go/wiki/Modules). * [Semantic import versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) will be used. * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html) with the following exceptions. * New methods may be added to exported API interfaces. All exported interfaces that fall within this exception will include the following paragraph in their public documentation. > Warning: methods may be added to this interface in minor releases. * If a module is version `v2` or higher, the major version of the module must be included as a `/vN` at the end of the module paths used in `go.mod` files (e.g., `module go.opentelemetry.io/otel/v2`, `require go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the paths used in `go get` commands (e.g., `go get go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a `@v2.0.1` in that example. One way to think about it is that the module name now includes the `/v2`, so include `/v2` whenever you are using the module name). * If a module is version `v0` or `v1`, do not include the major version in either the module path or the import path. * Modules will be used to encapsulate signals and components. * Experimental modules still under active development will be versioned at `v0` to imply the stability guarantee defined by [semver](https://semver.org/spec/v2.0.0.html#spec-item-4). > Major version zero (0.y.z) is for initial development. Anything MAY > change at any time. The public API SHOULD NOT be considered stable. * Mature modules for which we guarantee a stable public API will be versioned with a major version greater than `v0`. * The decision to make a module stable will be made on a case-by-case basis by the maintainers of this project. * Experimental modules will start their versioning at `v0.0.0` and will increment their minor version when backwards incompatible changes are released and increment their patch version when backwards compatible changes are released. * All stable modules that use the same major version number will use the same entire version number. 
* Stable modules may be released with an incremented minor or patch version even though that module has not been changed, but rather so that it will remain at the same version as other stable modules that did undergo change. * When an experimental module becomes stable a new stable module version will be released and will include this now stable module. The new stable module version will be an increment of the minor version number and will be applied to all existing stable modules as well as the newly stable module being released. * Versioning of the associated [contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib) of this project will be idiomatic of a Go project using [Go modules](https://github.com/golang/go/wiki/Modules). * [Semantic import versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) will be used. * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html). * If a module is version `v2` or higher, the major version of the module must be included as a `/vN` at the end of the module paths used in `go.mod` files (e.g., `module go.opentelemetry.io/contrib/instrumentation/host/v2`, `require go.opentelemetry.io/contrib/instrumentation/host/v2 v2.0.1`) and in the package import path (e.g., `import "go.opentelemetry.io/contrib/instrumentation/host/v2"`). This includes the paths used in `go get` commands (e.g., `go get go.opentelemetry.io/contrib/instrumentation/host/v2@v2.0.1`. Note there is both a `/v2` and a `@v2.0.1` in that example. One way to think about it is that the module name now includes the `/v2`, so include `/v2` whenever you are using the module name). * If a module is version `v0` or `v1`, do not include the major version in either the module path or the import path. * In addition to public APIs, telemetry produced by stable instrumentation will remain stable and backwards compatible. This is to avoid breaking alerts and dashboard. * Modules will be used to encapsulate instrumentation, detectors, exporters, propagators, and any other independent sets of related components. * Experimental modules still under active development will be versioned at `v0` to imply the stability guarantee defined by [semver](https://semver.org/spec/v2.0.0.html#spec-item-4). > Major version zero (0.y.z) is for initial development. Anything MAY > change at any time. The public API SHOULD NOT be considered stable. * Mature modules for which we guarantee a stable public API and telemetry will be versioned with a major version greater than `v0`. * Experimental modules will start their versioning at `v0.0.0` and will increment their minor version when backwards incompatible changes are released and increment their patch version when backwards compatible changes are released. * Stable contrib modules cannot depend on experimental modules from this project. * All stable contrib modules of the same major version with this project will use the same entire version as this project. * Stable modules may be released with an incremented minor or patch version even though that module's code has not been changed. Instead the only change that will have been included is to have updated that modules dependency on this project's stable APIs. * When an experimental module in contrib becomes stable a new stable module version will be released and will include this now stable module. 
The new stable module version will be an increment of the minor version number and will be applied to all existing stable contrib modules, this project's modules, and the newly stable module being released. * Contrib modules will be kept up to date with this project's releases. * Due to the dependency contrib modules will implicitly have on this project's modules the release of stable contrib modules to match the released version number will be staggered after this project's release. There is no explicit time guarantee for how long after this projects release the contrib release will be. Effort should be made to keep them as close in time as possible. * No additional stable release in this project can be made until the contrib repository has a matching stable release. * No release can be made in the contrib repository after this project's stable release except for a stable release of the contrib repository. * GitHub releases will be made for all releases. * Go modules will be made available at Go package mirrors. ## Example Versioning Lifecycle To better understand the implementation of the above policy the following example is provided. This project is simplified to include only the following modules and their versions: * `otel`: `v0.14.0` * `otel/trace`: `v0.14.0` * `otel/metric`: `v0.14.0` * `otel/baggage`: `v0.14.0` * `otel/sdk/trace`: `v0.14.0` * `otel/sdk/metric`: `v0.14.0` These modules have been developed to a point where the `otel/trace`, `otel/baggage`, and `otel/sdk/trace` modules have reached a point that they should be considered for a stable release. The `otel/metric` and `otel/sdk/metric` are still under active development and the `otel` module depends on both `otel/trace` and `otel/metric`. The `otel` package is refactored to remove its dependencies on `otel/metric` so it can be released as stable as well. With that done the following release candidates are made: * `otel`: `v1.0.0-RC1` * `otel/trace`: `v1.0.0-RC1` * `otel/baggage`: `v1.0.0-RC1` * `otel/sdk/trace`: `v1.0.0-RC1` The `otel/metric` and `otel/sdk/metric` modules remain at `v0.14.0`. A few minor issues are discovered in the `otel/trace` package. These issues are resolved with some minor, but backwards incompatible, changes and are released as a second release candidate: * `otel`: `v1.0.0-RC2` * `otel/trace`: `v1.0.0-RC2` * `otel/baggage`: `v1.0.0-RC2` * `otel/sdk/trace`: `v1.0.0-RC2` Notice that all module version numbers are incremented to adhere to our versioning policy. After these release candidates have been evaluated to satisfaction, they are released as version `v1.0.0`. * `otel`: `v1.0.0` * `otel/trace`: `v1.0.0` * `otel/baggage`: `v1.0.0` * `otel/sdk/trace`: `v1.0.0` Since both the `go` utility and the Go module system support [the semantic versioning definition of precedence](https://semver.org/spec/v2.0.0.html#spec-item-11), this release will correctly be interpreted as the successor to the previous release candidates. Active development of this project continues. The `otel/metric` module now has backwards incompatible changes to its API that need to be released and the `otel/baggage` module has a minor bug fix that needs to be released. The following release is made: * `otel`: `v1.0.1` * `otel/trace`: `v1.0.1` * `otel/metric`: `v0.15.0` * `otel/baggage`: `v1.0.1` * `otel/sdk/trace`: `v1.0.1` * `otel/sdk/metric`: `v0.15.0` Notice that, again, all stable module versions are incremented in unison and the `otel/sdk/metric` package, which depends on the `otel/metric` package, also bumped its version. 
This bump of the `otel/sdk/metric` package makes sense given their coupling, though it is not explicitly required by our versioning policy. As we progress, the `otel/metric` and `otel/sdk/metric` packages have reached a point where they should be evaluated for stability. The `otel` module is reintegrated with the `otel/metric` package and the following release is made: * `otel`: `v1.1.0-RC1` * `otel/trace`: `v1.1.0-RC1` * `otel/metric`: `v1.1.0-RC1` * `otel/baggage`: `v1.1.0-RC1` * `otel/sdk/trace`: `v1.1.0-RC1` * `otel/sdk/metric`: `v1.1.0-RC1` All the modules are evaluated and determined to a viable stable release. They are then released as version `v1.1.0` (the minor version is incremented to indicate the addition of new signal). * `otel`: `v1.1.0` * `otel/trace`: `v1.1.0` * `otel/metric`: `v1.1.0` * `otel/baggage`: `v1.1.0` * `otel/sdk/trace`: `v1.1.0` * `otel/sdk/metric`: `v1.1.0` opentelemetry-go-1.21.0/attribute/000077500000000000000000000000001452547353200171075ustar00rootroot00000000000000opentelemetry-go-1.21.0/attribute/benchmark_test.go000066400000000000000000000143451452547353200224360ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package attribute_test import ( "testing" "go.opentelemetry.io/otel/attribute" ) // Store results in a file scope var to ensure compiler does not optimize the // test away. 
var ( outV attribute.Value outKV attribute.KeyValue outBool bool outBoolSlice []bool outInt64 int64 outInt64Slice []int64 outFloat64 float64 outFloat64Slice []float64 outStr string outStrSlice []string ) func benchmarkEmit(kv attribute.KeyValue) func(*testing.B) { return func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outStr = kv.Value.Emit() } } } func BenchmarkBool(b *testing.B) { k, v := "bool", true kv := attribute.Bool(k, v) b.Run("Value", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outV = attribute.BoolValue(v) } }) b.Run("KeyValue", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outKV = attribute.Bool(k, v) } }) b.Run("AsBool", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outBool = kv.Value.AsBool() } }) b.Run("Emit", benchmarkEmit(kv)) } func BenchmarkBoolSlice(b *testing.B) { k, v := "bool slice", []bool{true, false, true} kv := attribute.BoolSlice(k, v) b.Run("Value", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outV = attribute.BoolSliceValue(v) } }) b.Run("KeyValue", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outKV = attribute.BoolSlice(k, v) } }) b.Run("AsBoolSlice", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outBoolSlice = kv.Value.AsBoolSlice() } }) b.Run("Emit", benchmarkEmit(kv)) } func BenchmarkInt(b *testing.B) { k, v := "int", int(42) kv := attribute.Int(k, v) b.Run("Value", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outV = attribute.IntValue(v) } }) b.Run("KeyValue", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outKV = attribute.Int(k, v) } }) b.Run("Emit", benchmarkEmit(kv)) } func BenchmarkIntSlice(b *testing.B) { k, v := "int slice", []int{42, -3, 12} kv := attribute.IntSlice(k, v) b.Run("Value", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outV = attribute.IntSliceValue(v) } }) b.Run("KeyValue", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outKV = attribute.IntSlice(k, v) } }) b.Run("Emit", benchmarkEmit(kv)) } func BenchmarkInt64(b *testing.B) { k, v := "int64", int64(42) kv := attribute.Int64(k, v) b.Run("Value", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outV = attribute.Int64Value(v) } }) b.Run("KeyValue", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outKV = attribute.Int64(k, v) } }) b.Run("AsInt64", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outInt64 = kv.Value.AsInt64() } }) b.Run("Emit", benchmarkEmit(kv)) } func BenchmarkInt64Slice(b *testing.B) { k, v := "int64 slice", []int64{42, -3, 12} kv := attribute.Int64Slice(k, v) b.Run("Value", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outV = attribute.Int64SliceValue(v) } }) b.Run("KeyValue", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outKV = attribute.Int64Slice(k, v) } }) b.Run("AsInt64Slice", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outInt64Slice = kv.Value.AsInt64Slice() } }) b.Run("Emit", benchmarkEmit(kv)) } func BenchmarkFloat64(b *testing.B) { k, v := "float64", float64(42) kv := attribute.Float64(k, v) b.Run("Value", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outV = attribute.Float64Value(v) } }) b.Run("KeyValue", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outKV = attribute.Float64(k, v) } }) b.Run("AsFloat64", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outFloat64 = kv.Value.AsFloat64() } }) 
b.Run("Emit", benchmarkEmit(kv)) } func BenchmarkFloat64Slice(b *testing.B) { k, v := "float64 slice", []float64{42, -3, 12} kv := attribute.Float64Slice(k, v) b.Run("Value", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outV = attribute.Float64SliceValue(v) } }) b.Run("KeyValue", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outKV = attribute.Float64Slice(k, v) } }) b.Run("AsFloat64Slice", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outFloat64Slice = kv.Value.AsFloat64Slice() } }) b.Run("Emit", benchmarkEmit(kv)) } func BenchmarkString(b *testing.B) { k, v := "string", "42" kv := attribute.String(k, v) b.Run("Value", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outV = attribute.StringValue(v) } }) b.Run("KeyValue", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outKV = attribute.String(k, v) } }) b.Run("AsString", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outStr = kv.Value.AsString() } }) b.Run("Emit", benchmarkEmit(kv)) } func BenchmarkStringSlice(b *testing.B) { k, v := "float64 slice", []string{"forty-two", "negative three", "twelve"} kv := attribute.StringSlice(k, v) b.Run("Value", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outV = attribute.StringSliceValue(v) } }) b.Run("KeyValue", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outKV = attribute.StringSlice(k, v) } }) b.Run("AsStringSlice", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { outStrSlice = kv.Value.AsStringSlice() } }) b.Run("Emit", benchmarkEmit(kv)) } opentelemetry-go-1.21.0/attribute/doc.go000066400000000000000000000013171452547353200202050ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package attribute provides key and value attributes. package attribute // import "go.opentelemetry.io/otel/attribute" opentelemetry-go-1.21.0/attribute/encoder.go000066400000000000000000000110101452547353200210460ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package attribute // import "go.opentelemetry.io/otel/attribute" import ( "bytes" "sync" "sync/atomic" ) type ( // Encoder is a mechanism for serializing an attribute set into a specific // string representation that supports caching, to avoid repeated // serialization. An example could be an exporter encoding the attribute // set into a wire representation. 
Encoder interface { // Encode returns the serialized encoding of the attribute set using // its Iterator. This result may be cached by a attribute.Set. Encode(iterator Iterator) string // ID returns a value that is unique for each class of attribute // encoder. Attribute encoders allocate these using `NewEncoderID`. ID() EncoderID } // EncoderID is used to identify distinct Encoder // implementations, for caching encoded results. EncoderID struct { value uint64 } // defaultAttrEncoder uses a sync.Pool of buffers to reduce the number of // allocations used in encoding attributes. This implementation encodes a // comma-separated list of key=value, with '/'-escaping of '=', ',', and // '\'. defaultAttrEncoder struct { // pool is a pool of attribute set builders. The buffers in this pool // grow to a size that most attribute encodings will not allocate new // memory. pool sync.Pool // *bytes.Buffer } ) // escapeChar is used to ensure uniqueness of the attribute encoding where // keys or values contain either '=' or ','. Since there is no parser needed // for this encoding and its only requirement is to be unique, this choice is // arbitrary. Users will see these in some exporters (e.g., stdout), so the // backslash ('\') is used as a conventional choice. const escapeChar = '\\' var ( _ Encoder = &defaultAttrEncoder{} // encoderIDCounter is for generating IDs for other attribute encoders. encoderIDCounter uint64 defaultEncoderOnce sync.Once defaultEncoderID = NewEncoderID() defaultEncoderInstance *defaultAttrEncoder ) // NewEncoderID returns a unique attribute encoder ID. It should be called // once per each type of attribute encoder. Preferably in init() or in var // definition. func NewEncoderID() EncoderID { return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)} } // DefaultEncoder returns an attribute encoder that encodes attributes in such // a way that each escaped attribute's key is followed by an equal sign and // then by an escaped attribute's value. All key-value pairs are separated by // a comma. // // Escaping is done by prepending a backslash before either a backslash, equal // sign or a comma. func DefaultEncoder() Encoder { defaultEncoderOnce.Do(func() { defaultEncoderInstance = &defaultAttrEncoder{ pool: sync.Pool{ New: func() interface{} { return &bytes.Buffer{} }, }, } }) return defaultEncoderInstance } // Encode is a part of an implementation of the AttributeEncoder interface. func (d *defaultAttrEncoder) Encode(iter Iterator) string { buf := d.pool.Get().(*bytes.Buffer) defer d.pool.Put(buf) buf.Reset() for iter.Next() { i, keyValue := iter.IndexedAttribute() if i > 0 { _, _ = buf.WriteRune(',') } copyAndEscape(buf, string(keyValue.Key)) _, _ = buf.WriteRune('=') if keyValue.Value.Type() == STRING { copyAndEscape(buf, keyValue.Value.AsString()) } else { _, _ = buf.WriteString(keyValue.Value.Emit()) } } return buf.String() } // ID is a part of an implementation of the AttributeEncoder interface. func (*defaultAttrEncoder) ID() EncoderID { return defaultEncoderID } // copyAndEscape escapes `=`, `,` and its own escape character (`\`), // making the default encoding unique. func copyAndEscape(buf *bytes.Buffer, val string) { for _, ch := range val { switch ch { case '=', ',', escapeChar: _, _ = buf.WriteRune(escapeChar) } _, _ = buf.WriteRune(ch) } } // Valid returns true if this encoder ID was allocated by // `NewEncoderID`. Invalid encoder IDs will not be cached. 
func (id EncoderID) Valid() bool { return id.value != 0 } opentelemetry-go-1.21.0/attribute/filter.go000066400000000000000000000036071452547353200207310ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package attribute // import "go.opentelemetry.io/otel/attribute" // Filter supports removing certain attributes from attribute sets. When // the filter returns true, the attribute will be kept in the filtered // attribute set. When the filter returns false, the attribute is excluded // from the filtered attribute set, and the attribute instead appears in // the removed list of excluded attributes. type Filter func(KeyValue) bool // NewAllowKeysFilter returns a Filter that only allows attributes with one of // the provided keys. // // If keys is empty a deny-all filter is returned. func NewAllowKeysFilter(keys ...Key) Filter { if len(keys) <= 0 { return func(kv KeyValue) bool { return false } } allowed := make(map[Key]struct{}) for _, k := range keys { allowed[k] = struct{}{} } return func(kv KeyValue) bool { _, ok := allowed[kv.Key] return ok } } // NewDenyKeysFilter returns a Filter that only allows attributes // that do not have one of the provided keys. // // If keys is empty an allow-all filter is returned. func NewDenyKeysFilter(keys ...Key) Filter { if len(keys) <= 0 { return func(kv KeyValue) bool { return true } } forbid := make(map[Key]struct{}) for _, k := range keys { forbid[k] = struct{}{} } return func(kv KeyValue) bool { _, ok := forbid[kv.Key] return !ok } } opentelemetry-go-1.21.0/attribute/filter_test.go000066400000000000000000000046241452547353200217700ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package attribute import "testing" func TestNewAllowKeysFilter(t *testing.T) { keys := []string{"zero", "one", "two"} attrs := []KeyValue{Int(keys[0], 0), Int(keys[1], 1), Int(keys[2], 2)} t.Run("Empty", func(t *testing.T) { empty := NewAllowKeysFilter() for _, kv := range attrs { if empty(kv) { t.Errorf("empty NewAllowKeysFilter filter accepted %v", kv) } } }) t.Run("Partial", func(t *testing.T) { partial := NewAllowKeysFilter(Key(keys[0]), Key(keys[1])) for _, kv := range attrs[:2] { if !partial(kv) { t.Errorf("partial NewAllowKeysFilter filter denied %v", kv) } } if partial(attrs[2]) { t.Errorf("partial NewAllowKeysFilter filter accepted %v", attrs[2]) } }) t.Run("Full", func(t *testing.T) { full := NewAllowKeysFilter(Key(keys[0]), Key(keys[1]), Key(keys[2])) for _, kv := range attrs { if !full(kv) { t.Errorf("full NewAllowKeysFilter filter denied %v", kv) } } }) } func TestNewDenyKeysFilter(t *testing.T) { keys := []string{"zero", "one", "two"} attrs := []KeyValue{Int(keys[0], 0), Int(keys[1], 1), Int(keys[2], 2)} t.Run("Empty", func(t *testing.T) { empty := NewDenyKeysFilter() for _, kv := range attrs { if !empty(kv) { t.Errorf("empty NewDenyKeysFilter filter denied %v", kv) } } }) t.Run("Partial", func(t *testing.T) { partial := NewDenyKeysFilter(Key(keys[0]), Key(keys[1])) for _, kv := range attrs[:2] { if partial(kv) { t.Errorf("partial NewDenyKeysFilter filter accepted %v", kv) } } if !partial(attrs[2]) { t.Errorf("partial NewDenyKeysFilter filter denied %v", attrs[2]) } }) t.Run("Full", func(t *testing.T) { full := NewDenyKeysFilter(Key(keys[0]), Key(keys[1]), Key(keys[2])) for _, kv := range attrs { if full(kv) { t.Errorf("full NewDenyKeysFilter filter accepted %v", kv) } } }) } opentelemetry-go-1.21.0/attribute/iterator.go000066400000000000000000000077061452547353200213010ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package attribute // import "go.opentelemetry.io/otel/attribute" // Iterator allows iterating over the set of attributes in order, sorted by // key. type Iterator struct { storage *Set idx int } // MergeIterator supports iterating over two sets of attributes while // eliminating duplicate values from the combined set. The first iterator // value takes precedence. type MergeIterator struct { one oneIterator two oneIterator current KeyValue } type oneIterator struct { iter Iterator done bool attr KeyValue } // Next moves the iterator to the next position. Returns false if there are no // more attributes. func (i *Iterator) Next() bool { i.idx++ return i.idx < i.Len() } // Label returns current KeyValue. Must be called only after Next returns // true. // // Deprecated: Use Attribute instead. func (i *Iterator) Label() KeyValue { return i.Attribute() } // Attribute returns the current KeyValue of the Iterator. It must be called // only after Next returns true. func (i *Iterator) Attribute() KeyValue { kv, _ := i.storage.Get(i.idx) return kv } // IndexedLabel returns current index and attribute. 
Must be called only // after Next returns true. // // Deprecated: Use IndexedAttribute instead. func (i *Iterator) IndexedLabel() (int, KeyValue) { return i.idx, i.Attribute() } // IndexedAttribute returns current index and attribute. Must be called only // after Next returns true. func (i *Iterator) IndexedAttribute() (int, KeyValue) { return i.idx, i.Attribute() } // Len returns a number of attributes in the iterated set. func (i *Iterator) Len() int { return i.storage.Len() } // ToSlice is a convenience function that creates a slice of attributes from // the passed iterator. The iterator is set up to start from the beginning // before creating the slice. func (i *Iterator) ToSlice() []KeyValue { l := i.Len() if l == 0 { return nil } i.idx = -1 slice := make([]KeyValue, 0, l) for i.Next() { slice = append(slice, i.Attribute()) } return slice } // NewMergeIterator returns a MergeIterator for merging two attribute sets. // Duplicates are resolved by taking the value from the first set. func NewMergeIterator(s1, s2 *Set) MergeIterator { mi := MergeIterator{ one: makeOne(s1.Iter()), two: makeOne(s2.Iter()), } return mi } func makeOne(iter Iterator) oneIterator { oi := oneIterator{ iter: iter, } oi.advance() return oi } func (oi *oneIterator) advance() { if oi.done = !oi.iter.Next(); !oi.done { oi.attr = oi.iter.Attribute() } } // Next returns true if there is another attribute available. func (m *MergeIterator) Next() bool { if m.one.done && m.two.done { return false } if m.one.done { m.current = m.two.attr m.two.advance() return true } if m.two.done { m.current = m.one.attr m.one.advance() return true } if m.one.attr.Key == m.two.attr.Key { m.current = m.one.attr // first iterator attribute value wins m.one.advance() m.two.advance() return true } if m.one.attr.Key < m.two.attr.Key { m.current = m.one.attr m.one.advance() return true } m.current = m.two.attr m.two.advance() return true } // Label returns the current value after Next() returns true. // // Deprecated: Use Attribute instead. func (m *MergeIterator) Label() KeyValue { return m.current } // Attribute returns the current value after Next() returns true. func (m *MergeIterator) Attribute() KeyValue { return m.current } opentelemetry-go-1.21.0/attribute/iterator_test.go000066400000000000000000000066201452547353200223320ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package attribute_test import ( "fmt" "testing" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/attribute" ) func TestIterator(t *testing.T) { one := attribute.String("one", "1") two := attribute.Int("two", 2) lbl := attribute.NewSet(one, two) iter := lbl.Iter() require.Equal(t, 2, iter.Len()) require.True(t, iter.Next()) require.Equal(t, one, iter.Attribute()) idx, attr := iter.IndexedAttribute() require.Equal(t, 0, idx) require.Equal(t, one, attr) require.Equal(t, 2, iter.Len()) require.True(t, iter.Next()) require.Equal(t, two, iter.Attribute()) idx, attr = iter.IndexedAttribute() require.Equal(t, 1, idx) require.Equal(t, two, attr) require.Equal(t, 2, iter.Len()) require.False(t, iter.Next()) require.Equal(t, 2, iter.Len()) } func TestEmptyIterator(t *testing.T) { lbl := attribute.NewSet() iter := lbl.Iter() require.Equal(t, 0, iter.Len()) require.False(t, iter.Next()) } func TestMergedIterator(t *testing.T) { type inputs struct { name string keys1 []string keys2 []string expect []string } makeAttributes := func(keys []string, num int) (result []attribute.KeyValue) { for _, k := range keys { result = append(result, attribute.Int(k, num)) } return } for _, input := range []inputs{ { name: "one overlap", keys1: []string{"A", "B"}, keys2: []string{"B", "C"}, expect: []string{"A/1", "B/1", "C/2"}, }, { name: "reversed one overlap", keys1: []string{"B", "A"}, keys2: []string{"C", "B"}, expect: []string{"A/1", "B/1", "C/2"}, }, { name: "one empty", keys1: nil, keys2: []string{"C", "B"}, expect: []string{"B/2", "C/2"}, }, { name: "two empty", keys1: []string{"C", "B"}, keys2: nil, expect: []string{"B/1", "C/1"}, }, { name: "no overlap both", keys1: []string{"C"}, keys2: []string{"B"}, expect: []string{"B/2", "C/1"}, }, { name: "one empty single two", keys1: nil, keys2: []string{"B"}, expect: []string{"B/2"}, }, { name: "two empty single one", keys1: []string{"A"}, keys2: nil, expect: []string{"A/1"}, }, { name: "all empty", keys1: nil, keys2: nil, expect: nil, }, { name: "full overlap", keys1: []string{"A", "B", "C", "D"}, keys2: []string{"A", "B", "C", "D"}, expect: []string{"A/1", "B/1", "C/1", "D/1"}, }, } { t.Run(input.name, func(t *testing.T) { attr1 := makeAttributes(input.keys1, 1) attr2 := makeAttributes(input.keys2, 2) set1 := attribute.NewSet(attr1...) set2 := attribute.NewSet(attr2...) merge := attribute.NewMergeIterator(&set1, &set2) var result []string for merge.Next() { attr := merge.Attribute() result = append(result, fmt.Sprint(attr.Key, "/", attr.Value.Emit())) } require.Equal(t, input.expect, result) }) } } opentelemetry-go-1.21.0/attribute/key.go000066400000000000000000000074761452547353200202440ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package attribute // import "go.opentelemetry.io/otel/attribute" // Key represents the key part in key-value pairs. It's a string. The // allowed character set in the key depends on the use of the key. 
type Key string // Bool creates a KeyValue instance with a BOOL Value. // // If creating both a key and value at the same time, use the provided // convenience function instead -- Bool(name, value). func (k Key) Bool(v bool) KeyValue { return KeyValue{ Key: k, Value: BoolValue(v), } } // BoolSlice creates a KeyValue instance with a BOOLSLICE Value. // // If creating both a key and value at the same time, use the provided // convenience function instead -- BoolSlice(name, value). func (k Key) BoolSlice(v []bool) KeyValue { return KeyValue{ Key: k, Value: BoolSliceValue(v), } } // Int creates a KeyValue instance with an INT64 Value. // // If creating both a key and value at the same time, use the provided // convenience function instead -- Int(name, value). func (k Key) Int(v int) KeyValue { return KeyValue{ Key: k, Value: IntValue(v), } } // IntSlice creates a KeyValue instance with an INT64SLICE Value. // // If creating both a key and value at the same time, use the provided // convenience function instead -- IntSlice(name, value). func (k Key) IntSlice(v []int) KeyValue { return KeyValue{ Key: k, Value: IntSliceValue(v), } } // Int64 creates a KeyValue instance with an INT64 Value. // // If creating both a key and value at the same time, use the provided // convenience function instead -- Int64(name, value). func (k Key) Int64(v int64) KeyValue { return KeyValue{ Key: k, Value: Int64Value(v), } } // Int64Slice creates a KeyValue instance with an INT64SLICE Value. // // If creating both a key and value at the same time, use the provided // convenience function instead -- Int64Slice(name, value). func (k Key) Int64Slice(v []int64) KeyValue { return KeyValue{ Key: k, Value: Int64SliceValue(v), } } // Float64 creates a KeyValue instance with a FLOAT64 Value. // // If creating both a key and value at the same time, use the provided // convenience function instead -- Float64(name, value). func (k Key) Float64(v float64) KeyValue { return KeyValue{ Key: k, Value: Float64Value(v), } } // Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value. // // If creating both a key and value at the same time, use the provided // convenience function instead -- Float64(name, value). func (k Key) Float64Slice(v []float64) KeyValue { return KeyValue{ Key: k, Value: Float64SliceValue(v), } } // String creates a KeyValue instance with a STRING Value. // // If creating both a key and value at the same time, use the provided // convenience function instead -- String(name, value). func (k Key) String(v string) KeyValue { return KeyValue{ Key: k, Value: StringValue(v), } } // StringSlice creates a KeyValue instance with a STRINGSLICE Value. // // If creating both a key and value at the same time, use the provided // convenience function instead -- StringSlice(name, value). func (k Key) StringSlice(v []string) KeyValue { return KeyValue{ Key: k, Value: StringSliceValue(v), } } // Defined returns true for non-empty keys. func (k Key) Defined() bool { return len(k) != 0 } opentelemetry-go-1.21.0/attribute/key_test.go000066400000000000000000000047751452547353200213020ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package attribute_test import ( "encoding/json" "testing" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/attribute" ) func TestDefined(t *testing.T) { for _, testcase := range []struct { name string k attribute.Key want bool }{ { name: "Key.Defined() returns true when len(v.Name) != 0", k: attribute.Key("foo"), want: true, }, { name: "Key.Defined() returns false when len(v.Name) == 0", k: attribute.Key(""), want: false, }, } { t.Run(testcase.name, func(t *testing.T) { // func (k attribute.Key) Defined() bool { have := testcase.k.Defined() if have != testcase.want { t.Errorf("Want: %v, but have: %v", testcase.want, have) } }) } } func TestJSONValue(t *testing.T) { var kvs interface{} = [2]attribute.KeyValue{ attribute.String("A", "B"), attribute.Int64("C", 1), } data, err := json.Marshal(kvs) require.NoError(t, err) require.Equal(t, `[{"Key":"A","Value":{"Type":"STRING","Value":"B"}},{"Key":"C","Value":{"Type":"INT64","Value":1}}]`, string(data)) } func TestEmit(t *testing.T) { for _, testcase := range []struct { name string v attribute.Value want string }{ { name: `test Key.Emit() can emit a string representing self.BOOL`, v: attribute.BoolValue(true), want: "true", }, { name: `test Key.Emit() can emit a string representing self.INT64`, v: attribute.Int64Value(42), want: "42", }, { name: `test Key.Emit() can emit a string representing self.FLOAT64`, v: attribute.Float64Value(42.1), want: "42.1", }, { name: `test Key.Emit() can emit a string representing self.STRING`, v: attribute.StringValue("foo"), want: "foo", }, } { t.Run(testcase.name, func(t *testing.T) { // proto: func (v attribute.Value) Emit() string { have := testcase.v.Emit() if have != testcase.want { t.Errorf("Want: %s, but have: %s", testcase.want, have) } }) } } opentelemetry-go-1.21.0/attribute/kv.go000066400000000000000000000046321452547353200200630ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package attribute // import "go.opentelemetry.io/otel/attribute" import ( "fmt" ) // KeyValue holds a key and value pair. type KeyValue struct { Key Key Value Value } // Valid returns if kv is a valid OpenTelemetry attribute. func (kv KeyValue) Valid() bool { return kv.Key.Defined() && kv.Value.Type() != INVALID } // Bool creates a KeyValue with a BOOL Value type. func Bool(k string, v bool) KeyValue { return Key(k).Bool(v) } // BoolSlice creates a KeyValue with a BOOLSLICE Value type. func BoolSlice(k string, v []bool) KeyValue { return Key(k).BoolSlice(v) } // Int creates a KeyValue with an INT64 Value type. 
func Int(k string, v int) KeyValue { return Key(k).Int(v) } // IntSlice creates a KeyValue with an INT64SLICE Value type. func IntSlice(k string, v []int) KeyValue { return Key(k).IntSlice(v) } // Int64 creates a KeyValue with an INT64 Value type. func Int64(k string, v int64) KeyValue { return Key(k).Int64(v) } // Int64Slice creates a KeyValue with an INT64SLICE Value type. func Int64Slice(k string, v []int64) KeyValue { return Key(k).Int64Slice(v) } // Float64 creates a KeyValue with a FLOAT64 Value type. func Float64(k string, v float64) KeyValue { return Key(k).Float64(v) } // Float64Slice creates a KeyValue with a FLOAT64SLICE Value type. func Float64Slice(k string, v []float64) KeyValue { return Key(k).Float64Slice(v) } // String creates a KeyValue with a STRING Value type. func String(k, v string) KeyValue { return Key(k).String(v) } // StringSlice creates a KeyValue with a STRINGSLICE Value type. func StringSlice(k string, v []string) KeyValue { return Key(k).StringSlice(v) } // Stringer creates a new key-value pair with a passed name and a string // value generated by the passed Stringer interface. func Stringer(k string, v fmt.Stringer) KeyValue { return Key(k).String(v.String()) } opentelemetry-go-1.21.0/attribute/kv_test.go000066400000000000000000000076711452547353200211300ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package attribute_test import ( "testing" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/attribute" ) func TestKeyValueConstructors(t *testing.T) { tt := []struct { name string actual attribute.KeyValue expected attribute.KeyValue }{ { name: "Bool", actual: attribute.Bool("k1", true), expected: attribute.KeyValue{ Key: "k1", Value: attribute.BoolValue(true), }, }, { name: "Int64", actual: attribute.Int64("k1", 123), expected: attribute.KeyValue{ Key: "k1", Value: attribute.Int64Value(123), }, }, { name: "Float64", actual: attribute.Float64("k1", 123.5), expected: attribute.KeyValue{ Key: "k1", Value: attribute.Float64Value(123.5), }, }, { name: "String", actual: attribute.String("k1", "123.5"), expected: attribute.KeyValue{ Key: "k1", Value: attribute.StringValue("123.5"), }, }, { name: "Int", actual: attribute.Int("k1", 123), expected: attribute.KeyValue{ Key: "k1", Value: attribute.IntValue(123), }, }, } for _, test := range tt { t.Run(test.name, func(t *testing.T) { if diff := cmp.Diff(test.actual, test.expected, cmp.AllowUnexported(attribute.Value{})); diff != "" { t.Fatal(diff) } }) } } func TestKeyValueValid(t *testing.T) { tests := []struct { desc string valid bool kv attribute.KeyValue }{ { desc: "uninitialized KeyValue should be invalid", valid: false, kv: attribute.KeyValue{}, }, { desc: "empty key value should be invalid", valid: false, kv: attribute.Key("").Bool(true), }, { desc: "INVALID value type should be invalid", valid: false, kv: attribute.KeyValue{ Key: attribute.Key("valid key"), // Default type is INVALID. 
Value: attribute.Value{}, }, }, { desc: "non-empty key with BOOL type Value should be valid", valid: true, kv: attribute.Bool("bool", true), }, { desc: "non-empty key with INT64 type Value should be valid", valid: true, kv: attribute.Int64("int64", 0), }, { desc: "non-empty key with FLOAT64 type Value should be valid", valid: true, kv: attribute.Float64("float64", 0), }, { desc: "non-empty key with STRING type Value should be valid", valid: true, kv: attribute.String("string", ""), }, } for _, test := range tests { if got, want := test.kv.Valid(), test.valid; got != want { t.Error(test.desc) } } } func TestIncorrectCast(t *testing.T) { testCases := []struct { name string val attribute.Value }{ { name: "Float64", val: attribute.Float64Value(1.0), }, { name: "Int64", val: attribute.Int64Value(2), }, { name: "String", val: attribute.BoolValue(true), }, { name: "Float64Slice", val: attribute.Float64SliceValue([]float64{1.0}), }, { name: "Int64Slice", val: attribute.Int64SliceValue([]int64{2}), }, { name: "StringSlice", val: attribute.BoolSliceValue([]bool{true}), }, } for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { assert.NotPanics(t, func() { tt.val.AsBool() tt.val.AsBoolSlice() tt.val.AsFloat64() tt.val.AsFloat64Slice() tt.val.AsInt64() tt.val.AsInt64Slice() tt.val.AsInterface() tt.val.AsString() tt.val.AsStringSlice() }) }) } } opentelemetry-go-1.21.0/attribute/set.go000066400000000000000000000266201452547353200202370ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package attribute // import "go.opentelemetry.io/otel/attribute" import ( "encoding/json" "reflect" "sort" "sync" ) type ( // Set is the representation for a distinct attribute set. It manages an // immutable set of attributes, with an internal cache for storing // attribute encodings. // // This type supports the Equivalent method of comparison using values of // type Distinct. Set struct { equivalent Distinct } // Distinct wraps a variable-size array of KeyValue, constructed with keys // in sorted order. This can be used as a map key or for equality checking // between Sets. Distinct struct { iface interface{} } // Sortable implements sort.Interface, used for sorting KeyValue. This is // an exported type to support a memory optimization. A pointer to one of // these is needed for the call to sort.Stable(), which the caller may // provide in order to avoid an allocation. See NewSetWithSortable(). Sortable []KeyValue ) var ( // keyValueType is used in computeDistinctReflect. keyValueType = reflect.TypeOf(KeyValue{}) // emptySet is returned for empty attribute sets. emptySet = &Set{ equivalent: Distinct{ iface: [0]KeyValue{}, }, } // sortables is a pool of Sortables used to create Sets with a user does // not provide one. sortables = sync.Pool{ New: func() interface{} { return new(Sortable) }, } ) // EmptySet returns a reference to a Set with no elements. // // This is a convenience provided for optimized calling utility. 
func EmptySet() *Set { return emptySet } // reflectValue abbreviates reflect.ValueOf(d). func (d Distinct) reflectValue() reflect.Value { return reflect.ValueOf(d.iface) } // Valid returns true if this value refers to a valid Set. func (d Distinct) Valid() bool { return d.iface != nil } // Len returns the number of attributes in this set. func (l *Set) Len() int { if l == nil || !l.equivalent.Valid() { return 0 } return l.equivalent.reflectValue().Len() } // Get returns the KeyValue at ordered position idx in this set. func (l *Set) Get(idx int) (KeyValue, bool) { if l == nil || !l.equivalent.Valid() { return KeyValue{}, false } value := l.equivalent.reflectValue() if idx >= 0 && idx < value.Len() { // Note: The Go compiler successfully avoids an allocation for // the interface{} conversion here: return value.Index(idx).Interface().(KeyValue), true } return KeyValue{}, false } // Value returns the value of a specified key in this set. func (l *Set) Value(k Key) (Value, bool) { if l == nil || !l.equivalent.Valid() { return Value{}, false } rValue := l.equivalent.reflectValue() vlen := rValue.Len() idx := sort.Search(vlen, func(idx int) bool { return rValue.Index(idx).Interface().(KeyValue).Key >= k }) if idx >= vlen { return Value{}, false } keyValue := rValue.Index(idx).Interface().(KeyValue) if k == keyValue.Key { return keyValue.Value, true } return Value{}, false } // HasValue tests whether a key is defined in this set. func (l *Set) HasValue(k Key) bool { if l == nil { return false } _, ok := l.Value(k) return ok } // Iter returns an iterator for visiting the attributes in this set. func (l *Set) Iter() Iterator { return Iterator{ storage: l, idx: -1, } } // ToSlice returns the set of attributes belonging to this set, sorted, where // keys appear no more than once. func (l *Set) ToSlice() []KeyValue { iter := l.Iter() return iter.ToSlice() } // Equivalent returns a value that may be used as a map key. The Distinct type // guarantees that the result will equal the equivalent. Distinct value of any // attribute set with the same elements as this, where sets are made unique by // choosing the last value in the input for any given key. func (l *Set) Equivalent() Distinct { if l == nil || !l.equivalent.Valid() { return emptySet.equivalent } return l.equivalent } // Equals returns true if the argument set is equivalent to this set. func (l *Set) Equals(o *Set) bool { return l.Equivalent() == o.Equivalent() } // Encoded returns the encoded form of this set, according to encoder. func (l *Set) Encoded(encoder Encoder) string { if l == nil || encoder == nil { return "" } return encoder.Encode(l.Iter()) } func empty() Set { return Set{ equivalent: emptySet.equivalent, } } // NewSet returns a new Set. See the documentation for // NewSetWithSortableFiltered for more details. // // Except for empty sets, this method adds an additional allocation compared // with calls that include a Sortable. func NewSet(kvs ...KeyValue) Set { // Check for empty set. if len(kvs) == 0 { return empty() } srt := sortables.Get().(*Sortable) s, _ := NewSetWithSortableFiltered(kvs, srt, nil) sortables.Put(srt) return s } // NewSetWithSortable returns a new Set. See the documentation for // NewSetWithSortableFiltered for more details. // // This call includes a Sortable option as a memory optimization. func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set { // Check for empty set. if len(kvs) == 0 { return empty() } s, _ := NewSetWithSortableFiltered(kvs, tmp, nil) return s } // NewSetWithFiltered returns a new Set. 
See the documentation for // NewSetWithSortableFiltered for more details. // // This call includes a Filter to include/exclude attribute keys from the // return value. Excluded keys are returned as a slice of attribute values. func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { // Check for empty set. if len(kvs) == 0 { return empty(), nil } srt := sortables.Get().(*Sortable) s, filtered := NewSetWithSortableFiltered(kvs, srt, filter) sortables.Put(srt) return s, filtered } // NewSetWithSortableFiltered returns a new Set. // // Duplicate keys are eliminated by taking the last value. This // re-orders the input slice so that unique last-values are contiguous // at the end of the slice. // // This ensures the following: // // - Last-value-wins semantics // - Caller sees the reordering, but doesn't lose values // - Repeated call preserve last-value wins. // // Note that methods are defined on Set, although this returns Set. Callers // can avoid memory allocations by: // // - allocating a Sortable for use as a temporary in this method // - allocating a Set for storing the return value of this constructor. // // The result maintains a cache of encoded attributes, by attribute.EncoderID. // This value should not be copied after its first use. // // The second []KeyValue return value is a list of attributes that were // excluded by the Filter (if non-nil). func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) { // Check for empty set. if len(kvs) == 0 { return empty(), nil } *tmp = kvs // Stable sort so the following de-duplication can implement // last-value-wins semantics. sort.Stable(tmp) *tmp = nil position := len(kvs) - 1 offset := position - 1 // The requirements stated above require that the stable // result be placed in the end of the input slice, while // overwritten values are swapped to the beginning. // // De-duplicate with last-value-wins semantics. Preserve // duplicate values at the beginning of the input slice. for ; offset >= 0; offset-- { if kvs[offset].Key == kvs[position].Key { continue } position-- kvs[offset], kvs[position] = kvs[position], kvs[offset] } if filter != nil { return filterSet(kvs[position:], filter) } return Set{ equivalent: computeDistinct(kvs[position:]), }, nil } // filterSet reorders kvs so that included keys are contiguous at the end of // the slice, while excluded keys precede the included keys. func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) { var excluded []KeyValue // Move attributes that do not match the filter so they're adjacent before // calling computeDistinct(). distinctPosition := len(kvs) // Swap indistinct keys forward and distinct keys toward the // end of the slice. offset := len(kvs) - 1 for ; offset >= 0; offset-- { if filter(kvs[offset]) { distinctPosition-- kvs[offset], kvs[distinctPosition] = kvs[distinctPosition], kvs[offset] continue } } excluded = kvs[:distinctPosition] return Set{ equivalent: computeDistinct(kvs[distinctPosition:]), }, excluded } // Filter returns a filtered copy of this Set. See the documentation for // NewSetWithSortableFiltered for more details. func (l *Set) Filter(re Filter) (Set, []KeyValue) { if re == nil { return Set{ equivalent: l.equivalent, }, nil } // Note: This could be refactored to avoid the temporary slice // allocation, if it proves to be expensive. return filterSet(l.ToSlice(), re) } // computeDistinct returns a Distinct using either the fixed- or // reflect-oriented code path, depending on the size of the input. 
The input // slice is assumed to already be sorted and de-duplicated. func computeDistinct(kvs []KeyValue) Distinct { iface := computeDistinctFixed(kvs) if iface == nil { iface = computeDistinctReflect(kvs) } return Distinct{ iface: iface, } } // computeDistinctFixed computes a Distinct for small slices. It returns nil // if the input is too large for this code path. func computeDistinctFixed(kvs []KeyValue) interface{} { switch len(kvs) { case 1: ptr := new([1]KeyValue) copy((*ptr)[:], kvs) return *ptr case 2: ptr := new([2]KeyValue) copy((*ptr)[:], kvs) return *ptr case 3: ptr := new([3]KeyValue) copy((*ptr)[:], kvs) return *ptr case 4: ptr := new([4]KeyValue) copy((*ptr)[:], kvs) return *ptr case 5: ptr := new([5]KeyValue) copy((*ptr)[:], kvs) return *ptr case 6: ptr := new([6]KeyValue) copy((*ptr)[:], kvs) return *ptr case 7: ptr := new([7]KeyValue) copy((*ptr)[:], kvs) return *ptr case 8: ptr := new([8]KeyValue) copy((*ptr)[:], kvs) return *ptr case 9: ptr := new([9]KeyValue) copy((*ptr)[:], kvs) return *ptr case 10: ptr := new([10]KeyValue) copy((*ptr)[:], kvs) return *ptr default: return nil } } // computeDistinctReflect computes a Distinct using reflection, works for any // size input. func computeDistinctReflect(kvs []KeyValue) interface{} { at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() for i, keyValue := range kvs { *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue } return at.Interface() } // MarshalJSON returns the JSON encoding of the Set. func (l *Set) MarshalJSON() ([]byte, error) { return json.Marshal(l.equivalent.iface) } // MarshalLog is the marshaling function used by the logging system to represent this exporter. func (l Set) MarshalLog() interface{} { kvs := make(map[string]string) for _, kv := range l.ToSlice() { kvs[string(kv.Key)] = kv.Value.Emit() } return kvs } // Len implements sort.Interface. func (l *Sortable) Len() int { return len(*l) } // Swap implements sort.Interface. func (l *Sortable) Swap(i, j int) { (*l)[i], (*l)[j] = (*l)[j], (*l)[i] } // Less implements sort.Interface. func (l *Sortable) Less(i, j int) bool { return (*l)[i].Key < (*l)[j].Key } opentelemetry-go-1.21.0/attribute/set_test.go000066400000000000000000000144111452547353200212710ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package attribute_test import ( "reflect" "regexp" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/attribute" ) type testCase struct { kvs []attribute.KeyValue keyRe *regexp.Regexp encoding string fullEnc string } func expect(enc string, kvs ...attribute.KeyValue) testCase { return testCase{ kvs: kvs, encoding: enc, } } func expectFiltered(enc, filter, fullEnc string, kvs ...attribute.KeyValue) testCase { return testCase{ kvs: kvs, keyRe: regexp.MustCompile(filter), encoding: enc, fullEnc: fullEnc, } } func TestSetDedup(t *testing.T) { cases := []testCase{ expect("A=B", attribute.String("A", "2"), attribute.String("A", "B")), expect("A=B", attribute.String("A", "2"), attribute.Int("A", 1), attribute.String("A", "B")), expect("A=B", attribute.String("A", "B"), attribute.String("A", "C"), attribute.String("A", "D"), attribute.String("A", "B")), expect("A=B,C=D", attribute.String("A", "1"), attribute.String("C", "D"), attribute.String("A", "B")), expect("A=B,C=D", attribute.String("A", "2"), attribute.String("A", "B"), attribute.String("C", "D")), expect("A=B,C=D", attribute.Float64("C", 1.2), attribute.String("A", "2"), attribute.String("A", "B"), attribute.String("C", "D")), expect("A=B,C=D", attribute.String("C", "D"), attribute.String("A", "B"), attribute.String("A", "C"), attribute.String("A", "D"), attribute.String("A", "B")), expect("A=B,C=D", attribute.String("A", "B"), attribute.String("C", "D"), attribute.String("A", "C"), attribute.String("A", "D"), attribute.String("A", "B")), expect("A=B,C=D", attribute.String("A", "B"), attribute.String("A", "C"), attribute.String("A", "D"), attribute.String("A", "B"), attribute.String("C", "D")), } enc := attribute.DefaultEncoder() s2d := map[string][]attribute.Distinct{} d2s := map[attribute.Distinct][]string{} for _, tc := range cases { cpy := make([]attribute.KeyValue, len(tc.kvs)) copy(cpy, tc.kvs) sl := attribute.NewSet(cpy...) // Ensure that the input was reordered but no elements went missing. require.ElementsMatch(t, tc.kvs, cpy) str := sl.Encoded(enc) equ := sl.Equivalent() s2d[str] = append(s2d[str], equ) d2s[equ] = append(d2s[equ], str) require.Equal(t, tc.encoding, str) } for s, d := range s2d { // No other Distinct values are equal to this. for s2, d2 := range s2d { if s2 == s { continue } for _, elt := range d { for _, otherDistinct := range d2 { require.NotEqual(t, otherDistinct, elt) } } } for _, strings := range d2s { if strings[0] == s { continue } for _, otherString := range strings { require.NotEqual(t, otherString, s) } } } for d, s := range d2s { // No other Distinct values are equal to this. 
for d2, s2 := range d2s { if d2 == d { continue } for _, elt := range s { for _, otherDistinct := range s2 { require.NotEqual(t, otherDistinct, elt) } } } for _, distincts := range s2d { if distincts[0] == d { continue } for _, otherDistinct := range distincts { require.NotEqual(t, otherDistinct, d) } } } } func TestUniqueness(t *testing.T) { short := []attribute.KeyValue{ attribute.String("A", "0"), attribute.String("B", "2"), attribute.String("A", "1"), } long := []attribute.KeyValue{ attribute.String("B", "2"), attribute.String("C", "5"), attribute.String("B", "2"), attribute.String("C", "1"), attribute.String("A", "4"), attribute.String("C", "3"), attribute.String("A", "1"), } cases := []testCase{ expectFiltered("A=1", "^A$", "B=2", short...), expectFiltered("B=2", "^B$", "A=1", short...), expectFiltered("A=1,B=2", "^A|B$", "", short...), expectFiltered("", "^C", "A=1,B=2", short...), expectFiltered("A=1,C=3", "A|C", "B=2", long...), expectFiltered("B=2,C=3", "C|B", "A=1", long...), expectFiltered("C=3", "C", "A=1,B=2", long...), expectFiltered("", "D", "A=1,B=2,C=3", long...), } enc := attribute.DefaultEncoder() for _, tc := range cases { cpy := make([]attribute.KeyValue, len(tc.kvs)) copy(cpy, tc.kvs) distinct, uniq := attribute.NewSetWithFiltered(cpy, func(attr attribute.KeyValue) bool { return tc.keyRe.MatchString(string(attr.Key)) }) full := attribute.NewSet(uniq...) require.Equal(t, tc.encoding, distinct.Encoded(enc)) require.Equal(t, tc.fullEnc, full.Encoded(enc)) } } func TestLookup(t *testing.T) { set := attribute.NewSet(attribute.Int("C", 3), attribute.Int("A", 1), attribute.Int("B", 2)) value, has := set.Value("C") require.True(t, has) require.Equal(t, int64(3), value.AsInt64()) value, has = set.Value("B") require.True(t, has) require.Equal(t, int64(2), value.AsInt64()) value, has = set.Value("A") require.True(t, has) require.Equal(t, int64(1), value.AsInt64()) _, has = set.Value("D") require.False(t, has) } func TestZeroSetExportedMethodsNoPanic(t *testing.T) { rType := reflect.TypeOf((*attribute.Set)(nil)) rVal := reflect.ValueOf(&attribute.Set{}) for n := 0; n < rType.NumMethod(); n++ { mType := rType.Method(n) if !mType.IsExported() { t.Logf("ignoring unexported %s", mType.Name) continue } t.Run(mType.Name, func(t *testing.T) { m := rVal.MethodByName(mType.Name) if !m.IsValid() { t.Errorf("unknown method: %s", mType.Name) } assert.NotPanics(t, func() { _ = m.Call(args(mType)) }) }) } } func args(m reflect.Method) []reflect.Value { numIn := m.Type.NumIn() - 1 // Do not include the receiver arg. if numIn <= 0 { return nil } if m.Type.IsVariadic() { numIn-- } out := make([]reflect.Value, numIn) for i := range out { aType := m.Type.In(i + 1) // Skip receiver arg. out[i] = reflect.New(aType).Elem() } return out } opentelemetry-go-1.21.0/attribute/type_string.go000066400000000000000000000014401452547353200220040ustar00rootroot00000000000000// Code generated by "stringer -type=Type"; DO NOT EDIT. package attribute import "strconv" func _() { // An "invalid array index" compiler error signifies that the constant values have changed. // Re-run the stringer command to generate them again. 
var x [1]struct{} _ = x[INVALID-0] _ = x[BOOL-1] _ = x[INT64-2] _ = x[FLOAT64-3] _ = x[STRING-4] _ = x[BOOLSLICE-5] _ = x[INT64SLICE-6] _ = x[FLOAT64SLICE-7] _ = x[STRINGSLICE-8] } const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE" var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71} func (i Type) String() string { if i < 0 || i >= Type(len(_Type_index)-1) { return "Type(" + strconv.FormatInt(int64(i), 10) + ")" } return _Type_name[_Type_index[i]:_Type_index[i+1]] } opentelemetry-go-1.21.0/attribute/value.go000066400000000000000000000145511452547353200205600ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package attribute // import "go.opentelemetry.io/otel/attribute" import ( "encoding/json" "fmt" "reflect" "strconv" "go.opentelemetry.io/otel/internal" "go.opentelemetry.io/otel/internal/attribute" ) //go:generate stringer -type=Type // Type describes the type of the data Value holds. type Type int // nolint: revive // redefines builtin Type. // Value represents the value part in key-value pairs. type Value struct { vtype Type numeric uint64 stringly string slice interface{} } const ( // INVALID is used for a Value with no value set. INVALID Type = iota // BOOL is a boolean Type Value. BOOL // INT64 is a 64-bit signed integral Type Value. INT64 // FLOAT64 is a 64-bit floating point Type Value. FLOAT64 // STRING is a string Type Value. STRING // BOOLSLICE is a slice of booleans Type Value. BOOLSLICE // INT64SLICE is a slice of 64-bit signed integral numbers Type Value. INT64SLICE // FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value. FLOAT64SLICE // STRINGSLICE is a slice of strings Type Value. STRINGSLICE ) // BoolValue creates a BOOL Value. func BoolValue(v bool) Value { return Value{ vtype: BOOL, numeric: internal.BoolToRaw(v), } } // BoolSliceValue creates a BOOLSLICE Value. func BoolSliceValue(v []bool) Value { return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)} } // IntValue creates an INT64 Value. func IntValue(v int) Value { return Int64Value(int64(v)) } // IntSliceValue creates an INTSLICE Value. func IntSliceValue(v []int) Value { var int64Val int64 cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val))) for i, val := range v { cp.Elem().Index(i).SetInt(int64(val)) } return Value{ vtype: INT64SLICE, slice: cp.Elem().Interface(), } } // Int64Value creates an INT64 Value. func Int64Value(v int64) Value { return Value{ vtype: INT64, numeric: internal.Int64ToRaw(v), } } // Int64SliceValue creates an INT64SLICE Value. func Int64SliceValue(v []int64) Value { return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)} } // Float64Value creates a FLOAT64 Value. func Float64Value(v float64) Value { return Value{ vtype: FLOAT64, numeric: internal.Float64ToRaw(v), } } // Float64SliceValue creates a FLOAT64SLICE Value. 
func Float64SliceValue(v []float64) Value { return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)} } // StringValue creates a STRING Value. func StringValue(v string) Value { return Value{ vtype: STRING, stringly: v, } } // StringSliceValue creates a STRINGSLICE Value. func StringSliceValue(v []string) Value { return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)} } // Type returns a type of the Value. func (v Value) Type() Type { return v.vtype } // AsBool returns the bool value. Make sure that the Value's type is // BOOL. func (v Value) AsBool() bool { return internal.RawToBool(v.numeric) } // AsBoolSlice returns the []bool value. Make sure that the Value's type is // BOOLSLICE. func (v Value) AsBoolSlice() []bool { if v.vtype != BOOLSLICE { return nil } return v.asBoolSlice() } func (v Value) asBoolSlice() []bool { return attribute.AsBoolSlice(v.slice) } // AsInt64 returns the int64 value. Make sure that the Value's type is // INT64. func (v Value) AsInt64() int64 { return internal.RawToInt64(v.numeric) } // AsInt64Slice returns the []int64 value. Make sure that the Value's type is // INT64SLICE. func (v Value) AsInt64Slice() []int64 { if v.vtype != INT64SLICE { return nil } return v.asInt64Slice() } func (v Value) asInt64Slice() []int64 { return attribute.AsInt64Slice(v.slice) } // AsFloat64 returns the float64 value. Make sure that the Value's // type is FLOAT64. func (v Value) AsFloat64() float64 { return internal.RawToFloat64(v.numeric) } // AsFloat64Slice returns the []float64 value. Make sure that the Value's type is // FLOAT64SLICE. func (v Value) AsFloat64Slice() []float64 { if v.vtype != FLOAT64SLICE { return nil } return v.asFloat64Slice() } func (v Value) asFloat64Slice() []float64 { return attribute.AsFloat64Slice(v.slice) } // AsString returns the string value. Make sure that the Value's type // is STRING. func (v Value) AsString() string { return v.stringly } // AsStringSlice returns the []string value. Make sure that the Value's type is // STRINGSLICE. func (v Value) AsStringSlice() []string { if v.vtype != STRINGSLICE { return nil } return v.asStringSlice() } func (v Value) asStringSlice() []string { return attribute.AsStringSlice(v.slice) } type unknownValueType struct{} // AsInterface returns Value's data as interface{}. func (v Value) AsInterface() interface{} { switch v.Type() { case BOOL: return v.AsBool() case BOOLSLICE: return v.asBoolSlice() case INT64: return v.AsInt64() case INT64SLICE: return v.asInt64Slice() case FLOAT64: return v.AsFloat64() case FLOAT64SLICE: return v.asFloat64Slice() case STRING: return v.stringly case STRINGSLICE: return v.asStringSlice() } return unknownValueType{} } // Emit returns a string representation of Value's data. func (v Value) Emit() string { switch v.Type() { case BOOLSLICE: return fmt.Sprint(v.asBoolSlice()) case BOOL: return strconv.FormatBool(v.AsBool()) case INT64SLICE: return fmt.Sprint(v.asInt64Slice()) case INT64: return strconv.FormatInt(v.AsInt64(), 10) case FLOAT64SLICE: return fmt.Sprint(v.asFloat64Slice()) case FLOAT64: return fmt.Sprint(v.AsFloat64()) case STRINGSLICE: return fmt.Sprint(v.asStringSlice()) case STRING: return v.stringly default: return "unknown" } } // MarshalJSON returns the JSON encoding of the Value. 
func (v Value) MarshalJSON() ([]byte, error) { var jsonVal struct { Type string Value interface{} } jsonVal.Type = v.Type().String() jsonVal.Value = v.AsInterface() return json.Marshal(jsonVal) } opentelemetry-go-1.21.0/attribute/value_test.go000066400000000000000000000127401452547353200216150ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package attribute_test import ( "testing" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/attribute" ) func TestValue(t *testing.T) { k := attribute.Key("test") for _, testcase := range []struct { name string value attribute.Value wantType attribute.Type wantValue interface{} }{ { name: "Key.Bool() correctly returns keys's internal bool value", value: k.Bool(true).Value, wantType: attribute.BOOL, wantValue: true, }, { name: "Key.BoolSlice() correctly returns keys's internal []bool value", value: k.BoolSlice([]bool{true, false, true}).Value, wantType: attribute.BOOLSLICE, wantValue: []bool{true, false, true}, }, { name: "Key.Int64() correctly returns keys's internal int64 value", value: k.Int64(42).Value, wantType: attribute.INT64, wantValue: int64(42), }, { name: "Key.Int64Slice() correctly returns keys's internal []int64 value", value: k.Int64Slice([]int64{42, -3, 12}).Value, wantType: attribute.INT64SLICE, wantValue: []int64{42, -3, 12}, }, { name: "Key.Int() correctly returns keys's internal signed integral value", value: k.Int(42).Value, wantType: attribute.INT64, wantValue: int64(42), }, { name: "Key.IntSlice() correctly returns keys's internal []int64 value", value: k.IntSlice([]int{42, -3, 12}).Value, wantType: attribute.INT64SLICE, wantValue: []int64{42, -3, 12}, }, { name: "Key.Float64() correctly returns keys's internal float64 value", value: k.Float64(42.1).Value, wantType: attribute.FLOAT64, wantValue: 42.1, }, { name: "Key.Float64Slice() correctly returns keys's internal []float64 value", value: k.Float64Slice([]float64{42, -3, 12}).Value, wantType: attribute.FLOAT64SLICE, wantValue: []float64{42, -3, 12}, }, { name: "Key.String() correctly returns keys's internal string value", value: k.String("foo").Value, wantType: attribute.STRING, wantValue: "foo", }, { name: "Key.StringSlice() correctly returns keys's internal []string value", value: k.StringSlice([]string{"forty-two", "negative three", "twelve"}).Value, wantType: attribute.STRINGSLICE, wantValue: []string{"forty-two", "negative three", "twelve"}, }, } { t.Logf("Running test case %s", testcase.name) if testcase.value.Type() != testcase.wantType { t.Errorf("wrong value type, got %#v, expected %#v", testcase.value.Type(), testcase.wantType) } if testcase.wantType == attribute.INVALID { continue } got := testcase.value.AsInterface() if diff := cmp.Diff(testcase.wantValue, got); diff != "" { t.Errorf("+got, -want: %s", diff) } } } func TestSetComparability(t *testing.T) { pairs := [][2]attribute.KeyValue{ { attribute.Bool("Bool", true), attribute.Bool("Bool", true), }, { 
attribute.BoolSlice("BoolSlice", []bool{true, false, true}), attribute.BoolSlice("BoolSlice", []bool{true, false, true}), }, { attribute.Int("Int", 34), attribute.Int("Int", 34), }, { attribute.IntSlice("IntSlice", []int{312, 1, -2}), attribute.IntSlice("IntSlice", []int{312, 1, -2}), }, { attribute.Int64("Int64", 98), attribute.Int64("Int64", 98), }, { attribute.Int64Slice("Int64Slice", []int64{12, 1298, -219, 2}), attribute.Int64Slice("Int64Slice", []int64{12, 1298, -219, 2}), }, { attribute.Float64("Float64", 19.09), attribute.Float64("Float64", 19.09), }, { attribute.Float64Slice("Float64Slice", []float64{12398.1, -37.1713873737, 3}), attribute.Float64Slice("Float64Slice", []float64{12398.1, -37.1713873737, 3}), }, { attribute.String("String", "string value"), attribute.String("String", "string value"), }, { attribute.StringSlice("StringSlice", []string{"one", "two", "three"}), attribute.StringSlice("StringSlice", []string{"one", "two", "three"}), }, } for _, p := range pairs { s0, s1 := attribute.NewSet(p[0]), attribute.NewSet(p[1]) m := map[attribute.Set]struct{}{s0: {}} _, ok := m[s1] assert.Truef(t, ok, "%s not comparable", p[0].Value.Type()) } } func TestAsSlice(t *testing.T) { bs1 := []bool{true, false, true} kv := attribute.BoolSlice("BoolSlice", bs1) bs2 := kv.Value.AsBoolSlice() assert.Equal(t, bs1, bs2) i64s1 := []int64{12, 1298, -219, 2} kv = attribute.Int64Slice("Int64Slice", i64s1) i64s2 := kv.Value.AsInt64Slice() assert.Equal(t, i64s1, i64s2) is1 := []int{12, 1298, -219, 2} kv = attribute.IntSlice("IntSlice", is1) i64s2 = kv.Value.AsInt64Slice() assert.Equal(t, i64s1, i64s2) fs1 := []float64{12398.1, -37.1713873737, 3} kv = attribute.Float64Slice("Float64Slice", fs1) fs2 := kv.Value.AsFloat64Slice() assert.Equal(t, fs1, fs2) ss1 := []string{"one", "two", "three"} kv = attribute.StringSlice("StringSlice", ss1) ss2 := kv.Value.AsStringSlice() assert.Equal(t, ss1, ss2) } opentelemetry-go-1.21.0/baggage/000077500000000000000000000000001452547353200164615ustar00rootroot00000000000000opentelemetry-go-1.21.0/baggage/baggage.go000066400000000000000000000353771452547353200204040ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package baggage // import "go.opentelemetry.io/otel/baggage" import ( "errors" "fmt" "net/url" "regexp" "strings" "go.opentelemetry.io/otel/internal/baggage" ) const ( maxMembers = 180 maxBytesPerMembers = 4096 maxBytesPerBaggageString = 8192 listDelimiter = "," keyValueDelimiter = "=" propertyDelimiter = ";" keyDef = `([\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5a\x5e-\x7a\x7c\x7e]+)` valueDef = `([\x21\x23-\x2b\x2d-\x3a\x3c-\x5B\x5D-\x7e]*)` keyValueDef = `\s*` + keyDef + `\s*` + keyValueDelimiter + `\s*` + valueDef + `\s*` ) var ( keyRe = regexp.MustCompile(`^` + keyDef + `$`) valueRe = regexp.MustCompile(`^` + valueDef + `$`) propertyRe = regexp.MustCompile(`^(?:\s*` + keyDef + `\s*|` + keyValueDef + `)$`) ) var ( errInvalidKey = errors.New("invalid key") errInvalidValue = errors.New("invalid value") errInvalidProperty = errors.New("invalid baggage list-member property") errInvalidMember = errors.New("invalid baggage list-member") errMemberNumber = errors.New("too many list-members in baggage-string") errMemberBytes = errors.New("list-member too large") errBaggageBytes = errors.New("baggage-string too large") ) // Property is an additional metadata entry for a baggage list-member. type Property struct { key, value string // hasValue indicates if a zero-value value means the property does not // have a value or if it was the zero-value. hasValue bool } // NewKeyProperty returns a new Property for key. // // If key is invalid, an error will be returned. func NewKeyProperty(key string) (Property, error) { if !keyRe.MatchString(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) } p := Property{key: key} return p, nil } // NewKeyValueProperty returns a new Property for key with value. // // If key or value are invalid, an error will be returned. func NewKeyValueProperty(key, value string) (Property, error) { if !keyRe.MatchString(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) } if !valueRe.MatchString(value) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) } p := Property{ key: key, value: value, hasValue: true, } return p, nil } func newInvalidProperty() Property { return Property{} } // parseProperty attempts to decode a Property from the passed string. It // returns an error if the input is invalid according to the W3C Baggage // specification. func parseProperty(property string) (Property, error) { if property == "" { return newInvalidProperty(), nil } match := propertyRe.FindStringSubmatch(property) if len(match) != 4 { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property) } var p Property if match[1] != "" { p.key = match[1] } else { p.key = match[2] p.value = match[3] p.hasValue = true } return p, nil } // validate ensures p conforms to the W3C Baggage specification, returning an // error otherwise. func (p Property) validate() error { errFunc := func(err error) error { return fmt.Errorf("invalid property: %w", err) } if !keyRe.MatchString(p.key) { return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key)) } if p.hasValue && !valueRe.MatchString(p.value) { return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value)) } if !p.hasValue && p.value != "" { return errFunc(errors.New("inconsistent value")) } return nil } // Key returns the Property key. func (p Property) Key() string { return p.key } // Value returns the Property value. 
Additionally, a boolean value is returned // indicating if the returned value is the empty if the Property has a value // that is empty or if the value is not set. func (p Property) Value() (string, bool) { return p.value, p.hasValue } // String encodes Property into a string compliant with the W3C Baggage // specification. func (p Property) String() string { if p.hasValue { return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, p.value) } return p.key } type properties []Property func fromInternalProperties(iProps []baggage.Property) properties { if len(iProps) == 0 { return nil } props := make(properties, len(iProps)) for i, p := range iProps { props[i] = Property{ key: p.Key, value: p.Value, hasValue: p.HasValue, } } return props } func (p properties) asInternal() []baggage.Property { if len(p) == 0 { return nil } iProps := make([]baggage.Property, len(p)) for i, prop := range p { iProps[i] = baggage.Property{ Key: prop.key, Value: prop.value, HasValue: prop.hasValue, } } return iProps } func (p properties) Copy() properties { if len(p) == 0 { return nil } props := make(properties, len(p)) copy(props, p) return props } // validate ensures each Property in p conforms to the W3C Baggage // specification, returning an error otherwise. func (p properties) validate() error { for _, prop := range p { if err := prop.validate(); err != nil { return err } } return nil } // String encodes properties into a string compliant with the W3C Baggage // specification. func (p properties) String() string { props := make([]string, len(p)) for i, prop := range p { props[i] = prop.String() } return strings.Join(props, propertyDelimiter) } // Member is a list-member of a baggage-string as defined by the W3C Baggage // specification. type Member struct { key, value string properties properties // hasData indicates whether the created property contains data or not. // Properties that do not contain data are invalid with no other check // required. hasData bool } // NewMember returns a new Member from the passed arguments. The key will be // used directly while the value will be url decoded after validation. An error // is returned if the created Member would be invalid according to the W3C // Baggage specification. func NewMember(key, value string, props ...Property) (Member, error) { m := Member{ key: key, value: value, properties: properties(props).Copy(), hasData: true, } if err := m.validate(); err != nil { return newInvalidMember(), err } decodedValue, err := url.PathUnescape(value) if err != nil { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) } m.value = decodedValue return m, nil } func newInvalidMember() Member { return Member{} } // parseMember attempts to decode a Member from the passed string. It returns // an error if the input is invalid according to the W3C Baggage // specification. func parseMember(member string) (Member, error) { if n := len(member); n > maxBytesPerMembers { return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n) } var ( key, value string props properties ) keyValue, properties, found := strings.Cut(member, propertyDelimiter) if found { // Parse the member properties. for _, pStr := range strings.Split(properties, propertyDelimiter) { p, err := parseProperty(pStr) if err != nil { return newInvalidMember(), err } props = append(props, p) } } // Parse the member key/value pair. // Take into account a value can contain equal signs (=). 
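// For example, "foo=0=0=0" should split into key "foo" and value "0=0=0", because strings.Cut only cuts at the first "=" (see the parser tests below).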
k, v, found := strings.Cut(keyValue, keyValueDelimiter) if !found { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member) } // "Leading and trailing whitespaces are allowed but MUST be trimmed // when converting the header into a data structure." key = strings.TrimSpace(k) var err error value, err = url.PathUnescape(strings.TrimSpace(v)) if err != nil { return newInvalidMember(), fmt.Errorf("%w: %q", err, value) } if !keyRe.MatchString(key) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) } if !valueRe.MatchString(value) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) } return Member{key: key, value: value, properties: props, hasData: true}, nil } // validate ensures m conforms to the W3C Baggage specification. // A key is just an ASCII string, but a value must be URL encoded UTF-8, // returning an error otherwise. func (m Member) validate() error { if !m.hasData { return fmt.Errorf("%w: %q", errInvalidMember, m) } if !keyRe.MatchString(m.key) { return fmt.Errorf("%w: %q", errInvalidKey, m.key) } if !valueRe.MatchString(m.value) { return fmt.Errorf("%w: %q", errInvalidValue, m.value) } return m.properties.validate() } // Key returns the Member key. func (m Member) Key() string { return m.key } // Value returns the Member value. func (m Member) Value() string { return m.value } // Properties returns a copy of the Member properties. func (m Member) Properties() []Property { return m.properties.Copy() } // String encodes Member into a string compliant with the W3C Baggage // specification. func (m Member) String() string { // A key is just an ASCII string, but a value is URL encoded UTF-8. s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, url.QueryEscape(m.value)) if len(m.properties) > 0 { s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String()) } return s } // Baggage is a list of baggage members representing the baggage-string as // defined by the W3C Baggage specification. type Baggage struct { //nolint:golint list baggage.List } // New returns a new valid Baggage. It returns an error if it results in a // Baggage exceeding limits set in that specification. // // It expects all the provided members to have already been validated. func New(members ...Member) (Baggage, error) { if len(members) == 0 { return Baggage{}, nil } b := make(baggage.List) for _, m := range members { if !m.hasData { return Baggage{}, errInvalidMember } // OpenTelemetry resolves duplicates by last-one-wins. b[m.key] = baggage.Item{ Value: m.value, Properties: m.properties.asInternal(), } } // Check member numbers after deduplication. if len(b) > maxMembers { return Baggage{}, errMemberNumber } bag := Baggage{b} if n := len(bag.String()); n > maxBytesPerBaggageString { return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) } return bag, nil } // Parse attempts to decode a baggage-string from the passed string. It // returns an error if the input is invalid according to the W3C Baggage // specification. // // If there are duplicate list-members contained in baggage, the last one // defined (reading left-to-right) will be the only one kept. This diverges // from the W3C Baggage specification which allows duplicate list-members, but // conforms to the OpenTelemetry Baggage specification. 
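//
// For example (illustrative):
//
//	b, _ := Parse("k1=v1,k2=v2;prop") // two list-members; "k2" carries the property "prop"
//	d, _ := Parse("k=v1,k=v2")        // a single member "k" with value "v2" (last one wins)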
func Parse(bStr string) (Baggage, error) { if bStr == "" { return Baggage{}, nil } if n := len(bStr); n > maxBytesPerBaggageString { return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) } b := make(baggage.List) for _, memberStr := range strings.Split(bStr, listDelimiter) { m, err := parseMember(memberStr) if err != nil { return Baggage{}, err } // OpenTelemetry resolves duplicates by last-one-wins. b[m.key] = baggage.Item{ Value: m.value, Properties: m.properties.asInternal(), } } // OpenTelemetry does not allow for duplicate list-members, but the W3C // specification does. Now that we have deduplicated, ensure the baggage // does not exceed list-member limits. if len(b) > maxMembers { return Baggage{}, errMemberNumber } return Baggage{b}, nil } // Member returns the baggage list-member identified by key. // // If there is no list-member matching the passed key the returned Member will // be a zero-value Member. // The returned member is not validated, as we assume the validation happened // when it was added to the Baggage. func (b Baggage) Member(key string) Member { v, ok := b.list[key] if !ok { // We do not need to worry about distinguishing between the situation // where a zero-valued Member is included in the Baggage because a // zero-valued Member is invalid according to the W3C Baggage // specification (it has an empty key). return newInvalidMember() } return Member{ key: key, value: v.Value, properties: fromInternalProperties(v.Properties), hasData: true, } } // Members returns all the baggage list-members. // The order of the returned list-members does not have significance. // // The returned members are not validated, as we assume the validation happened // when they were added to the Baggage. func (b Baggage) Members() []Member { if len(b.list) == 0 { return nil } members := make([]Member, 0, len(b.list)) for k, v := range b.list { members = append(members, Member{ key: k, value: v.Value, properties: fromInternalProperties(v.Properties), hasData: true, }) } return members } // SetMember returns a copy the Baggage with the member included. If the // baggage contains a Member with the same key the existing Member is // replaced. // // If member is invalid according to the W3C Baggage specification, an error // is returned with the original Baggage. func (b Baggage) SetMember(member Member) (Baggage, error) { if !member.hasData { return b, errInvalidMember } n := len(b.list) if _, ok := b.list[member.key]; !ok { n++ } list := make(baggage.List, n) for k, v := range b.list { // Do not copy if we are just going to overwrite. if k == member.key { continue } list[k] = v } list[member.key] = baggage.Item{ Value: member.value, Properties: member.properties.asInternal(), } return Baggage{list: list}, nil } // DeleteMember returns a copy of the Baggage with the list-member identified // by key removed. func (b Baggage) DeleteMember(key string) Baggage { n := len(b.list) if _, ok := b.list[key]; ok { n-- } list := make(baggage.List, n) for k, v := range b.list { if k == key { continue } list[k] = v } return Baggage{list: list} } // Len returns the number of list-members in the Baggage. func (b Baggage) Len() int { return len(b.list) } // String encodes Baggage into a string compliant with the W3C Baggage // specification. The returned string will be invalid if the Baggage contains // any invalid list-members. 
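// For example, a Baggage holding a member "foo" with value "1=1" is expected to encode as "foo=1%3D1": member values are URL encoded, while property values are written as-is (see the package tests below).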
func (b Baggage) String() string { members := make([]string, 0, len(b.list)) for k, v := range b.list { members = append(members, Member{ key: k, value: v.Value, properties: fromInternalProperties(v.Properties), }.String()) } return strings.Join(members, listDelimiter) } opentelemetry-go-1.21.0/baggage/baggage_test.go000066400000000000000000000505741452547353200214370ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package baggage import ( "fmt" "math/rand" "sort" "strings" "testing" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/internal/baggage" ) var rng *rand.Rand func init() { // Seed with a static value to ensure deterministic results. rng = rand.New(rand.NewSource(1)) } func TestKeyRegExp(t *testing.T) { // ASCII only invalidKeyRune := []rune{ '\x00', '\x01', '\x02', '\x03', '\x04', '\x05', '\x06', '\x07', '\x08', '\x09', '\x0A', '\x0B', '\x0C', '\x0D', '\x0E', '\x0F', '\x10', '\x11', '\x12', '\x13', '\x14', '\x15', '\x16', '\x17', '\x18', '\x19', '\x1A', '\x1B', '\x1C', '\x1D', '\x1E', '\x1F', ' ', '(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']', '?', '=', '{', '}', '\x7F', } for _, ch := range invalidKeyRune { assert.NotRegexp(t, keyDef, fmt.Sprintf("%c", ch)) } } func TestValueRegExp(t *testing.T) { // ASCII only invalidValueRune := []rune{ '\x00', '\x01', '\x02', '\x03', '\x04', '\x05', '\x06', '\x07', '\x08', '\x09', '\x0A', '\x0B', '\x0C', '\x0D', '\x0E', '\x0F', '\x10', '\x11', '\x12', '\x13', '\x14', '\x15', '\x16', '\x17', '\x18', '\x19', '\x1A', '\x1B', '\x1C', '\x1D', '\x1E', '\x1F', ' ', '"', ',', ';', '\\', '\x7F', } for _, ch := range invalidValueRune { assert.NotRegexp(t, `^`+valueDef+`$`, fmt.Sprintf("invalid-%c-value", ch)) } } func TestParseProperty(t *testing.T) { p := Property{key: "key", value: "value", hasValue: true} testcases := []struct { in string expected Property }{ { in: "", expected: Property{}, }, { in: "key", expected: Property{ key: "key", }, }, { in: "key=", expected: Property{ key: "key", hasValue: true, }, }, { in: "key=value", expected: p, }, { in: " key=value ", expected: p, }, { in: "key = value", expected: p, }, { in: " key = value ", expected: p, }, { in: "\tkey=value", expected: p, }, } for _, tc := range testcases { actual, err := parseProperty(tc.in) if !assert.NoError(t, err) { continue } assert.Equal(t, tc.expected.Key(), actual.Key(), tc.in) actualV, actualOk := actual.Value() expectedV, expectedOk := tc.expected.Value() assert.Equal(t, expectedOk, actualOk, tc.in) assert.Equal(t, expectedV, actualV, tc.in) } } func TestParsePropertyError(t *testing.T) { _, err := parseProperty(",;,") assert.ErrorIs(t, err, errInvalidProperty) } func TestNewKeyProperty(t *testing.T) { p, err := NewKeyProperty(" ") assert.ErrorIs(t, err, errInvalidKey) assert.Equal(t, Property{}, p) p, err = NewKeyProperty("key") assert.NoError(t, err) assert.Equal(t, Property{key: "key"}, p) } func TestNewKeyValueProperty(t *testing.T) { p, err := NewKeyValueProperty(" ", "") 
assert.ErrorIs(t, err, errInvalidKey) assert.Equal(t, Property{}, p) p, err = NewKeyValueProperty("key", ";") assert.ErrorIs(t, err, errInvalidValue) assert.Equal(t, Property{}, p) p, err = NewKeyValueProperty("key", "value") assert.NoError(t, err) assert.Equal(t, Property{key: "key", value: "value", hasValue: true}, p) } func TestPropertyValidate(t *testing.T) { p := Property{} assert.ErrorIs(t, p.validate(), errInvalidKey) p.key = "k" assert.NoError(t, p.validate()) p.value = ";" assert.EqualError(t, p.validate(), "invalid property: inconsistent value") p.hasValue = true assert.ErrorIs(t, p.validate(), errInvalidValue) p.value = "v" assert.NoError(t, p.validate()) } func TestNewEmptyBaggage(t *testing.T) { b, err := New() assert.NoError(t, err) assert.Equal(t, Baggage{}, b) } func TestNewBaggage(t *testing.T) { b, err := New(Member{key: "k", hasData: true}) assert.NoError(t, err) assert.Equal(t, Baggage{list: baggage.List{"k": {}}}, b) } func TestNewBaggageWithDuplicates(t *testing.T) { // Having this many members would normally cause this to error, but since // these are duplicates of the same key they will be collapsed into a // single entry. m := make([]Member, maxMembers+1) for i := range m { // Duplicates are collapsed. m[i] = Member{ key: "a", value: fmt.Sprintf("%d", i), hasData: true, } } b, err := New(m...) assert.NoError(t, err) // Ensure that the last-one-wins by verifying the value. v := fmt.Sprintf("%d", maxMembers) want := Baggage{list: baggage.List{"a": {Value: v}}} assert.Equal(t, want, b) } func TestNewBaggageErrorEmptyMember(t *testing.T) { _, err := New(Member{}) assert.ErrorIs(t, err, errInvalidMember) } func key(n int) string { r := []rune("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") b := make([]rune, n) for i := range b { b[i] = r[rng.Intn(len(r))] } return string(b) } func TestNewBaggageErrorTooManyBytes(t *testing.T) { m := make([]Member, (maxBytesPerBaggageString/maxBytesPerMembers)+1) for i := range m { m[i] = Member{key: key(maxBytesPerMembers), hasData: true} } _, err := New(m...) assert.ErrorIs(t, err, errBaggageBytes) } func TestNewBaggageErrorTooManyMembers(t *testing.T) { m := make([]Member, maxMembers+1) for i := range m { m[i] = Member{key: fmt.Sprintf("%d", i), hasData: true} } _, err := New(m...) 
assert.ErrorIs(t, err, errMemberNumber) } func TestBaggageParse(t *testing.T) { tooLarge := key(maxBytesPerBaggageString + 1) tooLargeMember := key(maxBytesPerMembers + 1) m := make([]string, maxMembers+1) for i := range m { m[i] = fmt.Sprintf("a%d=", i) } tooManyMembers := strings.Join(m, listDelimiter) testcases := []struct { name string in string want baggage.List err error }{ { name: "empty value", in: "", want: baggage.List(nil), }, { name: "single member empty value no properties", in: "foo=", want: baggage.List{ "foo": {Value: ""}, }, }, { name: "single member no properties", in: "foo=1", want: baggage.List{ "foo": {Value: "1"}, }, }, { name: "single member no properties plus", in: "foo=1+1", want: baggage.List{ "foo": {Value: "1+1"}, }, }, { name: "single member no properties plus encoded", in: "foo=1%2B1", want: baggage.List{ "foo": {Value: "1+1"}, }, }, { name: "single member no properties slash", in: "foo=1/1", want: baggage.List{ "foo": {Value: "1/1"}, }, }, { name: "single member no properties slash encoded", in: "foo=1%2F1", want: baggage.List{ "foo": {Value: "1/1"}, }, }, { name: "single member no properties equals", in: "foo=1=1", want: baggage.List{ "foo": {Value: "1=1"}, }, }, { name: "single member no properties equals encoded", in: "foo=1%3D1", want: baggage.List{ "foo": {Value: "1=1"}, }, }, { name: "single member with spaces", in: " foo \t= 1\t\t ", want: baggage.List{ "foo": {Value: "1"}, }, }, { name: "single member empty value with properties", in: "foo=;state=on;red", want: baggage.List{ "foo": { Value: "", Properties: []baggage.Property{ {Key: "state", Value: "on", HasValue: true}, {Key: "red"}, }, }, }, }, { name: "single member with properties", in: "foo=1;state=on;red", want: baggage.List{ "foo": { Value: "1", Properties: []baggage.Property{ {Key: "state", Value: "on", HasValue: true}, {Key: "red"}, }, }, }, }, { name: "single member with value containing equal signs", in: "foo=0=0=0", want: baggage.List{ "foo": {Value: "0=0=0"}, }, }, { name: "two members with properties", in: "foo=1;state=on;red,bar=2;yellow", want: baggage.List{ "foo": { Value: "1", Properties: []baggage.Property{ {Key: "state", Value: "on", HasValue: true}, {Key: "red"}, }, }, "bar": { Value: "2", Properties: []baggage.Property{{Key: "yellow"}}, }, }, }, { // According to the OTel spec, last value wins. 
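// Both "foo=1;state=on;red" and "foo=2" appear in the input below; only the last occurrence ("foo=2", without the properties) is kept.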
name: "duplicate key", in: "foo=1;state=on;red,foo=2", want: baggage.List{ "foo": {Value: "2"}, }, }, { name: "url encoded value", in: "key1=val%252", want: baggage.List{ "key1": {Value: "val%2"}, }, }, { name: "invalid member: empty", in: "foo=,,bar=", err: errInvalidMember, }, { name: "invalid member: no key", in: "=foo", err: errInvalidKey, }, { name: "invalid member: no value", in: "foo", err: errInvalidMember, }, { name: "invalid member: invalid key", in: "\\=value", err: errInvalidKey, }, { name: "invalid member: invalid value", in: "foo=\\", err: errInvalidValue, }, { name: "invalid property: invalid key", in: "foo=1;=v", err: errInvalidProperty, }, { name: "invalid property: invalid value", in: "foo=1;key=\\", err: errInvalidProperty, }, { name: "invalid baggage string: too large", in: tooLarge, err: errBaggageBytes, }, { name: "invalid baggage string: member too large", in: tooLargeMember, err: errMemberBytes, }, { name: "invalid baggage string: too many members", in: tooManyMembers, err: errMemberNumber, }, } for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { actual, err := Parse(tc.in) assert.ErrorIs(t, err, tc.err) assert.Equal(t, Baggage{list: tc.want}, actual) }) } } func TestBaggageString(t *testing.T) { testcases := []struct { name string out string baggage baggage.List }{ { name: "empty value", out: "", baggage: baggage.List(nil), }, { name: "single member empty value no properties", out: "foo=", baggage: baggage.List{ "foo": {Value: ""}, }, }, { name: "single member no properties", out: "foo=1", baggage: baggage.List{ "foo": {Value: "1"}, }, }, { name: "URL encoded value", out: "foo=1%3D1", baggage: baggage.List{ "foo": {Value: "1=1"}, }, }, { name: "plus", out: "foo=1%2B1", baggage: baggage.List{ "foo": {Value: "1+1"}, }, }, { name: "single member empty value with properties", out: "foo=;red;state=on", baggage: baggage.List{ "foo": { Value: "", Properties: []baggage.Property{ {Key: "state", Value: "on", HasValue: true}, {Key: "red"}, }, }, }, }, { name: "single member with properties", // Properties are "opaque values" meaning they are sent as they // are set and no encoding is performed. 
out: "foo=1;red;state=on;z=z=z", baggage: baggage.List{ "foo": { Value: "1", Properties: []baggage.Property{ {Key: "state", Value: "on", HasValue: true}, {Key: "red"}, {Key: "z", Value: "z=z", HasValue: true}, }, }, }, }, { name: "two members with properties", out: "bar=2;yellow,foo=1;red;state=on", baggage: baggage.List{ "foo": { Value: "1", Properties: []baggage.Property{ {Key: "state", Value: "on", HasValue: true}, {Key: "red"}, }, }, "bar": { Value: "2", Properties: []baggage.Property{{Key: "yellow"}}, }, }, }, } orderer := func(s string) string { members := strings.Split(s, listDelimiter) for i, m := range members { parts := strings.Split(m, propertyDelimiter) if len(parts) > 1 { sort.Strings(parts[1:]) members[i] = strings.Join(parts, propertyDelimiter) } } sort.Strings(members) return strings.Join(members, listDelimiter) } for _, tc := range testcases { b := Baggage{tc.baggage} assert.Equal(t, tc.out, orderer(b.String())) } } func TestBaggageLen(t *testing.T) { b := Baggage{} assert.Equal(t, 0, b.Len()) b.list = make(baggage.List, 1) assert.Equal(t, 0, b.Len()) b.list["k"] = baggage.Item{} assert.Equal(t, 1, b.Len()) } func TestBaggageDeleteMember(t *testing.T) { key := "k" b0 := Baggage{} b1 := b0.DeleteMember(key) assert.NotContains(t, b1.list, key) b0 = Baggage{list: baggage.List{ key: {}, "other": {}, }} b1 = b0.DeleteMember(key) assert.Contains(t, b0.list, key) assert.NotContains(t, b1.list, key) } func TestBaggageSetMemberEmpty(t *testing.T) { _, err := Baggage{}.SetMember(Member{}) assert.ErrorIs(t, err, errInvalidMember) } func TestBaggageSetMember(t *testing.T) { b0 := Baggage{} key := "k" m := Member{key: key, hasData: true} b1, err := b0.SetMember(m) assert.NoError(t, err) assert.NotContains(t, b0.list, key) assert.Equal(t, baggage.Item{}, b1.list[key]) assert.Equal(t, 0, len(b0.list)) assert.Equal(t, 1, len(b1.list)) m.value = "v" b2, err := b1.SetMember(m) assert.NoError(t, err) assert.Equal(t, baggage.Item{}, b1.list[key]) assert.Equal(t, baggage.Item{Value: "v"}, b2.list[key]) assert.Equal(t, 1, len(b1.list)) assert.Equal(t, 1, len(b2.list)) p := properties{{key: "p"}} m.properties = p b3, err := b2.SetMember(m) assert.NoError(t, err) assert.Equal(t, baggage.Item{Value: "v"}, b2.list[key]) assert.Equal(t, baggage.Item{Value: "v", Properties: []baggage.Property{{Key: "p"}}}, b3.list[key]) assert.Equal(t, 1, len(b2.list)) assert.Equal(t, 1, len(b3.list)) // The returned baggage needs to be immutable and should use a copy of the // properties slice. p[0] = Property{key: "different"} assert.Equal(t, baggage.Item{Value: "v", Properties: []baggage.Property{{Key: "p"}}}, b3.list[key]) // Reset for below. 
p[0] = Property{key: "p"} m = Member{key: "another", hasData: true} b4, err := b3.SetMember(m) assert.NoError(t, err) assert.Equal(t, baggage.Item{Value: "v", Properties: []baggage.Property{{Key: "p"}}}, b3.list[key]) assert.NotContains(t, b3.list, m.key) assert.Equal(t, baggage.Item{Value: "v", Properties: []baggage.Property{{Key: "p"}}}, b4.list[key]) assert.Equal(t, baggage.Item{}, b4.list[m.key]) assert.Equal(t, 1, len(b3.list)) assert.Equal(t, 2, len(b4.list)) } func TestBaggageSetFalseMember(t *testing.T) { b0 := Baggage{} key := "k" m := Member{key: key, hasData: false} b1, err := b0.SetMember(m) assert.Error(t, err) assert.NotContains(t, b0.list, key) assert.Equal(t, baggage.Item{}, b1.list[key]) assert.Equal(t, 0, len(b0.list)) assert.Equal(t, 0, len(b1.list)) m.value = "v" b2, err := b1.SetMember(m) assert.Error(t, err) assert.Equal(t, baggage.Item{}, b1.list[key]) assert.Equal(t, baggage.Item{Value: ""}, b2.list[key]) assert.Equal(t, 0, len(b1.list)) assert.Equal(t, 0, len(b2.list)) } func TestBaggageSetFalseMembers(t *testing.T) { b0 := Baggage{} key := "k" m := Member{key: key, hasData: true} b1, err := b0.SetMember(m) assert.NoError(t, err) assert.NotContains(t, b0.list, key) assert.Equal(t, baggage.Item{}, b1.list[key]) assert.Equal(t, 0, len(b0.list)) assert.Equal(t, 1, len(b1.list)) m.value = "v" b2, err := b1.SetMember(m) assert.NoError(t, err) assert.Equal(t, baggage.Item{}, b1.list[key]) assert.Equal(t, baggage.Item{Value: "v"}, b2.list[key]) assert.Equal(t, 1, len(b1.list)) assert.Equal(t, 1, len(b2.list)) p := properties{{key: "p"}} m.properties = p b3, err := b2.SetMember(m) assert.NoError(t, err) assert.Equal(t, baggage.Item{Value: "v"}, b2.list[key]) assert.Equal(t, baggage.Item{Value: "v", Properties: []baggage.Property{{Key: "p"}}}, b3.list[key]) assert.Equal(t, 1, len(b2.list)) assert.Equal(t, 1, len(b3.list)) // The returned baggage needs to be immutable and should use a copy of the // properties slice. p[0] = Property{key: "different"} assert.Equal(t, baggage.Item{Value: "v", Properties: []baggage.Property{{Key: "p"}}}, b3.list[key]) // Reset for below. 
p[0] = Property{key: "p"} m = Member{key: "another"} b4, err := b3.SetMember(m) assert.Error(t, err) assert.Equal(t, baggage.Item{Value: "v", Properties: []baggage.Property{{Key: "p"}}}, b3.list[key]) assert.NotContains(t, b3.list, m.key) assert.Equal(t, baggage.Item{Value: "v", Properties: []baggage.Property{{Key: "p"}}}, b4.list[key]) assert.Equal(t, baggage.Item{}, b4.list[m.key]) assert.Equal(t, 1, len(b3.list)) assert.Equal(t, 1, len(b4.list)) } func TestNilBaggageMembers(t *testing.T) { assert.Nil(t, Baggage{}.Members()) } func TestBaggageMembers(t *testing.T) { members := []Member{ { key: "foo", value: "1", properties: properties{ {key: "state", value: "on", hasValue: true}, {key: "red"}, }, hasData: true, }, { key: "bar", value: "2", properties: properties{ {key: "yellow"}, }, hasData: true, }, } bag := Baggage{list: baggage.List{ "foo": { Value: "1", Properties: []baggage.Property{ {Key: "state", Value: "on", HasValue: true}, {Key: "red"}, }, }, "bar": { Value: "2", Properties: []baggage.Property{{Key: "yellow"}}, }, }} assert.ElementsMatch(t, members, bag.Members()) } func TestBaggageMember(t *testing.T) { bag := Baggage{list: baggage.List{"foo": {Value: "1"}}} assert.Equal(t, Member{key: "foo", value: "1", hasData: true}, bag.Member("foo")) assert.Equal(t, Member{}, bag.Member("bar")) } func TestMemberKey(t *testing.T) { m := Member{} assert.Equal(t, "", m.Key(), "even invalid values should be returned") key := "k" m.key = key assert.Equal(t, key, m.Key()) } func TestMemberValue(t *testing.T) { m := Member{key: "k", value: "\\"} assert.Equal(t, "\\", m.Value(), "even invalid values should be returned") value := "v" m.value = value assert.Equal(t, value, m.Value()) } func TestMemberProperties(t *testing.T) { m := Member{key: "k", value: "v"} assert.Nil(t, m.Properties()) p := []Property{{key: "foo"}} m.properties = properties(p) got := m.Properties() assert.Equal(t, p, got) // Returned slice needs to be a copy so the original is immutable. got[0] = Property{key: "bar"} assert.NotEqual(t, m.properties, got) } func TestMemberValidation(t *testing.T) { m := Member{hasData: false} assert.ErrorIs(t, m.validate(), errInvalidMember) m.hasData = true assert.ErrorIs(t, m.validate(), errInvalidKey) m.key, m.value = "k", "\\" assert.ErrorIs(t, m.validate(), errInvalidValue) m.value = "v" assert.NoError(t, m.validate()) } func TestNewMember(t *testing.T) { m, err := NewMember("", "") assert.ErrorIs(t, err, errInvalidKey) assert.Equal(t, Member{hasData: false}, m) key, val := "k", "v" p := Property{key: "foo"} m, err = NewMember(key, val, p) assert.NoError(t, err) expected := Member{ key: key, value: val, properties: properties{{key: "foo"}}, hasData: true, } assert.Equal(t, expected, m) // wrong value with wrong decoding val = "%zzzzz" _, err = NewMember(key, val, p) assert.ErrorIs(t, err, errInvalidValue) // value should be decoded val = "%3B" m, err = NewMember(key, val, p) expected = Member{ key: key, value: ";", properties: properties{{key: "foo"}}, hasData: true, } assert.NoError(t, err) assert.Equal(t, expected, m) // Ensure new member is immutable. 
p.key = "bar" assert.Equal(t, expected, m) } func TestPropertiesValidate(t *testing.T) { p := properties{{}} assert.ErrorIs(t, p.validate(), errInvalidKey) p[0].key = "foo" assert.NoError(t, p.validate()) p = append(p, Property{key: "bar"}) assert.NoError(t, p.validate()) } func TestMemberString(t *testing.T) { // normal key value pair member, _ := NewMember("key", "value") memberStr := member.String() assert.Equal(t, memberStr, "key=value") // encoded key member, _ = NewMember("key", "%3B") memberStr = member.String() assert.Equal(t, memberStr, "key=%3B") } var benchBaggage Baggage func BenchmarkNew(b *testing.B) { mem1, _ := NewMember("key1", "val1") mem2, _ := NewMember("key2", "val2") mem3, _ := NewMember("key3", "val3") mem4, _ := NewMember("key4", "val4") b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { benchBaggage, _ = New(mem1, mem2, mem3, mem4) } } var benchMember Member func BenchmarkNewMember(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { benchMember, _ = NewMember("key", "value") } } func BenchmarkParse(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { benchBaggage, _ = Parse(`userId=alice,serverNode = DF28 , isProduction = false,hasProp=stuff;propKey;propWValue=value`) } } opentelemetry-go-1.21.0/baggage/context.go000066400000000000000000000026571452547353200205060ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package baggage // import "go.opentelemetry.io/otel/baggage" import ( "context" "go.opentelemetry.io/otel/internal/baggage" ) // ContextWithBaggage returns a copy of parent with baggage. func ContextWithBaggage(parent context.Context, b Baggage) context.Context { // Delegate so any hooks for the OpenTracing bridge are handled. return baggage.ContextWithList(parent, b.list) } // ContextWithoutBaggage returns a copy of parent with no baggage. func ContextWithoutBaggage(parent context.Context) context.Context { // Delegate so any hooks for the OpenTracing bridge are handled. return baggage.ContextWithList(parent, nil) } // FromContext returns the baggage contained in ctx. func FromContext(ctx context.Context) Baggage { // Delegate so any hooks for the OpenTracing bridge are handled. return Baggage{list: baggage.ListFromContext(ctx)} } opentelemetry-go-1.21.0/baggage/context_test.go000066400000000000000000000020531452547353200215330ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package baggage import ( "context" "testing" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/internal/baggage" ) func TestContext(t *testing.T) { ctx := context.Background() assert.Equal(t, Baggage{}, FromContext(ctx)) b := Baggage{list: baggage.List{"key": baggage.Item{Value: "val"}}} ctx = ContextWithBaggage(ctx, b) assert.Equal(t, b, FromContext(ctx)) ctx = ContextWithoutBaggage(ctx) assert.Equal(t, Baggage{}, FromContext(ctx)) } opentelemetry-go-1.21.0/baggage/doc.go000066400000000000000000000015131452547353200175550ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* Package baggage provides functionality for storing and retrieving baggage items in Go context. For propagating the baggage, see the go.opentelemetry.io/otel/propagation package. */ package baggage // import "go.opentelemetry.io/otel/baggage" opentelemetry-go-1.21.0/bridge/000077500000000000000000000000001452547353200163405ustar00rootroot00000000000000opentelemetry-go-1.21.0/bridge/opencensus/000077500000000000000000000000001452547353200205225ustar00rootroot00000000000000opentelemetry-go-1.21.0/bridge/opencensus/config.go000066400000000000000000000036061452547353200223230ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package opencensus // import "go.opentelemetry.io/otel/bridge/opencensus" import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/trace" ) const scopeName = "go.opentelemetry.io/otel/bridge/opencensus" // newTraceConfig returns a config configured with options. func newTraceConfig(options []TraceOption) traceConfig { conf := traceConfig{tp: otel.GetTracerProvider()} for _, o := range options { conf = o.apply(conf) } return conf } type traceConfig struct { tp trace.TracerProvider } // TraceOption applies a configuration option value to an OpenCensus bridge // Tracer. type TraceOption interface { apply(traceConfig) traceConfig } // traceOptionFunc applies a set of options to a config. type traceOptionFunc func(traceConfig) traceConfig // apply returns a config with option(s) applied. func (o traceOptionFunc) apply(conf traceConfig) traceConfig { return o(conf) } // WithTracerProvider specifies a tracer provider to use for creating a tracer. 
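// If this option is not provided, the bridge falls back to the global provider returned by otel.GetTracerProvider() (see newTraceConfig above).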
func WithTracerProvider(tp trace.TracerProvider) TraceOption { return traceOptionFunc(func(conf traceConfig) traceConfig { conf.tp = tp return conf }) } type metricConfig struct{} // MetricOption applies a configuration option value to an OpenCensus bridge // MetricProducer. type MetricOption interface { apply(metricConfig) metricConfig } opentelemetry-go-1.21.0/bridge/opencensus/config_test.go000066400000000000000000000025731452547353200233640ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package opencensus // import "go.opentelemetry.io/otel/bridge/opencensus" import ( "testing" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/trace/noop" ) func TestNewTraceConfig(t *testing.T) { globalTP := noop.NewTracerProvider() customTP := noop.NewTracerProvider() otel.SetTracerProvider(globalTP) for _, tc := range []struct { desc string opts []TraceOption expected traceConfig }{ { desc: "default", expected: traceConfig{ tp: globalTP, }, }, { desc: "overridden", opts: []TraceOption{ WithTracerProvider(customTP), }, expected: traceConfig{ tp: customTP, }, }, } { t.Run(tc.desc, func(t *testing.T) { cfg := newTraceConfig(tc.opts) assert.Equal(t, tc.expected, cfg) }) } } opentelemetry-go-1.21.0/bridge/opencensus/doc.go000066400000000000000000000057061452547353200216260ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package opencensus provides a migration bridge from OpenCensus to // OpenTelemetry for metrics and traces. The bridge incorporates metrics and // traces from OpenCensus into the OpenTelemetry SDK, combining them with // metrics and traces from OpenTelemetry instrumentation. // // # Migration Guide // // For most applications, it would be difficult to migrate an application // from OpenCensus to OpenTelemetry all-at-once. Libraries used by the // application may still be using OpenCensus, and the application itself may // have many lines of instrumentation. // // Bridges help in this situation by allowing your application to have "mixed" // instrumentation, while incorporating all instrumentation into a single // export path. To migrate with bridges, a user would: // // 1. Configure the OpenTelemetry SDK for metrics and traces, with the OpenTelemetry exporters matching to your current OpenCensus exporters. // 2. Install this OpenCensus bridge, which sends OpenCensus telemetry to your new OpenTelemetry exporters. // 3. 
Over time, migrate your instrumentation from OpenCensus to OpenTelemetry. // 4. Once all instrumentation is migrated, remove the OpenCensus bridge. // // With this approach, you can migrate your telemetry, including in dependent // libraries over time without disruption. // // # Warnings // // Installing a metric or tracing bridge will cause OpenCensus telemetry to be // exported by OpenTelemetry exporters. Since OpenCensus telemetry uses globals, // installing a bridge will result in telemetry collection from _all_ libraries // that use OpenCensus, including some you may not expect, such as the // telemetry exporter itself. // // # Limitations // // There are known limitations to the trace bridge: // // - The AddLink method for OpenCensus Spans is ignored, and an error is sent // to the OpenTelemetry ErrorHandler. // - The NewContext method of the OpenCensus Tracer cannot embed an OpenCensus // Span in a context unless that Span was created by that Tracer. // - Conversion of custom OpenCensus Samplers to OpenTelemetry is not // implemented, and An error will be sent to the OpenTelemetry ErrorHandler. // // There are known limitations to the metric bridge: // - GaugeDistribution-typed metrics are dropped // - Histogram's SumOfSquaredDeviation field is dropped package opencensus // import "go.opentelemetry.io/otel/bridge/opencensus" opentelemetry-go-1.21.0/bridge/opencensus/example_test.go000066400000000000000000000024331452547353200235450ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package opencensus_test import ( "go.opentelemetry.io/otel/bridge/opencensus" "go.opentelemetry.io/otel/sdk/metric" ) func ExampleNewMetricProducer() { // Create the OpenCensus Metric bridge. bridge := opencensus.NewMetricProducer() // Add the bridge as a producer to your reader. // If using a push exporter, such as OTLP exporter, // use metric.NewPeriodicReader with metric.WithProducer option. // If using a pull exporter which acts as a reader, such as prometheus exporter, // use a dedicated option like prometheus.WithProducer. reader := metric.NewManualReader(metric.WithProducer(bridge)) // Add the reader to your MeterProvider. 
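// The provider is discarded here to keep the example short; a real application would typically also register it globally, e.g. via otel.SetMeterProvider (illustrative, not shown).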
_ = metric.NewMeterProvider(metric.WithReader(reader)) } opentelemetry-go-1.21.0/bridge/opencensus/go.mod000066400000000000000000000020601452547353200216260ustar00rootroot00000000000000module go.opentelemetry.io/otel/bridge/opencensus go 1.20 require ( github.com/stretchr/testify v1.8.4 go.opencensus.io v0.24.0 go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/sdk v1.21.0 go.opentelemetry.io/otel/sdk/metric v1.21.0 go.opentelemetry.io/otel/trace v1.21.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect github.com/kr/pretty v0.1.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect golang.org/x/sys v0.14.0 // indirect gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) replace go.opentelemetry.io/otel => ../.. replace go.opentelemetry.io/otel/trace => ../../trace replace go.opentelemetry.io/otel/sdk => ../../sdk replace go.opentelemetry.io/otel/metric => ../../metric replace go.opentelemetry.io/otel/sdk/metric => ../../sdk/metric opentelemetry-go-1.21.0/bridge/opencensus/go.sum000066400000000000000000000253451452547353200216660ustar00rootroot00000000000000cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint 
v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/protobuf 
v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= opentelemetry-go-1.21.0/bridge/opencensus/internal/000077500000000000000000000000001452547353200223365ustar00rootroot00000000000000opentelemetry-go-1.21.0/bridge/opencensus/internal/handler.go000066400000000000000000000015041452547353200243020ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal // import "go.opentelemetry.io/otel/bridge/opencensus/internal" import "go.opentelemetry.io/otel" // Handle is the package level function to handle errors. It can be // overwritten for testing. var Handle = otel.Handle opentelemetry-go-1.21.0/bridge/opencensus/internal/oc2otel/000077500000000000000000000000001452547353200237055ustar00rootroot00000000000000opentelemetry-go-1.21.0/bridge/opencensus/internal/oc2otel/attributes.go000066400000000000000000000025421452547353200264250ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package oc2otel // import "go.opentelemetry.io/otel/bridge/opencensus/internal/oc2otel" import ( octrace "go.opencensus.io/trace" "go.opentelemetry.io/otel/attribute" ) func Attributes(attr []octrace.Attribute) []attribute.KeyValue { otelAttr := make([]attribute.KeyValue, len(attr)) for i, a := range attr { otelAttr[i] = attribute.KeyValue{ Key: attribute.Key(a.Key()), Value: AttributeValue(a.Value()), } } return otelAttr } func AttributeValue(ocval interface{}) attribute.Value { switch v := ocval.(type) { case bool: return attribute.BoolValue(v) case int64: return attribute.Int64Value(v) case float64: return attribute.Float64Value(v) case string: return attribute.StringValue(v) default: return attribute.StringValue("unknown") } } opentelemetry-go-1.21.0/bridge/opencensus/internal/oc2otel/attributes_test.go000066400000000000000000000030341452547353200274610ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package oc2otel import ( "testing" octrace "go.opencensus.io/trace" "go.opentelemetry.io/otel/attribute" ) func TestAttributes(t *testing.T) { in := []octrace.Attribute{ octrace.BoolAttribute("bool", true), octrace.Int64Attribute("int64", 49), octrace.Float64Attribute("float64", 1.618), octrace.StringAttribute("key", "val"), } want := []attribute.KeyValue{ attribute.Bool("bool", true), attribute.Int64("int64", 49), attribute.Float64("float64", 1.618), attribute.String("key", "val"), } got := Attributes(in) if len(got) != len(want) { t.Errorf("Attributes conversion failed: want %#v, got %#v", want, got) } for i := range got { if g, w := got[i], want[i]; g != w { t.Errorf("Attributes conversion: want %#v, got %#v", w, g) } } } func TestAttributeValueUnknown(t *testing.T) { got := AttributeValue([]byte{}) if got != attribute.StringValue("unknown") { t.Errorf("AttributeValue of unknown wrong: %#v", got) } } opentelemetry-go-1.21.0/bridge/opencensus/internal/oc2otel/span_context.go000066400000000000000000000020761452547353200267460ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package oc2otel // import "go.opentelemetry.io/otel/bridge/opencensus/internal/oc2otel" import ( octrace "go.opencensus.io/trace" "go.opentelemetry.io/otel/trace" ) func SpanContext(sc octrace.SpanContext) trace.SpanContext { var traceFlags trace.TraceFlags if sc.IsSampled() { traceFlags = trace.FlagsSampled } return trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID(sc.TraceID), SpanID: trace.SpanID(sc.SpanID), TraceFlags: traceFlags, }) } opentelemetry-go-1.21.0/bridge/opencensus/internal/oc2otel/span_context_test.go000066400000000000000000000043511452547353200300030ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package oc2otel import ( "testing" octrace "go.opencensus.io/trace" "go.opencensus.io/trace/tracestate" "go.opentelemetry.io/otel/trace" ) func TestSpanContextConversion(t *testing.T) { for _, tc := range []struct { description string input octrace.SpanContext expected trace.SpanContext }{ { description: "empty", }, { description: "sampled", input: octrace.SpanContext{ TraceID: octrace.TraceID([16]byte{1}), SpanID: octrace.SpanID([8]byte{2}), TraceOptions: octrace.TraceOptions(0x1), }, expected: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID([16]byte{1}), SpanID: trace.SpanID([8]byte{2}), TraceFlags: trace.FlagsSampled, }), }, { description: "not sampled", input: octrace.SpanContext{ TraceID: octrace.TraceID([16]byte{1}), SpanID: octrace.SpanID([8]byte{2}), TraceOptions: octrace.TraceOptions(0), }, expected: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID([16]byte{1}), SpanID: trace.SpanID([8]byte{2}), }), }, { description: "trace state is ignored", input: octrace.SpanContext{ TraceID: octrace.TraceID([16]byte{1}), SpanID: octrace.SpanID([8]byte{2}), Tracestate: &tracestate.Tracestate{}, }, expected: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID([16]byte{1}), SpanID: trace.SpanID([8]byte{2}), }), }, } { t.Run(tc.description, func(t *testing.T) { output := SpanContext(tc.input) if !output.Equal(tc.expected) { t.Fatalf("Got %+v spancontext, expected %+v.", output, tc.expected) } }) } } opentelemetry-go-1.21.0/bridge/opencensus/internal/oc2otel/tracer_start_options.go000066400000000000000000000026631452547353200305130ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
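// NOTE: the sketch below is editorial commentary, not part of the original
// OpenCensus bridge source. StartOptions applies every OpenCensus option
// function to materialize the options, maps the span kind onto its
// OpenTelemetry equivalent, and reports any configured sampler as an error,
// since a per-span sampler cannot be carried across the bridge:
//
//	otelOpts, err := StartOptions([]octrace.StartOption{
//		octrace.WithSpanKind(octrace.SpanKindServer),
//	})
//	// err is nil and otelOpts holds trace.WithSpanKind(trace.SpanKindServer).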
package oc2otel // import "go.opentelemetry.io/otel/bridge/opencensus/internal/oc2otel" import ( "fmt" octrace "go.opencensus.io/trace" "go.opentelemetry.io/otel/trace" ) func StartOptions(optFuncs []octrace.StartOption) ([]trace.SpanStartOption, error) { var ocOpts octrace.StartOptions for _, fn := range optFuncs { fn(&ocOpts) } var otelOpts []trace.SpanStartOption switch ocOpts.SpanKind { case octrace.SpanKindClient: otelOpts = append(otelOpts, trace.WithSpanKind(trace.SpanKindClient)) case octrace.SpanKindServer: otelOpts = append(otelOpts, trace.WithSpanKind(trace.SpanKindServer)) case octrace.SpanKindUnspecified: otelOpts = append(otelOpts, trace.WithSpanKind(trace.SpanKindUnspecified)) } var err error if ocOpts.Sampler != nil { err = fmt.Errorf("unsupported sampler: %v", ocOpts.Sampler) } return otelOpts, err } opentelemetry-go-1.21.0/bridge/opencensus/internal/oc2otel/tracer_start_options_test.go000066400000000000000000000030441452547353200315440ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package oc2otel import ( "testing" octrace "go.opencensus.io/trace" "go.opentelemetry.io/otel/trace" ) func TestStartOptionsSpanKind(t *testing.T) { conv := map[int]trace.SpanKind{ octrace.SpanKindClient: trace.SpanKindClient, octrace.SpanKindServer: trace.SpanKindServer, octrace.SpanKindUnspecified: trace.SpanKindUnspecified, } for oc, otel := range conv { ocOpts := []octrace.StartOption{octrace.WithSpanKind(oc)} otelOpts, err := StartOptions(ocOpts) if err != nil { t.Errorf("StartOptions errored: %v", err) continue } c := trace.NewSpanStartConfig(otelOpts...) if c.SpanKind() != otel { t.Errorf("conversion of SpanKind start option: got %v, want %v", c.SpanKind(), otel) } } } func TestStartOptionsSamplerErrors(t *testing.T) { ocOpts := []octrace.StartOption{octrace.WithSampler(octrace.AlwaysSample())} _, err := StartOptions(ocOpts) if err == nil { t.Error("StartOptions should error Sampler option") } } opentelemetry-go-1.21.0/bridge/opencensus/internal/ocmetric/000077500000000000000000000000001452547353200241435ustar00rootroot00000000000000opentelemetry-go-1.21.0/bridge/opencensus/internal/ocmetric/metric.go000066400000000000000000000336631452547353200257700ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
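// The sketch below is an editorial illustration of how ConvertMetrics is
// typically driven; ocMetrics and the surrounding producer plumbing are
// assumed for the example and are not defined in this package:
//
//	var ocMetrics []*ocmetricdata.Metric // e.g. gathered from an OpenCensus producer
//	otelMetrics, err := ConvertMetrics(ocMetrics)
//	if err != nil {
//		// Conversion skips individual malformed metrics and joins their
//		// errors, so otelMetrics may still contain usable partial results.
//		otel.Handle(err)
//	}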
package internal // import "go.opentelemetry.io/otel/bridge/opencensus/internal/ocmetric" import ( "errors" "fmt" "math" "reflect" "sort" "strconv" ocmetricdata "go.opencensus.io/metric/metricdata" octrace "go.opencensus.io/trace" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) var ( errAggregationType = errors.New("unsupported OpenCensus aggregation type") errMismatchedValueTypes = errors.New("wrong value type for data point") errNegativeCount = errors.New("distribution or summary count is negative") errNegativeBucketCount = errors.New("distribution bucket count is negative") errMismatchedAttributeKeyValues = errors.New("mismatched number of attribute keys and values") errInvalidExemplarSpanContext = errors.New("span context exemplar attachment does not contain an OpenCensus SpanContext") ) // ConvertMetrics converts metric data from OpenCensus to OpenTelemetry. func ConvertMetrics(ocmetrics []*ocmetricdata.Metric) ([]metricdata.Metrics, error) { otelMetrics := make([]metricdata.Metrics, 0, len(ocmetrics)) var err error for _, ocm := range ocmetrics { if ocm == nil { continue } agg, aggregationErr := convertAggregation(ocm) if aggregationErr != nil { err = errors.Join(err, fmt.Errorf("error converting metric %v: %w", ocm.Descriptor.Name, aggregationErr)) continue } otelMetrics = append(otelMetrics, metricdata.Metrics{ Name: ocm.Descriptor.Name, Description: ocm.Descriptor.Description, Unit: string(ocm.Descriptor.Unit), Data: agg, }) } if err != nil { return otelMetrics, fmt.Errorf("error converting from OpenCensus to OpenTelemetry: %w", err) } return otelMetrics, nil } // convertAggregation produces an aggregation based on the OpenCensus Metric. func convertAggregation(metric *ocmetricdata.Metric) (metricdata.Aggregation, error) { labelKeys := metric.Descriptor.LabelKeys switch metric.Descriptor.Type { case ocmetricdata.TypeGaugeInt64: return convertGauge[int64](labelKeys, metric.TimeSeries) case ocmetricdata.TypeGaugeFloat64: return convertGauge[float64](labelKeys, metric.TimeSeries) case ocmetricdata.TypeCumulativeInt64: return convertSum[int64](labelKeys, metric.TimeSeries) case ocmetricdata.TypeCumulativeFloat64: return convertSum[float64](labelKeys, metric.TimeSeries) case ocmetricdata.TypeCumulativeDistribution: return convertHistogram(labelKeys, metric.TimeSeries) case ocmetricdata.TypeSummary: return convertSummary(labelKeys, metric.TimeSeries) } return nil, fmt.Errorf("%w: %q", errAggregationType, metric.Descriptor.Type) } // convertGauge converts an OpenCensus gauge to an OpenTelemetry gauge aggregation. func convertGauge[N int64 | float64](labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.TimeSeries) (metricdata.Gauge[N], error) { points, err := convertNumberDataPoints[N](labelKeys, ts) return metricdata.Gauge[N]{DataPoints: points}, err } // convertSum converts an OpenCensus cumulative to an OpenTelemetry sum aggregation. func convertSum[N int64 | float64](labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.TimeSeries) (metricdata.Sum[N], error) { points, err := convertNumberDataPoints[N](labelKeys, ts) // OpenCensus sums are always Cumulative return metricdata.Sum[N]{DataPoints: points, Temporality: metricdata.CumulativeTemporality, IsMonotonic: true}, err } // convertNumberDataPoints converts OpenCensus TimeSeries to OpenTelemetry DataPoints. 
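//
// As an informal illustration (values invented for this sketch), a series with
// label value "world" for key "greeting" and one int64 point of 42 maps to
// roughly:
//
//	metricdata.DataPoint[int64]{
//		Attributes: attribute.NewSet(attribute.String("greeting", "world")),
//		StartTime:  series.StartTime,
//		Time:       point.Time,
//		Value:      42,
//	}
//
// Points whose value type does not match N are skipped and reported through
// the joined error instead of aborting the whole conversion.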
func convertNumberDataPoints[N int64 | float64](labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.TimeSeries) ([]metricdata.DataPoint[N], error) { var points []metricdata.DataPoint[N] var err error for _, t := range ts { attrs, attrsErr := convertAttrs(labelKeys, t.LabelValues) if attrsErr != nil { err = errors.Join(err, attrsErr) continue } for _, p := range t.Points { v, ok := p.Value.(N) if !ok { err = errors.Join(err, fmt.Errorf("%w: %q", errMismatchedValueTypes, p.Value)) continue } points = append(points, metricdata.DataPoint[N]{ Attributes: attrs, StartTime: t.StartTime, Time: p.Time, Value: v, }) } } return points, err } // convertHistogram converts OpenCensus Distribution timeseries to an // OpenTelemetry Histogram aggregation. func convertHistogram(labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.TimeSeries) (metricdata.Histogram[float64], error) { points := make([]metricdata.HistogramDataPoint[float64], 0, len(ts)) var err error for _, t := range ts { attrs, attrsErr := convertAttrs(labelKeys, t.LabelValues) if attrsErr != nil { err = errors.Join(err, attrsErr) continue } for _, p := range t.Points { dist, ok := p.Value.(*ocmetricdata.Distribution) if !ok { err = errors.Join(err, fmt.Errorf("%w: %d", errMismatchedValueTypes, p.Value)) continue } bucketCounts, exemplars, bucketErr := convertBuckets(dist.Buckets) if bucketErr != nil { err = errors.Join(err, bucketErr) continue } if dist.Count < 0 { err = errors.Join(err, fmt.Errorf("%w: %d", errNegativeCount, dist.Count)) continue } points = append(points, metricdata.HistogramDataPoint[float64]{ Attributes: attrs, StartTime: t.StartTime, Time: p.Time, Count: uint64(dist.Count), Sum: dist.Sum, Bounds: dist.BucketOptions.Bounds, BucketCounts: bucketCounts, Exemplars: exemplars, }) } } return metricdata.Histogram[float64]{DataPoints: points, Temporality: metricdata.CumulativeTemporality}, err } // convertBuckets converts from OpenCensus bucket counts to slice of uint64, // and converts OpenCensus exemplars to OpenTelemetry exemplars. func convertBuckets(buckets []ocmetricdata.Bucket) ([]uint64, []metricdata.Exemplar[float64], error) { bucketCounts := make([]uint64, len(buckets)) exemplars := []metricdata.Exemplar[float64]{} var err error for i, bucket := range buckets { if bucket.Count < 0 { err = errors.Join(err, fmt.Errorf("%w: %q", errNegativeBucketCount, bucket.Count)) continue } bucketCounts[i] = uint64(bucket.Count) if bucket.Exemplar != nil { exemplar, exemplarErr := convertExemplar(bucket.Exemplar) if exemplarErr != nil { err = errors.Join(err, exemplarErr) continue } exemplars = append(exemplars, exemplar) } } return bucketCounts, exemplars, err } // convertExemplar converts an OpenCensus exemplar to an OpenTelemetry exemplar. func convertExemplar(ocExemplar *ocmetricdata.Exemplar) (metricdata.Exemplar[float64], error) { exemplar := metricdata.Exemplar[float64]{ Value: ocExemplar.Value, Time: ocExemplar.Timestamp, } var err error for k, v := range ocExemplar.Attachments { switch { case k == ocmetricdata.AttachmentKeySpanContext: sc, ok := v.(octrace.SpanContext) if !ok { err = errors.Join(err, fmt.Errorf("%w; type: %v", errInvalidExemplarSpanContext, reflect.TypeOf(v))) continue } exemplar.SpanID = sc.SpanID[:] exemplar.TraceID = sc.TraceID[:] default: exemplar.FilteredAttributes = append(exemplar.FilteredAttributes, convertKV(k, v)) } } sortable := attribute.Sortable(exemplar.FilteredAttributes) sort.Sort(&sortable) return exemplar, err } // convertKV converts an OpenCensus Attachment to an OpenTelemetry KeyValue. 
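//
// A few representative conversions, paraphrasing the switch below:
//
//	convertKV("b", true)           // attribute.Bool("b", true)
//	convertKV("n", uint64(1)<<63)  // attribute.String("n", "9223372036854775808"); too large for int64
//	convertKV("c", complex(10, 0)) // attribute.String("c", "(10+0i)")
//	convertKV("x", struct{}{})     // attribute.String("x", "unhandled attribute value: {}")
//
// The fallback formatting for unsigned, complex, and unknown values follows
// the helper functions defined after this function.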
func convertKV(key string, value any) attribute.KeyValue { switch typedVal := value.(type) { case bool: return attribute.Bool(key, typedVal) case int: return attribute.Int(key, typedVal) case int8: return attribute.Int(key, int(typedVal)) case int16: return attribute.Int(key, int(typedVal)) case int32: return attribute.Int(key, int(typedVal)) case int64: return attribute.Int64(key, typedVal) case uint: return uintKV(key, typedVal) case uint8: return uintKV(key, uint(typedVal)) case uint16: return uintKV(key, uint(typedVal)) case uint32: return uintKV(key, uint(typedVal)) case uintptr: return uint64KV(key, uint64(typedVal)) case uint64: return uint64KV(key, uint64(typedVal)) case float32: return attribute.Float64(key, float64(typedVal)) case float64: return attribute.Float64(key, typedVal) case complex64: return attribute.String(key, complexToString(typedVal)) case complex128: return attribute.String(key, complexToString(typedVal)) case string: return attribute.String(key, typedVal) case []bool: return attribute.BoolSlice(key, typedVal) case []int: return attribute.IntSlice(key, typedVal) case []int8: return intSliceKV(key, typedVal) case []int16: return intSliceKV(key, typedVal) case []int32: return intSliceKV(key, typedVal) case []int64: return attribute.Int64Slice(key, typedVal) case []uint: return uintSliceKV(key, typedVal) case []uint8: return uintSliceKV(key, typedVal) case []uint16: return uintSliceKV(key, typedVal) case []uint32: return uintSliceKV(key, typedVal) case []uintptr: return uintSliceKV(key, typedVal) case []uint64: return uintSliceKV(key, typedVal) case []float32: floatSlice := make([]float64, len(typedVal)) for i := range typedVal { floatSlice[i] = float64(typedVal[i]) } return attribute.Float64Slice(key, floatSlice) case []float64: return attribute.Float64Slice(key, typedVal) case []complex64: return complexSliceKV(key, typedVal) case []complex128: return complexSliceKV(key, typedVal) case []string: return attribute.StringSlice(key, typedVal) case fmt.Stringer: return attribute.Stringer(key, typedVal) default: return attribute.String(key, fmt.Sprintf("unhandled attribute value: %+v", value)) } } func intSliceKV[N int8 | int16 | int32](key string, val []N) attribute.KeyValue { intSlice := make([]int, len(val)) for i := range val { intSlice[i] = int(val[i]) } return attribute.IntSlice(key, intSlice) } func uintKV(key string, val uint) attribute.KeyValue { if val > uint(math.MaxInt) { return attribute.String(key, strconv.FormatUint(uint64(val), 10)) } return attribute.Int(key, int(val)) } func uintSliceKV[N uint | uint8 | uint16 | uint32 | uint64 | uintptr](key string, val []N) attribute.KeyValue { strSlice := make([]string, len(val)) for i := range val { strSlice[i] = strconv.FormatUint(uint64(val[i]), 10) } return attribute.StringSlice(key, strSlice) } func uint64KV(key string, val uint64) attribute.KeyValue { const maxInt64 = ^uint64(0) >> 1 if val > maxInt64 { return attribute.String(key, strconv.FormatUint(val, 10)) } return attribute.Int64(key, int64(val)) } func complexSliceKV[N complex64 | complex128](key string, val []N) attribute.KeyValue { strSlice := make([]string, len(val)) for i := range val { strSlice[i] = complexToString(val[i]) } return attribute.StringSlice(key, strSlice) } func complexToString[N complex64 | complex128](val N) string { return strconv.FormatComplex(complex128(val), 'f', -1, 64) } // convertSummary converts OpenCensus Summary timeseries to an // OpenTelemetry Summary. 
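//
// OpenCensus snapshots record percentiles in the range (0, 100.0], while
// OpenTelemetry expects quantiles in [0.0, 1.0]; a percentile entry such as
// 95.0: 12.3 therefore becomes, informally:
//
//	metricdata.QuantileValue{Quantile: 0.95, Value: 12.3}
//
// The converted quantile values are sorted in ascending quantile order.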
func convertSummary(labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.TimeSeries) (metricdata.Summary, error) { points := make([]metricdata.SummaryDataPoint, 0, len(ts)) var err error for _, t := range ts { attrs, attrErr := convertAttrs(labelKeys, t.LabelValues) if attrErr != nil { err = errors.Join(err, attrErr) continue } for _, p := range t.Points { summary, ok := p.Value.(*ocmetricdata.Summary) if !ok { err = errors.Join(err, fmt.Errorf("%w: %d", errMismatchedValueTypes, p.Value)) continue } if summary.Count < 0 { err = errors.Join(err, fmt.Errorf("%w: %d", errNegativeCount, summary.Count)) continue } point := metricdata.SummaryDataPoint{ Attributes: attrs, StartTime: t.StartTime, Time: p.Time, Count: uint64(summary.Count), QuantileValues: convertQuantiles(summary.Snapshot), Sum: summary.Sum, } points = append(points, point) } } return metricdata.Summary{DataPoints: points}, err } // convertQuantiles converts an OpenCensus summary snapshot to // OpenTelemetry quantiles. func convertQuantiles(snapshot ocmetricdata.Snapshot) []metricdata.QuantileValue { quantileValues := make([]metricdata.QuantileValue, 0, len(snapshot.Percentiles)) for quantile, value := range snapshot.Percentiles { quantileValues = append(quantileValues, metricdata.QuantileValue{ // OpenCensus quantiles are range (0-100.0], but OpenTelemetry // quantiles are range [0.0, 1.0]. Quantile: quantile / 100.0, Value: value, }) } sort.Sort(byQuantile(quantileValues)) return quantileValues } // byQuantile implements sort.Interface for []metricdata.QuantileValue // based on the Quantile field. type byQuantile []metricdata.QuantileValue func (a byQuantile) Len() int { return len(a) } func (a byQuantile) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byQuantile) Less(i, j int) bool { return a[i].Quantile < a[j].Quantile } // convertAttrs converts from OpenCensus attribute keys and values to an // OpenTelemetry attribute Set. func convertAttrs(keys []ocmetricdata.LabelKey, values []ocmetricdata.LabelValue) (attribute.Set, error) { if len(keys) != len(values) { return attribute.NewSet(), fmt.Errorf("%w: keys(%q) values(%q)", errMismatchedAttributeKeyValues, len(keys), len(values)) } attrs := []attribute.KeyValue{} for i, lv := range values { if !lv.Present { continue } attrs = append(attrs, attribute.KeyValue{ Key: attribute.Key(keys[i].Key), Value: attribute.StringValue(lv.Value), }) } return attribute.NewSet(attrs...), nil } opentelemetry-go-1.21.0/bridge/opencensus/internal/ocmetric/metric_test.go000066400000000000000000001000261452547353200270130ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package internal // import "go.opentelemetry.io/otel/bridge/opencensus/opencensusmetric/internal" import ( "errors" "fmt" "math" "reflect" "testing" "time" "github.com/stretchr/testify/assert" ocmetricdata "go.opencensus.io/metric/metricdata" octrace "go.opencensus.io/trace" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" ) func TestConvertMetrics(t *testing.T) { endTime1 := time.Now() exemplarTime := endTime1.Add(-10 * time.Second) endTime2 := endTime1.Add(-time.Millisecond) startTime := endTime2.Add(-time.Minute) for _, tc := range []struct { desc string input []*ocmetricdata.Metric expected []metricdata.Metrics expectedErr error }{ { desc: "empty", expected: []metricdata.Metrics{}, }, { desc: "normal Histogram, summary, gauges, and sums", input: []*ocmetricdata.Metric{ { Descriptor: ocmetricdata.Descriptor{ Name: "foo.com/histogram-a", Description: "a testing histogram", Unit: ocmetricdata.UnitDimensionless, Type: ocmetricdata.TypeCumulativeDistribution, LabelKeys: []ocmetricdata.LabelKey{ {Key: "a"}, {Key: "b"}, }, }, TimeSeries: []*ocmetricdata.TimeSeries{ { LabelValues: []ocmetricdata.LabelValue{ { Value: "hello", Present: true, }, { Value: "world", Present: true, }, }, Points: []ocmetricdata.Point{ ocmetricdata.NewDistributionPoint(endTime1, &ocmetricdata.Distribution{ Count: 8, Sum: 100.0, BucketOptions: &ocmetricdata.BucketOptions{ Bounds: []float64{1.0, 2.0, 3.0}, }, Buckets: []ocmetricdata.Bucket{ { Count: 1, Exemplar: &ocmetricdata.Exemplar{ Value: 0.8, Timestamp: exemplarTime, Attachments: map[string]interface{}{ ocmetricdata.AttachmentKeySpanContext: octrace.SpanContext{ TraceID: octrace.TraceID([16]byte{1}), SpanID: octrace.SpanID([8]byte{2}), }, "bool": true, }, }, }, { Count: 2, Exemplar: &ocmetricdata.Exemplar{ Value: 1.5, Timestamp: exemplarTime, Attachments: map[string]interface{}{ ocmetricdata.AttachmentKeySpanContext: octrace.SpanContext{ TraceID: octrace.TraceID([16]byte{3}), SpanID: octrace.SpanID([8]byte{4}), }, }, }, }, { Count: 5, Exemplar: &ocmetricdata.Exemplar{ Value: 2.6, Timestamp: exemplarTime, Attachments: map[string]interface{}{ ocmetricdata.AttachmentKeySpanContext: octrace.SpanContext{ TraceID: octrace.TraceID([16]byte{5}), SpanID: octrace.SpanID([8]byte{6}), }, }, }, }, }, }), ocmetricdata.NewDistributionPoint(endTime2, &ocmetricdata.Distribution{ Count: 10, Sum: 110.0, BucketOptions: &ocmetricdata.BucketOptions{ Bounds: []float64{1.0, 2.0, 3.0}, }, Buckets: []ocmetricdata.Bucket{ { Count: 1, Exemplar: &ocmetricdata.Exemplar{ Value: 0.9, Timestamp: exemplarTime, Attachments: map[string]interface{}{ ocmetricdata.AttachmentKeySpanContext: octrace.SpanContext{ TraceID: octrace.TraceID([16]byte{7}), SpanID: octrace.SpanID([8]byte{8}), }, }, }, }, { Count: 4, Exemplar: &ocmetricdata.Exemplar{ Value: 1.1, Timestamp: exemplarTime, Attachments: map[string]interface{}{ ocmetricdata.AttachmentKeySpanContext: octrace.SpanContext{ TraceID: octrace.TraceID([16]byte{9}), SpanID: octrace.SpanID([8]byte{10}), }, }, }, }, { Count: 5, Exemplar: &ocmetricdata.Exemplar{ Value: 2.7, Timestamp: exemplarTime, Attachments: map[string]interface{}{ ocmetricdata.AttachmentKeySpanContext: octrace.SpanContext{ TraceID: octrace.TraceID([16]byte{11}), SpanID: octrace.SpanID([8]byte{12}), }, }, }, }, }, }), }, StartTime: startTime, }, }, }, { Descriptor: ocmetricdata.Descriptor{ Name: "foo.com/gauge-a", Description: "an int testing gauge", Unit: ocmetricdata.UnitBytes, Type: 
ocmetricdata.TypeGaugeInt64, LabelKeys: []ocmetricdata.LabelKey{ {Key: "c"}, {Key: "d"}, }, }, TimeSeries: []*ocmetricdata.TimeSeries{ { LabelValues: []ocmetricdata.LabelValue{ { Value: "foo", Present: true, }, { Value: "bar", Present: true, }, }, Points: []ocmetricdata.Point{ ocmetricdata.NewInt64Point(endTime1, 123), ocmetricdata.NewInt64Point(endTime2, 1236), }, }, }, }, { Descriptor: ocmetricdata.Descriptor{ Name: "foo.com/gauge-b", Description: "a float testing gauge", Unit: ocmetricdata.UnitBytes, Type: ocmetricdata.TypeGaugeFloat64, LabelKeys: []ocmetricdata.LabelKey{ {Key: "cf"}, {Key: "df"}, }, }, TimeSeries: []*ocmetricdata.TimeSeries{ { LabelValues: []ocmetricdata.LabelValue{ { Value: "foof", Present: true, }, { Value: "barf", Present: true, }, }, Points: []ocmetricdata.Point{ ocmetricdata.NewFloat64Point(endTime1, 123.4), ocmetricdata.NewFloat64Point(endTime2, 1236.7), }, }, }, }, { Descriptor: ocmetricdata.Descriptor{ Name: "foo.com/sum-a", Description: "an int testing sum", Unit: ocmetricdata.UnitMilliseconds, Type: ocmetricdata.TypeCumulativeInt64, LabelKeys: []ocmetricdata.LabelKey{ {Key: "e"}, {Key: "f"}, }, }, TimeSeries: []*ocmetricdata.TimeSeries{ { LabelValues: []ocmetricdata.LabelValue{ { Value: "zig", Present: true, }, { Value: "zag", Present: true, }, }, Points: []ocmetricdata.Point{ ocmetricdata.NewInt64Point(endTime1, 13), ocmetricdata.NewInt64Point(endTime2, 14), }, }, }, }, { Descriptor: ocmetricdata.Descriptor{ Name: "foo.com/sum-b", Description: "a float testing sum", Unit: ocmetricdata.UnitMilliseconds, Type: ocmetricdata.TypeCumulativeFloat64, LabelKeys: []ocmetricdata.LabelKey{ {Key: "e"}, {Key: "f"}, }, }, TimeSeries: []*ocmetricdata.TimeSeries{ { LabelValues: []ocmetricdata.LabelValue{ { Value: "zig", Present: true, }, { Value: "zag", Present: true, }, }, Points: []ocmetricdata.Point{ ocmetricdata.NewFloat64Point(endTime1, 12.3), ocmetricdata.NewFloat64Point(endTime2, 123.4), }, }, }, }, { Descriptor: ocmetricdata.Descriptor{ Name: "foo.com/summary-a", Description: "a testing summary", Unit: ocmetricdata.UnitMilliseconds, Type: ocmetricdata.TypeSummary, LabelKeys: []ocmetricdata.LabelKey{ {Key: "g"}, {Key: "h"}, }, }, TimeSeries: []*ocmetricdata.TimeSeries{ { LabelValues: []ocmetricdata.LabelValue{ { Value: "ding", Present: true, }, { Value: "dong", Present: true, }, }, Points: []ocmetricdata.Point{ ocmetricdata.NewSummaryPoint(endTime1, &ocmetricdata.Summary{ Count: 10, Sum: 13.2, HasCountAndSum: true, Snapshot: ocmetricdata.Snapshot{ Percentiles: map[float64]float64{ 50.0: 1.0, 0.0: 0.1, 100.0: 10.4, }, }, }), ocmetricdata.NewSummaryPoint(endTime2, &ocmetricdata.Summary{ Count: 12, Snapshot: ocmetricdata.Snapshot{ Percentiles: map[float64]float64{ 0.0: 0.2, 50.0: 1.1, 100.0: 10.5, }, }, }), }, }, }, }, }, expected: []metricdata.Metrics{ { Name: "foo.com/histogram-a", Description: "a testing histogram", Unit: "1", Data: metricdata.Histogram[float64]{ DataPoints: []metricdata.HistogramDataPoint[float64]{ { Attributes: attribute.NewSet(attribute.KeyValue{ Key: attribute.Key("a"), Value: attribute.StringValue("hello"), }, attribute.KeyValue{ Key: attribute.Key("b"), Value: attribute.StringValue("world"), }), StartTime: startTime, Time: endTime1, Count: 8, Sum: 100.0, Bounds: []float64{1.0, 2.0, 3.0}, BucketCounts: []uint64{1, 2, 5}, Exemplars: []metricdata.Exemplar[float64]{ { Time: exemplarTime, Value: 0.8, TraceID: []byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, SpanID: []byte{2, 0, 0, 0, 0, 0, 0, 0}, FilteredAttributes: []attribute.KeyValue{ 
attribute.Bool("bool", true), }, }, { Time: exemplarTime, Value: 1.5, TraceID: []byte{3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, SpanID: []byte{4, 0, 0, 0, 0, 0, 0, 0}, }, { Time: exemplarTime, Value: 2.6, TraceID: []byte{5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, SpanID: []byte{6, 0, 0, 0, 0, 0, 0, 0}, }, }, }, { Attributes: attribute.NewSet(attribute.KeyValue{ Key: attribute.Key("a"), Value: attribute.StringValue("hello"), }, attribute.KeyValue{ Key: attribute.Key("b"), Value: attribute.StringValue("world"), }), StartTime: startTime, Time: endTime2, Count: 10, Sum: 110.0, Bounds: []float64{1.0, 2.0, 3.0}, BucketCounts: []uint64{1, 4, 5}, Exemplars: []metricdata.Exemplar[float64]{ { Time: exemplarTime, Value: 0.9, TraceID: []byte{7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, SpanID: []byte{8, 0, 0, 0, 0, 0, 0, 0}, }, { Time: exemplarTime, Value: 1.1, TraceID: []byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, SpanID: []byte{10, 0, 0, 0, 0, 0, 0, 0}, }, { Time: exemplarTime, Value: 2.7, TraceID: []byte{11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, SpanID: []byte{12, 0, 0, 0, 0, 0, 0, 0}, }, }, }, }, Temporality: metricdata.CumulativeTemporality, }, }, { Name: "foo.com/gauge-a", Description: "an int testing gauge", Unit: "By", Data: metricdata.Gauge[int64]{ DataPoints: []metricdata.DataPoint[int64]{ { Attributes: attribute.NewSet(attribute.KeyValue{ Key: attribute.Key("c"), Value: attribute.StringValue("foo"), }, attribute.KeyValue{ Key: attribute.Key("d"), Value: attribute.StringValue("bar"), }), Time: endTime1, Value: 123, }, { Attributes: attribute.NewSet(attribute.KeyValue{ Key: attribute.Key("c"), Value: attribute.StringValue("foo"), }, attribute.KeyValue{ Key: attribute.Key("d"), Value: attribute.StringValue("bar"), }), Time: endTime2, Value: 1236, }, }, }, }, { Name: "foo.com/gauge-b", Description: "a float testing gauge", Unit: "By", Data: metricdata.Gauge[float64]{ DataPoints: []metricdata.DataPoint[float64]{ { Attributes: attribute.NewSet(attribute.KeyValue{ Key: attribute.Key("cf"), Value: attribute.StringValue("foof"), }, attribute.KeyValue{ Key: attribute.Key("df"), Value: attribute.StringValue("barf"), }), Time: endTime1, Value: 123.4, }, { Attributes: attribute.NewSet(attribute.KeyValue{ Key: attribute.Key("cf"), Value: attribute.StringValue("foof"), }, attribute.KeyValue{ Key: attribute.Key("df"), Value: attribute.StringValue("barf"), }), Time: endTime2, Value: 1236.7, }, }, }, }, { Name: "foo.com/sum-a", Description: "an int testing sum", Unit: "ms", Data: metricdata.Sum[int64]{ IsMonotonic: true, Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.DataPoint[int64]{ { Attributes: attribute.NewSet(attribute.KeyValue{ Key: attribute.Key("e"), Value: attribute.StringValue("zig"), }, attribute.KeyValue{ Key: attribute.Key("f"), Value: attribute.StringValue("zag"), }), Time: endTime1, Value: 13, }, { Attributes: attribute.NewSet(attribute.KeyValue{ Key: attribute.Key("e"), Value: attribute.StringValue("zig"), }, attribute.KeyValue{ Key: attribute.Key("f"), Value: attribute.StringValue("zag"), }), Time: endTime2, Value: 14, }, }, }, }, { Name: "foo.com/sum-b", Description: "a float testing sum", Unit: "ms", Data: metricdata.Sum[float64]{ IsMonotonic: true, Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.DataPoint[float64]{ { Attributes: attribute.NewSet(attribute.KeyValue{ Key: attribute.Key("e"), Value: attribute.StringValue("zig"), }, attribute.KeyValue{ Key: attribute.Key("f"), Value: 
attribute.StringValue("zag"), }), Time: endTime1, Value: 12.3, }, { Attributes: attribute.NewSet(attribute.KeyValue{ Key: attribute.Key("e"), Value: attribute.StringValue("zig"), }, attribute.KeyValue{ Key: attribute.Key("f"), Value: attribute.StringValue("zag"), }), Time: endTime2, Value: 123.4, }, }, }, }, { Name: "foo.com/summary-a", Description: "a testing summary", Unit: "ms", Data: metricdata.Summary{ DataPoints: []metricdata.SummaryDataPoint{ { Attributes: attribute.NewSet(attribute.KeyValue{ Key: attribute.Key("g"), Value: attribute.StringValue("ding"), }, attribute.KeyValue{ Key: attribute.Key("h"), Value: attribute.StringValue("dong"), }), Time: endTime1, Count: 10, Sum: 13.2, QuantileValues: []metricdata.QuantileValue{ { Quantile: 0.0, Value: 0.1, }, { Quantile: 0.5, Value: 1.0, }, { Quantile: 1.0, Value: 10.4, }, }, }, { Attributes: attribute.NewSet(attribute.KeyValue{ Key: attribute.Key("g"), Value: attribute.StringValue("ding"), }, attribute.KeyValue{ Key: attribute.Key("h"), Value: attribute.StringValue("dong"), }), Time: endTime2, Count: 12, QuantileValues: []metricdata.QuantileValue{ { Quantile: 0.0, Value: 0.2, }, { Quantile: 0.5, Value: 1.1, }, { Quantile: 1.0, Value: 10.5, }, }, }, }, }, }, }, }, { desc: "histogram without data points", input: []*ocmetricdata.Metric{ { Descriptor: ocmetricdata.Descriptor{ Name: "foo.com/histogram-a", Description: "a testing histogram", Unit: ocmetricdata.UnitDimensionless, Type: ocmetricdata.TypeCumulativeDistribution, }, }, }, expected: []metricdata.Metrics{ { Name: "foo.com/histogram-a", Description: "a testing histogram", Unit: "1", Data: metricdata.Histogram[float64]{ Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.HistogramDataPoint[float64]{}, }, }, }, }, { desc: "sum without data points", input: []*ocmetricdata.Metric{ { Descriptor: ocmetricdata.Descriptor{ Name: "foo.com/sum-a", Description: "a testing sum", Unit: ocmetricdata.UnitDimensionless, Type: ocmetricdata.TypeCumulativeFloat64, }, }, }, expected: []metricdata.Metrics{ { Name: "foo.com/sum-a", Description: "a testing sum", Unit: "1", Data: metricdata.Sum[float64]{ IsMonotonic: true, Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.DataPoint[float64]{}, }, }, }, }, { desc: "gauge without data points", input: []*ocmetricdata.Metric{ { Descriptor: ocmetricdata.Descriptor{ Name: "foo.com/gauge-a", Description: "a testing gauge", Unit: ocmetricdata.UnitDimensionless, Type: ocmetricdata.TypeGaugeInt64, }, }, }, expected: []metricdata.Metrics{ { Name: "foo.com/gauge-a", Description: "a testing gauge", Unit: "1", Data: metricdata.Gauge[int64]{ DataPoints: []metricdata.DataPoint[int64]{}, }, }, }, }, { desc: "histogram with negative count", input: []*ocmetricdata.Metric{ { Descriptor: ocmetricdata.Descriptor{ Name: "foo.com/histogram-a", Description: "a testing histogram", Unit: ocmetricdata.UnitDimensionless, Type: ocmetricdata.TypeCumulativeDistribution, }, TimeSeries: []*ocmetricdata.TimeSeries{ { Points: []ocmetricdata.Point{ ocmetricdata.NewDistributionPoint(endTime1, &ocmetricdata.Distribution{ Count: -8, }), }, StartTime: startTime, }, }, }, }, expectedErr: errNegativeCount, }, { desc: "histogram with negative bucket count", input: []*ocmetricdata.Metric{ { Descriptor: ocmetricdata.Descriptor{ Name: "foo.com/histogram-a", Description: "a testing histogram", Unit: ocmetricdata.UnitDimensionless, Type: ocmetricdata.TypeCumulativeDistribution, }, TimeSeries: []*ocmetricdata.TimeSeries{ { Points: []ocmetricdata.Point{ 
ocmetricdata.NewDistributionPoint(endTime1, &ocmetricdata.Distribution{ Buckets: []ocmetricdata.Bucket{ {Count: -1}, {Count: 2}, {Count: 5}, }, }), }, StartTime: startTime, }, }, }, }, expectedErr: errNegativeBucketCount, }, { desc: "histogram with non-histogram datapoint type", input: []*ocmetricdata.Metric{ { Descriptor: ocmetricdata.Descriptor{ Name: "foo.com/bad-point", Description: "a bad type", Unit: ocmetricdata.UnitDimensionless, Type: ocmetricdata.TypeCumulativeDistribution, }, TimeSeries: []*ocmetricdata.TimeSeries{ { Points: []ocmetricdata.Point{ ocmetricdata.NewFloat64Point(endTime1, 1.0), }, StartTime: startTime, }, }, }, }, expectedErr: errMismatchedValueTypes, }, { desc: "summary with mismatched attributes", input: []*ocmetricdata.Metric{ { Descriptor: ocmetricdata.Descriptor{ Name: "foo.com/summary-mismatched", Description: "a mismatched summary", Unit: ocmetricdata.UnitMilliseconds, Type: ocmetricdata.TypeSummary, LabelKeys: []ocmetricdata.LabelKey{ {Key: "g"}, }, }, TimeSeries: []*ocmetricdata.TimeSeries{ { LabelValues: []ocmetricdata.LabelValue{ { Value: "ding", Present: true, }, { Value: "dong", Present: true, }, }, Points: []ocmetricdata.Point{ ocmetricdata.NewSummaryPoint(endTime1, &ocmetricdata.Summary{ Count: 10, Sum: 13.2, HasCountAndSum: true, Snapshot: ocmetricdata.Snapshot{ Percentiles: map[float64]float64{ 0.0: 0.1, 0.5: 1.0, 1.0: 10.4, }, }, }), }, }, }, }, }, expectedErr: errMismatchedAttributeKeyValues, }, { desc: "summary with negative count", input: []*ocmetricdata.Metric{ { Descriptor: ocmetricdata.Descriptor{ Name: "foo.com/summary-negative", Description: "a negative count summary", Unit: ocmetricdata.UnitMilliseconds, Type: ocmetricdata.TypeSummary, }, TimeSeries: []*ocmetricdata.TimeSeries{ { Points: []ocmetricdata.Point{ ocmetricdata.NewSummaryPoint(endTime1, &ocmetricdata.Summary{ Count: -10, Sum: 13.2, HasCountAndSum: true, Snapshot: ocmetricdata.Snapshot{ Percentiles: map[float64]float64{ 0.0: 0.1, 0.5: 1.0, 1.0: 10.4, }, }, }), }, }, }, }, }, expectedErr: errNegativeCount, }, { desc: "histogram with invalid span context exemplar", input: []*ocmetricdata.Metric{ { Descriptor: ocmetricdata.Descriptor{ Name: "foo.com/histogram-a", Description: "a testing histogram", Unit: ocmetricdata.UnitDimensionless, Type: ocmetricdata.TypeCumulativeDistribution, }, TimeSeries: []*ocmetricdata.TimeSeries{ { Points: []ocmetricdata.Point{ ocmetricdata.NewDistributionPoint(endTime1, &ocmetricdata.Distribution{ Count: 8, Sum: 100.0, BucketOptions: &ocmetricdata.BucketOptions{ Bounds: []float64{1.0, 2.0, 3.0}, }, Buckets: []ocmetricdata.Bucket{ { Count: 1, Exemplar: &ocmetricdata.Exemplar{ Value: 0.8, Timestamp: exemplarTime, Attachments: map[string]interface{}{ ocmetricdata.AttachmentKeySpanContext: "notaspancontext", }, }, }, }, }), }, StartTime: startTime, }, }, }, }, expectedErr: errInvalidExemplarSpanContext, }, { desc: "sum with non-sum datapoint type", input: []*ocmetricdata.Metric{ { Descriptor: ocmetricdata.Descriptor{ Name: "foo.com/bad-point", Description: "a bad type", Unit: ocmetricdata.UnitDimensionless, Type: ocmetricdata.TypeCumulativeFloat64, }, TimeSeries: []*ocmetricdata.TimeSeries{ { Points: []ocmetricdata.Point{ ocmetricdata.NewDistributionPoint(endTime1, &ocmetricdata.Distribution{}), }, StartTime: startTime, }, }, }, }, expectedErr: errMismatchedValueTypes, }, { desc: "gauge with non-gauge datapoint type", input: []*ocmetricdata.Metric{ { Descriptor: ocmetricdata.Descriptor{ Name: "foo.com/bad-point", Description: "a bad type", Unit: 
ocmetricdata.UnitDimensionless, Type: ocmetricdata.TypeGaugeFloat64, }, TimeSeries: []*ocmetricdata.TimeSeries{ { Points: []ocmetricdata.Point{ ocmetricdata.NewDistributionPoint(endTime1, &ocmetricdata.Distribution{}), }, StartTime: startTime, }, }, }, }, expectedErr: errMismatchedValueTypes, }, { desc: "summary with non-summary datapoint type", input: []*ocmetricdata.Metric{ { Descriptor: ocmetricdata.Descriptor{ Name: "foo.com/bad-point", Description: "a bad type", Unit: ocmetricdata.UnitDimensionless, Type: ocmetricdata.TypeSummary, }, TimeSeries: []*ocmetricdata.TimeSeries{ { Points: []ocmetricdata.Point{ ocmetricdata.NewDistributionPoint(endTime1, &ocmetricdata.Distribution{}), }, StartTime: startTime, }, }, }, }, expectedErr: errMismatchedValueTypes, }, { desc: "unsupported Gauge Distribution type", input: []*ocmetricdata.Metric{ { Descriptor: ocmetricdata.Descriptor{ Name: "foo.com/bad-point", Description: "a bad type", Unit: ocmetricdata.UnitDimensionless, Type: ocmetricdata.TypeGaugeDistribution, }, }, }, expectedErr: errAggregationType, }, } { t.Run(tc.desc, func(t *testing.T) { output, err := ConvertMetrics(tc.input) if !errors.Is(err, tc.expectedErr) { t.Errorf("ConvertMetrics(%+v) = err(%v), want err(%v)", tc.input, err, tc.expectedErr) } metricdatatest.AssertEqual[metricdata.ScopeMetrics](t, metricdata.ScopeMetrics{Metrics: tc.expected}, metricdata.ScopeMetrics{Metrics: output}) }) } } func TestConvertAttributes(t *testing.T) { setWithMultipleKeys := attribute.NewSet( attribute.KeyValue{Key: attribute.Key("first"), Value: attribute.StringValue("1")}, attribute.KeyValue{Key: attribute.Key("second"), Value: attribute.StringValue("2")}, ) for _, tc := range []struct { desc string inputKeys []ocmetricdata.LabelKey inputValues []ocmetricdata.LabelValue expected *attribute.Set expectedErr error }{ { desc: "no attributes", expected: attribute.EmptySet(), }, { desc: "different numbers of keys and values", inputKeys: []ocmetricdata.LabelKey{{Key: "foo"}}, expected: attribute.EmptySet(), expectedErr: errMismatchedAttributeKeyValues, }, { desc: "multiple keys and values", inputKeys: []ocmetricdata.LabelKey{{Key: "first"}, {Key: "second"}}, inputValues: []ocmetricdata.LabelValue{ {Value: "1", Present: true}, {Value: "2", Present: true}, }, expected: &setWithMultipleKeys, }, { desc: "multiple keys and values with some not present", inputKeys: []ocmetricdata.LabelKey{{Key: "first"}, {Key: "second"}, {Key: "third"}}, inputValues: []ocmetricdata.LabelValue{ {Value: "1", Present: true}, {Value: "2", Present: true}, {Present: false}, }, expected: &setWithMultipleKeys, }, } { t.Run(tc.desc, func(t *testing.T) { output, err := convertAttrs(tc.inputKeys, tc.inputValues) if !errors.Is(err, tc.expectedErr) { t.Errorf("convertAttrs(keys: %v, values: %v) = err(%v), want err(%v)", tc.inputKeys, tc.inputValues, err, tc.expectedErr) } if !output.Equals(tc.expected) { t.Errorf("convertAttrs(keys: %v, values: %v) = %+v, want %+v", tc.inputKeys, tc.inputValues, output.ToSlice(), tc.expected.ToSlice()) } }) } } type fakeStringer string func (f fakeStringer) String() string { return string(f) } func TestConvertKV(t *testing.T) { key := "foo" for _, tt := range []struct { value any expected attribute.Value }{ { value: bool(true), expected: attribute.BoolValue(true), }, { value: []bool{true, false}, expected: attribute.BoolSliceValue([]bool{true, false}), }, { value: int(10), expected: attribute.IntValue(10), }, { value: []int{10, 20}, expected: attribute.IntSliceValue([]int{10, 20}), }, { value: int8(10), 
expected: attribute.IntValue(10), }, { value: []int8{10, 20}, expected: attribute.IntSliceValue([]int{10, 20}), }, { value: int16(10), expected: attribute.IntValue(10), }, { value: []int16{10, 20}, expected: attribute.IntSliceValue([]int{10, 20}), }, { value: int32(10), expected: attribute.IntValue(10), }, { value: []int32{10, 20}, expected: attribute.IntSliceValue([]int{10, 20}), }, { value: int64(10), expected: attribute.Int64Value(10), }, { value: []int64{10, 20}, expected: attribute.Int64SliceValue([]int64{10, 20}), }, { value: uint(10), expected: attribute.IntValue(10), }, { value: uint(math.MaxUint), expected: attribute.StringValue(fmt.Sprintf("%v", uint(math.MaxUint))), }, { value: []uint{10, 20}, expected: attribute.StringSliceValue([]string{"10", "20"}), }, { value: uint8(10), expected: attribute.IntValue(10), }, { value: []uint8{10, 20}, expected: attribute.StringSliceValue([]string{"10", "20"}), }, { value: uint16(10), expected: attribute.IntValue(10), }, { value: []uint16{10, 20}, expected: attribute.StringSliceValue([]string{"10", "20"}), }, { value: uint32(10), expected: attribute.IntValue(10), }, { value: []uint32{10, 20}, expected: attribute.StringSliceValue([]string{"10", "20"}), }, { value: uint64(10), expected: attribute.Int64Value(10), }, { value: uint64(math.MaxUint64), expected: attribute.StringValue("18446744073709551615"), }, { value: []uint64{10, 20}, expected: attribute.StringSliceValue([]string{"10", "20"}), }, { value: uintptr(10), expected: attribute.Int64Value(10), }, { value: []uintptr{10, 20}, expected: attribute.StringSliceValue([]string{"10", "20"}), }, { value: float32(10), expected: attribute.Float64Value(10), }, { value: []float32{10, 20}, expected: attribute.Float64SliceValue([]float64{10, 20}), }, { value: float64(10), expected: attribute.Float64Value(10), }, { value: []float64{10, 20}, expected: attribute.Float64SliceValue([]float64{10, 20}), }, { value: complex64(10), expected: attribute.StringValue("(10+0i)"), }, { value: []complex64{10, 20}, expected: attribute.StringSliceValue([]string{"(10+0i)", "(20+0i)"}), }, { value: complex128(10), expected: attribute.StringValue("(10+0i)"), }, { value: []complex128{10, 20}, expected: attribute.StringSliceValue([]string{"(10+0i)", "(20+0i)"}), }, { value: "string", expected: attribute.StringValue("string"), }, { value: []string{"string", "slice"}, expected: attribute.StringSliceValue([]string{"string", "slice"}), }, { value: fakeStringer("stringer"), expected: attribute.StringValue("stringer"), }, { value: metricdata.Histogram[float64]{}, expected: attribute.StringValue("unhandled attribute value: {DataPoints:[] Temporality:undefinedTemporality}"), }, } { t.Run(fmt.Sprintf("%v(%+v)", reflect.TypeOf(tt.value), tt.value), func(t *testing.T) { got := convertKV(key, tt.value) assert.Equal(t, key, string(got.Key)) assert.Equal(t, tt.expected, got.Value) }) } } opentelemetry-go-1.21.0/bridge/opencensus/internal/otel2oc/000077500000000000000000000000001452547353200237055ustar00rootroot00000000000000opentelemetry-go-1.21.0/bridge/opencensus/internal/otel2oc/span_context.go000066400000000000000000000021201452547353200267340ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otel2oc // import "go.opentelemetry.io/otel/bridge/opencensus/internal/otel2oc" import ( octrace "go.opencensus.io/trace" "go.opentelemetry.io/otel/trace" ) func SpanContext(sc trace.SpanContext) octrace.SpanContext { var to octrace.TraceOptions if sc.IsSampled() { // OpenCensus doesn't expose functions to directly set sampled to = 0x1 } return octrace.SpanContext{ TraceID: octrace.TraceID(sc.TraceID()), SpanID: octrace.SpanID(sc.SpanID()), TraceOptions: to, } } opentelemetry-go-1.21.0/bridge/opencensus/internal/otel2oc/span_context_test.go000066400000000000000000000035211452547353200300010ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otel2oc import ( "testing" octrace "go.opencensus.io/trace" "go.opentelemetry.io/otel/trace" ) func TestSpanContextConversion(t *testing.T) { for _, tc := range []struct { description string input trace.SpanContext expected octrace.SpanContext }{ { description: "empty", }, { description: "sampled", input: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID([16]byte{1}), SpanID: trace.SpanID([8]byte{2}), TraceFlags: trace.FlagsSampled, }), expected: octrace.SpanContext{ TraceID: octrace.TraceID([16]byte{1}), SpanID: octrace.SpanID([8]byte{2}), TraceOptions: octrace.TraceOptions(0x1), }, }, { description: "not sampled", input: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID([16]byte{1}), SpanID: trace.SpanID([8]byte{2}), }), expected: octrace.SpanContext{ TraceID: octrace.TraceID([16]byte{1}), SpanID: octrace.SpanID([8]byte{2}), TraceOptions: octrace.TraceOptions(0), }, }, } { t.Run(tc.description, func(t *testing.T) { output := SpanContext(tc.input) if output != tc.expected { t.Fatalf("Got %+v spancontext, expected %+v.", output, tc.expected) } }) } } opentelemetry-go-1.21.0/bridge/opencensus/internal/span.go000066400000000000000000000103521452547353200236270ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
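// Editorial usage sketch (the tracer and context names are assumed for the
// illustration and do not appear in this file): the bridge wraps an
// already-started OpenTelemetry span so existing OpenCensus instrumentation
// can keep using the OpenCensus span API:
//
//	_, otelSpan := tracer.Start(ctx, "operation")
//	ocSpan := NewSpan(otelSpan)
//	ocSpan.AddAttributes(octrace.StringAttribute("key", "value"))
//	ocSpan.End()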
package internal // import "go.opentelemetry.io/otel/bridge/opencensus/internal" import ( "fmt" octrace "go.opencensus.io/trace" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/bridge/opencensus/internal/oc2otel" "go.opentelemetry.io/otel/bridge/opencensus/internal/otel2oc" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" ) const ( // MessageSendEvent is the name of the message send event. MessageSendEvent = "message send" // MessageReceiveEvent is the name of the message receive event. MessageReceiveEvent = "message receive" ) var ( // UncompressedKey is used for the uncompressed byte size attribute. UncompressedKey = attribute.Key("uncompressed byte size") // CompressedKey is used for the compressed byte size attribute. CompressedKey = attribute.Key("compressed byte size") ) // Span is an OpenCensus SpanInterface wrapper for an OpenTelemetry Span. type Span struct { otelSpan trace.Span } // NewSpan returns an OpenCensus Span wrapping an OpenTelemetry Span. func NewSpan(s trace.Span) *octrace.Span { return octrace.NewSpan(&Span{otelSpan: s}) } // IsRecordingEvents returns true if events are being recorded for this span. func (s *Span) IsRecordingEvents() bool { return s.otelSpan.IsRecording() } // End ends this span. func (s *Span) End() { s.otelSpan.End() } // SpanContext returns the SpanContext of this span. func (s *Span) SpanContext() octrace.SpanContext { return otel2oc.SpanContext(s.otelSpan.SpanContext()) } // SetName sets the name of this span, if it is recording events. func (s *Span) SetName(name string) { s.otelSpan.SetName(name) } // SetStatus sets the status of this span, if it is recording events. func (s *Span) SetStatus(status octrace.Status) { s.otelSpan.SetStatus(codes.Code(status.Code), status.Message) } // AddAttributes sets attributes in this span. func (s *Span) AddAttributes(attributes ...octrace.Attribute) { s.otelSpan.SetAttributes(oc2otel.Attributes(attributes)...) } // Annotate adds an annotation with attributes to this span. func (s *Span) Annotate(attributes []octrace.Attribute, str string) { s.otelSpan.AddEvent(str, trace.WithAttributes(oc2otel.Attributes(attributes)...)) } // Annotatef adds a formatted annotation with attributes to this span. func (s *Span) Annotatef(attributes []octrace.Attribute, format string, a ...interface{}) { s.Annotate(attributes, fmt.Sprintf(format, a...)) } // AddMessageSendEvent adds a message send event to this span. func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { s.otelSpan.AddEvent(MessageSendEvent, trace.WithAttributes( attribute.KeyValue{ Key: UncompressedKey, Value: attribute.Int64Value(uncompressedByteSize), }, attribute.KeyValue{ Key: CompressedKey, Value: attribute.Int64Value(compressedByteSize), }), ) } // AddMessageReceiveEvent adds a message receive event to this span. func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { s.otelSpan.AddEvent(MessageReceiveEvent, trace.WithAttributes( attribute.KeyValue{ Key: UncompressedKey, Value: attribute.Int64Value(uncompressedByteSize), }, attribute.KeyValue{ Key: CompressedKey, Value: attribute.Int64Value(compressedByteSize), }), ) } // AddLink adds a link to this span. func (s *Span) AddLink(l octrace.Link) { Handle(fmt.Errorf("ignoring OpenCensus link %+v for span %q because OpenTelemetry doesn't support setting links after creation", l, s.String())) } // String prints a string representation of this span. 
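//
// For example, for a wrapped span whose OpenTelemetry span ID bytes are
// [8]byte{2}:
//
//	s.String() // "span 0200000000000000"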
func (s *Span) String() string { return fmt.Sprintf("span %s", s.otelSpan.SpanContext().SpanID().String()) } opentelemetry-go-1.21.0/bridge/opencensus/internal/span_test.go000066400000000000000000000167071452547353200247000ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal_test import ( "testing" octrace "go.opencensus.io/trace" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/bridge/opencensus/internal" "go.opentelemetry.io/otel/bridge/opencensus/internal/oc2otel" "go.opentelemetry.io/otel/bridge/opencensus/internal/otel2oc" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" ) type span struct { trace.Span recording bool ended bool sc trace.SpanContext name string sCode codes.Code sMsg string attrs []attribute.KeyValue eName string eOpts []trace.EventOption } func (s *span) IsRecording() bool { return s.recording } func (s *span) End(...trace.SpanEndOption) { s.ended = true } func (s *span) SpanContext() trace.SpanContext { return s.sc } func (s *span) SetName(n string) { s.name = n } func (s *span) SetStatus(c codes.Code, d string) { s.sCode, s.sMsg = c, d } func (s *span) SetAttributes(a ...attribute.KeyValue) { s.attrs = a } func (s *span) AddEvent(n string, o ...trace.EventOption) { s.eName, s.eOpts = n, o } func TestSpanIsRecordingEvents(t *testing.T) { s := &span{recording: true} ocS := internal.NewSpan(s) if !ocS.IsRecordingEvents() { t.Errorf("span.IsRecordingEvents() = false, want true") } s.recording = false if ocS.IsRecordingEvents() { t.Errorf("span.IsRecordingEvents() = true, want false") } } func TestSpanEnd(t *testing.T) { s := new(span) ocS := internal.NewSpan(s) if s.ended { t.Fatal("new span already ended") } ocS.End() if !s.ended { t.Error("span.End() did not end OpenTelemetry span") } } func TestSpanSpanContext(t *testing.T) { sc := trace.NewSpanContext(trace.SpanContextConfig{ TraceID: [16]byte{1}, SpanID: [8]byte{1}, }) // Do not test the conversion, only that the method is called. converted := otel2oc.SpanContext(sc) s := &span{sc: sc} ocS := internal.NewSpan(s) if ocS.SpanContext() != converted { t.Error("span.SpanContext did not use OpenTelemetry SpanContext") } } func TestSpanSetName(t *testing.T) { // OpenCensus does not set a name if not recording. s := &span{recording: true} ocS := internal.NewSpan(s) name := "test name" ocS.SetName(name) if s.name != name { t.Error("span.SetName did not set OpenTelemetry span name") } } func TestSpanSetStatus(t *testing.T) { // OpenCensus does not set a status if not recording. 
s := &span{recording: true} ocS := internal.NewSpan(s) c, d := codes.Error, "error" status := octrace.Status{Code: int32(c), Message: d} ocS.SetStatus(status) if s.sCode != c { t.Error("span.SetStatus failed to set OpenTelemetry status code") } if s.sMsg != d { t.Error("span.SetStatus failed to set OpenTelemetry status description") } } func TestSpanAddAttributes(t *testing.T) { attrs := []octrace.Attribute{ octrace.BoolAttribute("a", true), } // Do not test the conversion, only that the method is called. converted := oc2otel.Attributes(attrs) // OpenCensus does not set attributes if not recording. s := &span{recording: true} ocS := internal.NewSpan(s) ocS.AddAttributes(attrs...) if len(s.attrs) != len(converted) || s.attrs[0] != converted[0] { t.Error("span.AddAttributes failed to set OpenTelemetry attributes") } } func TestSpanAnnotate(t *testing.T) { name := "annotation" attrs := []octrace.Attribute{ octrace.BoolAttribute("a", true), } // Do not test the conversion, only that the method is called. want := oc2otel.Attributes(attrs) // OpenCensus does not set events if not recording. s := &span{recording: true} ocS := internal.NewSpan(s) ocS.Annotate(attrs, name) if s.eName != name { t.Error("span.Annotate did not set event name") } config := trace.NewEventConfig(s.eOpts...) got := config.Attributes() if len(want) != len(got) || want[0] != got[0] { t.Error("span.Annotate did not set event options") } } func TestSpanAnnotatef(t *testing.T) { format := "annotation %s" attrs := []octrace.Attribute{ octrace.BoolAttribute("a", true), } // Do not test the conversion, only that the method is called. want := oc2otel.Attributes(attrs) // OpenCensus does not set events if not recording. s := &span{recording: true} ocS := internal.NewSpan(s) ocS.Annotatef(attrs, format, "a") if s.eName != "annotation a" { t.Error("span.Annotatef did not set event name") } config := trace.NewEventConfig(s.eOpts...) got := config.Attributes() if len(want) != len(got) || want[0] != got[0] { t.Error("span.Annotatef did not set event options") } } func TestSpanAddMessageSendEvent(t *testing.T) { var u, c int64 = 1, 2 // OpenCensus does not set events if not recording. s := &span{recording: true} ocS := internal.NewSpan(s) ocS.AddMessageSendEvent(0, u, c) if s.eName != internal.MessageSendEvent { t.Error("span.AddMessageSendEvent did not set event name") } config := trace.NewEventConfig(s.eOpts...) got := config.Attributes() if len(got) != 2 { t.Fatalf("span.AddMessageSendEvent set %d attributes, want 2", len(got)) } want := attribute.KeyValue{Key: internal.UncompressedKey, Value: attribute.Int64Value(u)} if got[0] != want { t.Errorf("span.AddMessageSendEvent wrong uncompressed attribute: %v", got[0]) } want = attribute.KeyValue{Key: internal.CompressedKey, Value: attribute.Int64Value(c)} if got[1] != want { t.Errorf("span.AddMessageSendEvent wrong compressed attribute: %v", got[1]) } } func TestSpanAddMessageReceiveEvent(t *testing.T) { var u, c int64 = 3, 4 // OpenCensus does not set events if not recording. s := &span{recording: true} ocS := internal.NewSpan(s) ocS.AddMessageReceiveEvent(0, u, c) if s.eName != internal.MessageReceiveEvent { t.Error("span.AddMessageReceiveEvent did not set event name") } config := trace.NewEventConfig(s.eOpts...) 
got := config.Attributes() if len(got) != 2 { t.Fatalf("span.AddMessageReceiveEvent set %d attributes, want 2", len(got)) } want := attribute.KeyValue{Key: internal.UncompressedKey, Value: attribute.Int64Value(u)} if got[0] != want { t.Errorf("span.AddMessageReceiveEvent wrong uncompressed attribute: %v", got[0]) } want = attribute.KeyValue{Key: internal.CompressedKey, Value: attribute.Int64Value(c)} if got[1] != want { t.Errorf("span.AddMessageReceiveEvent wrong compressed attribute: %v", got[1]) } } func TestSpanAddLinkFails(t *testing.T) { h, restore := withHandler() defer restore() // OpenCensus does not try to set links if not recording. s := &span{recording: true} ocS := internal.NewSpan(s) ocS.AddLink(octrace.Link{}) if h.err == nil { t.Error("span.AddLink failed to raise an error") } } func TestSpanString(t *testing.T) { sc := trace.NewSpanContext(trace.SpanContextConfig{ TraceID: [16]byte{1}, SpanID: [8]byte{1}, }) s := &span{sc: sc} ocS := internal.NewSpan(s) if expected := "span 0100000000000000"; ocS.String() != expected { t.Errorf("span.String = %q, not %q", ocS.String(), expected) } } opentelemetry-go-1.21.0/bridge/opencensus/internal/tracer.go000066400000000000000000000051221452547353200241450ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal // import "go.opentelemetry.io/otel/bridge/opencensus/internal" import ( "context" "fmt" octrace "go.opencensus.io/trace" "go.opentelemetry.io/otel/bridge/opencensus/internal/oc2otel" "go.opentelemetry.io/otel/trace" ) // Tracer is an OpenCensus Tracer that wraps an OpenTelemetry Tracer. type Tracer struct { otelTracer trace.Tracer } // NewTracer returns an OpenCensus Tracer that wraps the OpenTelemetry tracer. func NewTracer(tracer trace.Tracer) octrace.Tracer { return &Tracer{otelTracer: tracer} } // StartSpan starts a new child span of the current span in the context. If // there is no span in the context, it creates a new trace and span. func (o *Tracer) StartSpan(ctx context.Context, name string, s ...octrace.StartOption) (context.Context, *octrace.Span) { otelOpts, err := oc2otel.StartOptions(s) if err != nil { Handle(fmt.Errorf("starting span %q: %w", name, err)) } ctx, sp := o.otelTracer.Start(ctx, name, otelOpts...) return ctx, NewSpan(sp) } // StartSpanWithRemoteParent starts a new child span of the span from the // given parent. func (o *Tracer) StartSpanWithRemoteParent(ctx context.Context, name string, parent octrace.SpanContext, s ...octrace.StartOption) (context.Context, *octrace.Span) { // make sure span context is zero'd out so we use the remote parent ctx = trace.ContextWithSpan(ctx, nil) ctx = trace.ContextWithRemoteSpanContext(ctx, oc2otel.SpanContext(parent)) return o.StartSpan(ctx, name, s...) } // FromContext returns the Span stored in a context. func (o *Tracer) FromContext(ctx context.Context) *octrace.Span { return NewSpan(trace.SpanFromContext(ctx)) } // NewContext returns a new context with the given Span attached. 
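// A minimal sketch of re-attaching a bridge span to a fresh context,
// assuming a Tracer t obtained from NewTracer and a *octrace.Span s that was
// previously returned by this bridge (names are hypothetical):
//
//	ctx := t.NewContext(context.Background(), s)
//	otelSpan := trace.SpanFromContext(ctx) // the wrapped OpenTelemetry span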
func (o *Tracer) NewContext(parent context.Context, s *octrace.Span) context.Context { if otSpan, ok := s.Internal().(*Span); ok { return trace.ContextWithSpan(parent, otSpan.otelSpan) } Handle(fmt.Errorf("unable to create context with span %q, since it was created using a different tracer", s.String())) return parent } opentelemetry-go-1.21.0/bridge/opencensus/internal/tracer_test.go000066400000000000000000000114441452547353200252100ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal_test import ( "context" "testing" octrace "go.opencensus.io/trace" "go.opentelemetry.io/otel/bridge/opencensus/internal" "go.opentelemetry.io/otel/bridge/opencensus/internal/oc2otel" "go.opentelemetry.io/otel/bridge/opencensus/internal/otel2oc" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" "go.opentelemetry.io/otel/trace/noop" ) type handler struct{ err error } func (h *handler) Handle(e error) { h.err = e } func withHandler() (*handler, func()) { h := new(handler) original := internal.Handle internal.Handle = h.Handle return h, func() { internal.Handle = original } } type tracer struct { embedded.Tracer ctx context.Context name string opts []trace.SpanStartOption } func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { t.ctx, t.name, t.opts = ctx, name, opts sub := noop.NewTracerProvider().Tracer("testing") return sub.Start(ctx, name, opts...) } type ctxKey string func TestTracerStartSpan(t *testing.T) { h, restore := withHandler() defer restore() otelTracer := &tracer{} ocTracer := internal.NewTracer(otelTracer) ctx := context.WithValue(context.Background(), ctxKey("key"), "value") name := "testing span" ocTracer.StartSpan(ctx, name, octrace.WithSpanKind(octrace.SpanKindClient)) if h.err != nil { t.Fatalf("OC tracer.StartSpan errored: %v", h.err) } if otelTracer.ctx != ctx { t.Error("OTel tracer.Start called with wrong context") } if otelTracer.name != name { t.Error("OTel tracer.Start called with wrong name") } sk := trace.SpanKindClient c := trace.NewSpanStartConfig(otelTracer.opts...) 
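// Decode the recorded start options to verify the span kind was translated.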
if c.SpanKind() != sk { t.Errorf("OTel tracer.Start called with wrong options: %#v", c) } } func TestTracerStartSpanReportsErrors(t *testing.T) { h, restore := withHandler() defer restore() ocTracer := internal.NewTracer(&tracer{}) ocTracer.StartSpan(context.Background(), "", octrace.WithSampler(octrace.AlwaysSample())) if h.err == nil { t.Error("OC tracer.StartSpan no error when converting Sampler") } } func TestTracerStartSpanWithRemoteParent(t *testing.T) { otelTracer := new(tracer) ocTracer := internal.NewTracer(otelTracer) sc := octrace.SpanContext{TraceID: [16]byte{1}, SpanID: [8]byte{1}} converted := oc2otel.SpanContext(sc).WithRemote(true) ocTracer.StartSpanWithRemoteParent(context.Background(), "", sc) got := trace.SpanContextFromContext(otelTracer.ctx) if !got.Equal(converted) { t.Error("tracer.StartSpanWithRemoteParent failed to set remote parent") } } func TestTracerFromContext(t *testing.T) { sc := trace.NewSpanContext(trace.SpanContextConfig{ TraceID: [16]byte{1}, SpanID: [8]byte{1}, }) ctx := trace.ContextWithSpanContext(context.Background(), sc) tracer := noop.NewTracerProvider().Tracer("TestTracerFromContext") // Test using the fact that the No-Op span will propagate a span context. ctx, _ = tracer.Start(ctx, "test") got := internal.NewTracer(tracer).FromContext(ctx).SpanContext() // Do not test the conversion, only the propagation. want := otel2oc.SpanContext(sc) if got != want { t.Errorf("tracer.FromContext returned wrong context: %#v", got) } } func TestTracerNewContext(t *testing.T) { sc := trace.NewSpanContext(trace.SpanContextConfig{ TraceID: [16]byte{1}, SpanID: [8]byte{1}, }) ctx := trace.ContextWithSpanContext(context.Background(), sc) tracer := noop.NewTracerProvider().Tracer("TestTracerNewContext") // Test using the fact that the No-Op span will propagate a span context. _, s := tracer.Start(ctx, "test") ocTracer := internal.NewTracer(tracer) ctx = ocTracer.NewContext(context.Background(), internal.NewSpan(s)) got := trace.SpanContextFromContext(ctx) if !got.Equal(sc) { t.Error("tracer.NewContext did not attach Span to context") } } type differentSpan struct { octrace.SpanInterface } func (s *differentSpan) String() string { return "testing span" } func TestTracerNewContextErrors(t *testing.T) { h, restore := withHandler() defer restore() ocTracer := internal.NewTracer(&tracer{}) ocSpan := octrace.NewSpan(&differentSpan{}) ocTracer.NewContext(context.Background(), ocSpan) if h.err == nil { t.Error("tracer.NewContext did not error for unrecognized span") } } opentelemetry-go-1.21.0/bridge/opencensus/metric.go000066400000000000000000000040661452547353200223410ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License.
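// A usage sketch for the metric bridge defined in this file, assuming an SDK
// metric exporter named exporter (hypothetical): registering the producer
// with a reader lets OpenCensus metrics flow into the OpenTelemetry SDK.
//
//	reader := metric.NewPeriodicReader(
//		exporter,
//		metric.WithProducer(opencensus.NewMetricProducer()),
//	)
//	provider := metric.NewMeterProvider(metric.WithReader(reader))
//	_ = provider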
package opencensus // import "go.opentelemetry.io/otel/bridge/opencensus" import ( "context" ocmetricdata "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricproducer" internal "go.opentelemetry.io/otel/bridge/opencensus/internal/ocmetric" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) // MetricProducer implements the [go.opentelemetry.io/otel/sdk/metric.Producer] to provide metrics // from OpenCensus to the OpenTelemetry SDK. type MetricProducer struct { manager *metricproducer.Manager } // NewMetricProducer returns a metric.Producer that fetches metrics from // OpenCensus. func NewMetricProducer(opts ...MetricOption) *MetricProducer { return &MetricProducer{ manager: metricproducer.GlobalManager(), } } var _ metric.Producer = (*MetricProducer)(nil) // Produce fetches metrics from the OpenCensus manager, // translates them to OpenTelemetry's data model, and returns them. func (p *MetricProducer) Produce(context.Context) ([]metricdata.ScopeMetrics, error) { producers := p.manager.GetAll() data := []*ocmetricdata.Metric{} for _, ocProducer := range producers { data = append(data, ocProducer.Read()...) } otelmetrics, err := internal.ConvertMetrics(data) if len(otelmetrics) == 0 { return nil, err } return []metricdata.ScopeMetrics{{ Scope: instrumentation.Scope{ Name: scopeName, Version: Version(), }, Metrics: otelmetrics, }}, err } opentelemetry-go-1.21.0/bridge/opencensus/metric_test.go000066400000000000000000000075541452547353200234060ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package opencensus // import "go.opentelemetry.io/otel/bridge/opencensus" import ( "context" "testing" "time" "github.com/stretchr/testify/require" ocmetricdata "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricproducer" ocresource "go.opencensus.io/resource" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" ) func TestMetricProducer(t *testing.T) { now := time.Now() for _, tc := range []struct { desc string input []*ocmetricdata.Metric expected []metricdata.ScopeMetrics expectErr bool }{ { desc: "empty", expected: nil, }, { desc: "success", input: []*ocmetricdata.Metric{ { Resource: &ocresource.Resource{ Labels: map[string]string{ "R1": "V1", "R2": "V2", }, }, TimeSeries: []*ocmetricdata.TimeSeries{ { StartTime: now, Points: []ocmetricdata.Point{ {Value: int64(123), Time: now}, }, }, }, }, }, expected: []metricdata.ScopeMetrics{{ Scope: instrumentation.Scope{ Name: scopeName, Version: Version(), }, Metrics: []metricdata.Metrics{ { Data: metricdata.Gauge[int64]{ DataPoints: []metricdata.DataPoint[int64]{ { Attributes: attribute.NewSet(), StartTime: now, Time: now, Value: 123, }, }, }, }, }, }}, }, { desc: "partial success", input: []*ocmetricdata.Metric{ { Descriptor: ocmetricdata.Descriptor{ Name: "foo.com/bad-point", Description: "a bad type", Unit: ocmetricdata.UnitDimensionless, Type: ocmetricdata.TypeGaugeDistribution, }, }, { Resource: &ocresource.Resource{ Labels: map[string]string{ "R1": "V1", "R2": "V2", }, }, TimeSeries: []*ocmetricdata.TimeSeries{ { StartTime: now, Points: []ocmetricdata.Point{ {Value: int64(123), Time: now}, }, }, }, }, }, expected: []metricdata.ScopeMetrics{{ Scope: instrumentation.Scope{ Name: scopeName, Version: Version(), }, Metrics: []metricdata.Metrics{ { Data: metricdata.Gauge[int64]{ DataPoints: []metricdata.DataPoint[int64]{ { Attributes: attribute.NewSet(), StartTime: now, Time: now, Value: 123, }, }, }, }, }, }}, expectErr: true, }, } { t.Run(tc.desc, func(t *testing.T) { fakeProducer := &fakeOCProducer{metrics: tc.input} metricproducer.GlobalManager().AddProducer(fakeProducer) defer metricproducer.GlobalManager().DeleteProducer(fakeProducer) output, err := NewMetricProducer().Produce(context.Background()) if tc.expectErr { require.Error(t, err) } else { require.Nil(t, err) } require.Equal(t, len(output), len(tc.expected)) for i := range output { metricdatatest.AssertEqual(t, tc.expected[i], output[i]) } }) } } type fakeOCProducer struct { metrics []*ocmetricdata.Metric } func (f *fakeOCProducer) Read() []*ocmetricdata.Metric { return f.metrics } opentelemetry-go-1.21.0/bridge/opencensus/test/000077500000000000000000000000001452547353200215015ustar00rootroot00000000000000opentelemetry-go-1.21.0/bridge/opencensus/test/bridge_test.go000066400000000000000000000224231452547353200243260ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package test import ( "context" "testing" octrace "go.opencensus.io/trace" "go.opentelemetry.io/otel/attribute" ocbridge "go.opentelemetry.io/otel/bridge/opencensus" "go.opentelemetry.io/otel/bridge/opencensus/internal" "go.opentelemetry.io/otel/codes" sdktrace "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/sdk/trace/tracetest" "go.opentelemetry.io/otel/trace" ) func TestMixedAPIs(t *testing.T) { sr := tracetest.NewSpanRecorder() tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr)) tracer := tp.Tracer("mixedapitracer") ocbridge.InstallTraceBridge(ocbridge.WithTracerProvider(tp)) func() { ctx := context.Background() var ocspan1 *octrace.Span ctx, ocspan1 = octrace.StartSpan(ctx, "OpenCensusSpan1") defer ocspan1.End() var otspan1 trace.Span ctx, otspan1 = tracer.Start(ctx, "OpenTelemetrySpan1") defer otspan1.End() var ocspan2 *octrace.Span ctx, ocspan2 = octrace.StartSpan(ctx, "OpenCensusSpan2") defer ocspan2.End() var otspan2 trace.Span _, otspan2 = tracer.Start(ctx, "OpenTelemetrySpan2") defer otspan2.End() }() spans := sr.Ended() if len(spans) != 4 { for _, span := range spans { t.Logf("Span: %s", span.Name()) } t.Fatalf("Got %d spans, expected %d.", len(spans), 4) } var parent trace.SpanContext for i := len(spans) - 1; i >= 0; i-- { // Verify that OpenCensus spans and OpenTelemetry spans have each // other as parents. if psid := spans[i].Parent().SpanID(); psid != parent.SpanID() { t.Errorf("Span %v had parent %v. Expected %v", spans[i].Name(), psid, parent.SpanID()) } parent = spans[i].SpanContext() } } func TestStartOptions(t *testing.T) { sr := tracetest.NewSpanRecorder() tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr)) ocbridge.InstallTraceBridge(ocbridge.WithTracerProvider(tp)) ctx := context.Background() _, span := octrace.StartSpan(ctx, "OpenCensusSpan", octrace.WithSpanKind(octrace.SpanKindClient)) span.End() spans := sr.Ended() if len(spans) != 1 { t.Fatalf("Got %d spans, expected %d", len(spans), 1) } if spans[0].SpanKind() != trace.SpanKindClient { t.Errorf("Got span kind %v, expected %d", spans[0].SpanKind(), trace.SpanKindClient) } } func TestStartSpanWithRemoteParent(t *testing.T) { sr := tracetest.NewSpanRecorder() tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr)) ocbridge.InstallTraceBridge(ocbridge.WithTracerProvider(tp)) tracer := tp.Tracer("remoteparent") ctx := context.Background() ctx, parent := tracer.Start(ctx, "OpenTelemetrySpan1") _, span := octrace.StartSpanWithRemoteParent(ctx, "OpenCensusSpan", ocbridge.OTelSpanContextToOC(parent.SpanContext())) span.End() spans := sr.Ended() if len(spans) != 1 { t.Fatalf("Got %d spans, expected %d", len(spans), 1) } if psid := spans[0].Parent().SpanID(); psid != parent.SpanContext().SpanID() { t.Errorf("Span %v, had parent %v. 
Expected %d", spans[0].Name(), psid, parent.SpanContext().SpanID()) } } func TestToFromContext(t *testing.T) { sr := tracetest.NewSpanRecorder() tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr)) ocbridge.InstallTraceBridge(ocbridge.WithTracerProvider(tp)) tracer := tp.Tracer("tofromcontext") func() { ctx := context.Background() _, otSpan1 := tracer.Start(ctx, "OpenTelemetrySpan1") defer otSpan1.End() // Use NewContext instead of the context from Start ctx = octrace.NewContext(ctx, internal.NewSpan(otSpan1)) ctx, _ = tracer.Start(ctx, "OpenTelemetrySpan2") // Get the opentelemetry span using the OpenCensus FromContext, and end it otSpan2 := octrace.FromContext(ctx) defer otSpan2.End() }() spans := sr.Ended() if len(spans) != 2 { t.Fatalf("Got %d spans, expected %d.", len(spans), 2) } var parent trace.SpanContext for i := len(spans) - 1; i >= 0; i-- { // Verify that OpenCensus spans and OpenTelemetry spans have each // other as parents. if psid := spans[i].Parent().SpanID(); psid != parent.SpanID() { t.Errorf("Span %v had parent %v. Expected %v", spans[i].Name(), psid, parent.SpanID()) } parent = spans[i].SpanContext() } } func TestIsRecordingEvents(t *testing.T) { sr := tracetest.NewSpanRecorder() tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr)) ocbridge.InstallTraceBridge(ocbridge.WithTracerProvider(tp)) ctx := context.Background() _, ocspan := octrace.StartSpan(ctx, "OpenCensusSpan1") if !ocspan.IsRecordingEvents() { t.Errorf("Got %v, expected true", ocspan.IsRecordingEvents()) } } func attrsMap(s []attribute.KeyValue) map[attribute.Key]attribute.Value { m := make(map[attribute.Key]attribute.Value, len(s)) for _, a := range s { m[a.Key] = a.Value } return m } func TestSetThings(t *testing.T) { sr := tracetest.NewSpanRecorder() tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr)) ocbridge.InstallTraceBridge(ocbridge.WithTracerProvider(tp)) ctx := context.Background() _, ocspan := octrace.StartSpan(ctx, "OpenCensusSpan1") ocspan.SetName("span-foo") ocspan.SetStatus(octrace.Status{Code: 1, Message: "foo"}) ocspan.AddAttributes( octrace.BoolAttribute("bool", true), octrace.Int64Attribute("int64", 12345), octrace.Float64Attribute("float64", 12.345), octrace.StringAttribute("string", "stringval"), ) ocspan.Annotate( []octrace.Attribute{octrace.StringAttribute("string", "annotateval")}, "annotate", ) ocspan.Annotatef( []octrace.Attribute{ octrace.Int64Attribute("int64", 12345), octrace.Float64Attribute("float64", 12.345), }, "annotate%d", 67890, ) ocspan.AddMessageSendEvent(123, 456, 789) ocspan.AddMessageReceiveEvent(246, 135, 369) ocspan.End() spans := sr.Ended() if len(spans) != 1 { t.Fatalf("Got %d spans, expected %d.", len(spans), 1) } s := spans[0] if s.Name() != "span-foo" { t.Errorf("Got name %v, expected span-foo", s.Name()) } if s.Status().Code != codes.Error { t.Errorf("Got code %v, expected %v", s.Status().Code, codes.Error) } if s.Status().Description != "foo" { t.Errorf("Got code %v, expected foo", s.Status().Description) } attrs := attrsMap(s.Attributes()) if v := attrs[attribute.Key("bool")]; !v.AsBool() { t.Errorf("Got attributes[bool] %v, expected true", v.AsBool()) } if v := attrs[attribute.Key("int64")]; v.AsInt64() != 12345 { t.Errorf("Got attributes[int64] %v, expected 12345", v.AsInt64()) } if v := attrs[attribute.Key("float64")]; v.AsFloat64() != 12.345 { t.Errorf("Got attributes[float64] %v, expected 12.345", v.AsFloat64()) } if v := attrs[attribute.Key("string")]; v.AsString() != "stringval" { t.Errorf("Got attributes[string] 
%v, expected stringval", v.AsString()) } if len(s.Events()) != 4 { t.Fatalf("Got len(events) = %v, expected 4", len(s.Events())) } annotateEvent := s.Events()[0] aeAttrs := attrsMap(annotateEvent.Attributes) annotatefEvent := s.Events()[1] afeAttrs := attrsMap(annotatefEvent.Attributes) sendEvent := s.Events()[2] receiveEvent := s.Events()[3] if v := aeAttrs[attribute.Key("string")]; v.AsString() != "annotateval" { t.Errorf("Got annotateEvent.Attributes[string] = %v, expected annotateval", v.AsString()) } if annotateEvent.Name != "annotate" { t.Errorf("Got annotateEvent.Name = %v, expected annotate", annotateEvent.Name) } if v := afeAttrs[attribute.Key("int64")]; v.AsInt64() != 12345 { t.Errorf("Got annotatefEvent.Attributes[int64] = %v, expected 12345", v.AsInt64()) } if v := afeAttrs[attribute.Key("float64")]; v.AsFloat64() != 12.345 { t.Errorf("Got annotatefEvent.Attributes[float64] = %v, expected 12.345", v.AsFloat64()) } if annotatefEvent.Name != "annotate67890" { t.Errorf("Got annotatefEvent.Name = %v, expected annotate67890", annotatefEvent.Name) } if v := aeAttrs[attribute.Key("string")]; v.AsString() != "annotateval" { t.Errorf("Got annotateEvent.Attributes[string] = %v, expected annotateval", v.AsString()) } seAttrs := attrsMap(sendEvent.Attributes) reAttrs := attrsMap(receiveEvent.Attributes) if sendEvent.Name != internal.MessageSendEvent { t.Errorf("Got sendEvent.Name = %v, expected message send", sendEvent.Name) } if v := seAttrs[internal.UncompressedKey]; v.AsInt64() != 456 { t.Errorf("Got sendEvent.Attributes[uncompressedKey] = %v, expected 456", v.AsInt64()) } if v := seAttrs[internal.CompressedKey]; v.AsInt64() != 789 { t.Errorf("Got sendEvent.Attributes[compressedKey] = %v, expected 789", v.AsInt64()) } if receiveEvent.Name != internal.MessageReceiveEvent { t.Errorf("Got receiveEvent.Name = %v, expected message receive", receiveEvent.Name) } if v := reAttrs[internal.UncompressedKey]; v.AsInt64() != 135 { t.Errorf("Got receiveEvent.Attributes[uncompressedKey] = %v, expected 135", v.AsInt64()) } if v := reAttrs[internal.CompressedKey]; v.AsInt64() != 369 { t.Errorf("Got receiveEvent.Attributes[compressedKey] = %v, expected 369", v.AsInt64()) } } opentelemetry-go-1.21.0/bridge/opencensus/test/go.mod000066400000000000000000000016521452547353200226130ustar00rootroot00000000000000module go.opentelemetry.io/otel/bridge/opencensus/test go 1.20 require ( go.opencensus.io v0.24.0 go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/bridge/opencensus v0.44.0 go.opentelemetry.io/otel/sdk v1.21.0 go.opentelemetry.io/otel/trace v1.21.0 ) require ( github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.21.0 // indirect golang.org/x/sys v0.14.0 // indirect ) replace go.opentelemetry.io/otel => ../../.. 
replace go.opentelemetry.io/otel/bridge/opencensus => ../ replace go.opentelemetry.io/otel/sdk => ../../../sdk replace go.opentelemetry.io/otel/trace => ../../../trace replace go.opentelemetry.io/otel/metric => ../../../metric replace go.opentelemetry.io/otel/sdk/metric => ../../../sdk/metric opentelemetry-go-1.21.0/bridge/opencensus/test/go.sum000066400000000000000000000240601452547353200226360ustar00rootroot00000000000000cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools 
v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= opentelemetry-go-1.21.0/bridge/opencensus/trace.go000066400000000000000000000036101452547353200221470ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package opencensus // import "go.opentelemetry.io/otel/bridge/opencensus" import ( octrace "go.opencensus.io/trace" "go.opentelemetry.io/otel/bridge/opencensus/internal" "go.opentelemetry.io/otel/bridge/opencensus/internal/oc2otel" "go.opentelemetry.io/otel/bridge/opencensus/internal/otel2oc" "go.opentelemetry.io/otel/trace" ) // InstallTraceBridge installs the OpenCensus trace bridge, which overwrites // the global OpenCensus tracer implementation. Once the bridge is installed, // spans recorded using OpenCensus are redirected to the OpenTelemetry SDK. func InstallTraceBridge(opts ...TraceOption) { octrace.DefaultTracer = newTraceBridge(opts) } func newTraceBridge(opts []TraceOption) octrace.Tracer { cfg := newTraceConfig(opts) return internal.NewTracer( cfg.tp.Tracer(scopeName, trace.WithInstrumentationVersion(Version())), ) } // OTelSpanContextToOC converts from an OpenTelemetry SpanContext to an // OpenCensus SpanContext, and handles any incompatibilities with the global // error handler. func OTelSpanContextToOC(sc trace.SpanContext) octrace.SpanContext { return otel2oc.SpanContext(sc) } // OCSpanContextToOTel converts from an OpenCensus SpanContext to an // OpenTelemetry SpanContext. func OCSpanContextToOTel(sc octrace.SpanContext) trace.SpanContext { return oc2otel.SpanContext(sc) } opentelemetry-go-1.21.0/bridge/opencensus/trace_test.go000066400000000000000000000025341452547353200232120ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package opencensus // import "go.opentelemetry.io/otel/bridge/opencensus" import ( "context" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/sdk/trace/tracetest" ) func TestNewTraceBridge(t *testing.T) { exporter := tracetest.NewInMemoryExporter() tp := trace.NewTracerProvider(trace.WithSyncer(exporter)) bridge := newTraceBridge([]TraceOption{WithTracerProvider(tp)}) _, span := bridge.StartSpan(context.Background(), "foo") span.End() gotSpans := exporter.GetSpans() require.Len(t, gotSpans, 1) gotSpan := gotSpans[0] assert.Equal(t, gotSpan.InstrumentationLibrary.Name, scopeName) assert.Equal(t, gotSpan.InstrumentationLibrary.Version, Version()) } opentelemetry-go-1.21.0/bridge/opencensus/version.go000066400000000000000000000014201452547353200225330ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package opencensus // import "go.opentelemetry.io/otel/bridge/opencensus" // Version is the current release version of the opencensus bridge. func Version() string { return "0.44.0" } opentelemetry-go-1.21.0/bridge/opentracing/000077500000000000000000000000001452547353200206515ustar00rootroot00000000000000opentelemetry-go-1.21.0/bridge/opentracing/README.md000066400000000000000000000041111452547353200221250ustar00rootroot00000000000000# OpenTelemetry/OpenTracing Bridge ## Getting started `go get go.opentelemetry.io/otel/bridge/opentracing` Assuming you have configured an OpenTelemetry `TracerProvider`, these will be the steps to follow to wire up the bridge: ```go import ( "go.opentelemetry.io/otel" otelBridge "go.opentelemetry.io/otel/bridge/opentracing" ) func main() { /* Create tracerProvider and configure OpenTelemetry ... */ otelTracer := tracerProvider.Tracer("tracer_name") // Use the bridgeTracer as your OpenTracing tracer. bridgeTracer, wrapperTracerProvider := otelBridge.NewTracerPair(otelTracer) // Set the wrapperTracerProvider as the global OpenTelemetry // TracerProvider so instrumentation will use it by default. otel.SetTracerProvider(wrapperTracerProvider) /* ... */ } ``` ## Interop from trace context from OpenTracing to OpenTelemetry In order to get OpenTracing spans properly into the OpenTelemetry context, so they can be propagated (both internally, and externally), you will need to explicitly use the `BridgeTracer` for creating your OpenTracing spans, rather than a bare OpenTracing `Tracer` instance. When you have started an OpenTracing Span, make sure the OpenTelemetry knows about it like this: ```go ctxWithOTSpan := opentracing.ContextWithSpan(ctx, otSpan) ctxWithOTAndOTelSpan := bridgeTracer.ContextWithSpanHook(ctxWithOTSpan, otSpan) // Propagate the otSpan to both OpenTracing and OpenTelemetry // instrumentation by using the ctxWithOTAndOTelSpan context. ``` ## Extended Functionality The bridge functionality can be extended beyond the OpenTracing API. 
Any [`trace.SpanContext`](https://pkg.go.dev/go.opentelemetry.io/otel/trace#SpanContext) method can be accessed as follows: ```go type spanContextProvider interface { IsSampled() bool TraceID() trace.TraceID SpanID() trace.SpanID TraceFlags() trace.TraceFlags ... // any other available method can be added here to access it } var sc opentracing.SpanContext = ... if s, ok := sc.(spanContextProvider); ok { // Use TraceID by s.TraceID() // Use SpanID by s.SpanID() // Use TraceFlags by s.TraceFlags() ... } ``` opentelemetry-go-1.21.0/bridge/opentracing/bridge.go000066400000000000000000000547531452547353200224420ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package opentracing // import "go.opentelemetry.io/otel/bridge/opentracing" import ( "context" "fmt" "net/http" "strings" "sync" ot "github.com/opentracing/opentracing-go" otext "github.com/opentracing/opentracing-go/ext" otlog "github.com/opentracing/opentracing-go/log" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/baggage" "go.opentelemetry.io/otel/bridge/opentracing/migration" "go.opentelemetry.io/otel/codes" iBaggage "go.opentelemetry.io/otel/internal/baggage" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/noop" ) var ( noopTracer = noop.NewTracerProvider().Tracer("") noopSpan = func() trace.Span { _, s := noopTracer.Start(context.Background(), "") return s }() ) type bridgeSpanContext struct { bag baggage.Baggage trace.SpanContext } var _ ot.SpanContext = &bridgeSpanContext{} func newBridgeSpanContext(otelSpanContext trace.SpanContext, parentOtSpanContext ot.SpanContext) *bridgeSpanContext { bCtx := &bridgeSpanContext{ bag: baggage.Baggage{}, SpanContext: otelSpanContext, } if parentOtSpanContext != nil { parentOtSpanContext.ForeachBaggageItem(func(key, value string) bool { bCtx.setBaggageItem(key, value) return true }) } return bCtx } func (c *bridgeSpanContext) ForeachBaggageItem(handler func(k, v string) bool) { for _, m := range c.bag.Members() { if !handler(m.Key(), m.Value()) { return } } } func (c *bridgeSpanContext) setBaggageItem(restrictedKey, value string) { crk := http.CanonicalHeaderKey(restrictedKey) m, err := baggage.NewMember(crk, value) if err != nil { return } c.bag, _ = c.bag.SetMember(m) } func (c *bridgeSpanContext) baggageItem(restrictedKey string) baggage.Member { crk := http.CanonicalHeaderKey(restrictedKey) return c.bag.Member(crk) } type bridgeSpan struct { otelSpan trace.Span ctx *bridgeSpanContext tracer *BridgeTracer skipDeferHook bool extraBaggageItems map[string]string } var _ ot.Span = &bridgeSpan{} func newBridgeSpan(otelSpan trace.Span, bridgeSC *bridgeSpanContext, tracer *BridgeTracer) *bridgeSpan { return &bridgeSpan{ otelSpan: otelSpan, ctx: bridgeSC, tracer: tracer, skipDeferHook: false, extraBaggageItems: nil, } } func (s *bridgeSpan) Finish() { s.otelSpan.End() } func (s *bridgeSpan) FinishWithOptions(opts ot.FinishOptions) { var
otelOpts []trace.SpanEndOption if !opts.FinishTime.IsZero() { otelOpts = append(otelOpts, trace.WithTimestamp(opts.FinishTime)) } for _, record := range opts.LogRecords { s.logRecord(record) } for _, data := range opts.BulkLogData { s.logRecord(data.ToLogRecord()) } s.otelSpan.End(otelOpts...) } func (s *bridgeSpan) logRecord(record ot.LogRecord) { s.otelSpan.AddEvent( "", trace.WithTimestamp(record.Timestamp), trace.WithAttributes(otLogFieldsToOTelAttrs(record.Fields)...), ) } func (s *bridgeSpan) Context() ot.SpanContext { return s.ctx } func (s *bridgeSpan) SetOperationName(operationName string) ot.Span { s.otelSpan.SetName(operationName) return s } // SetTag method adds a tag to the span. // // Note about the following value conversions: // - int -> int64 // - uint -> string // - int32 -> int64 // - uint32 -> int64 // - uint64 -> string // - float32 -> float64 func (s *bridgeSpan) SetTag(key string, value interface{}) ot.Span { switch key { case string(otext.SpanKind): // TODO: Should we ignore it? case string(otext.Error): if b, ok := value.(bool); ok && b { s.otelSpan.SetStatus(codes.Error, "") } default: s.otelSpan.SetAttributes(otTagToOTelAttr(key, value)) } return s } func (s *bridgeSpan) LogFields(fields ...otlog.Field) { s.otelSpan.AddEvent( "", trace.WithAttributes(otLogFieldsToOTelAttrs(fields)...), ) } type bridgeFieldEncoder struct { pairs []attribute.KeyValue } var _ otlog.Encoder = &bridgeFieldEncoder{} func (e *bridgeFieldEncoder) EmitString(key, value string) { e.emitCommon(key, value) } func (e *bridgeFieldEncoder) EmitBool(key string, value bool) { e.emitCommon(key, value) } func (e *bridgeFieldEncoder) EmitInt(key string, value int) { e.emitCommon(key, value) } func (e *bridgeFieldEncoder) EmitInt32(key string, value int32) { e.emitCommon(key, value) } func (e *bridgeFieldEncoder) EmitInt64(key string, value int64) { e.emitCommon(key, value) } func (e *bridgeFieldEncoder) EmitUint32(key string, value uint32) { e.emitCommon(key, value) } func (e *bridgeFieldEncoder) EmitUint64(key string, value uint64) { e.emitCommon(key, value) } func (e *bridgeFieldEncoder) EmitFloat32(key string, value float32) { e.emitCommon(key, value) } func (e *bridgeFieldEncoder) EmitFloat64(key string, value float64) { e.emitCommon(key, value) } func (e *bridgeFieldEncoder) EmitObject(key string, value interface{}) { e.emitCommon(key, value) } func (e *bridgeFieldEncoder) EmitLazyLogger(value otlog.LazyLogger) { value(e) } func (e *bridgeFieldEncoder) emitCommon(key string, value interface{}) { e.pairs = append(e.pairs, otTagToOTelAttr(key, value)) } func otLogFieldsToOTelAttrs(fields []otlog.Field) []attribute.KeyValue { encoder := &bridgeFieldEncoder{} for _, field := range fields { field.Marshal(encoder) } return encoder.pairs } func (s *bridgeSpan) LogKV(alternatingKeyValues ...interface{}) { fields, err := otlog.InterleavedKVToFields(alternatingKeyValues...) if err != nil { return } s.LogFields(fields...) 
} func (s *bridgeSpan) SetBaggageItem(restrictedKey, value string) ot.Span { s.updateOTelContext(restrictedKey, value) s.setBaggageItemOnly(restrictedKey, value) return s } func (s *bridgeSpan) setBaggageItemOnly(restrictedKey, value string) { s.ctx.setBaggageItem(restrictedKey, value) } func (s *bridgeSpan) updateOTelContext(restrictedKey, value string) { if s.extraBaggageItems == nil { s.extraBaggageItems = make(map[string]string) } s.extraBaggageItems[restrictedKey] = value } func (s *bridgeSpan) BaggageItem(restrictedKey string) string { return s.ctx.baggageItem(restrictedKey).Value() } func (s *bridgeSpan) Tracer() ot.Tracer { return s.tracer } func (s *bridgeSpan) LogEvent(event string) { s.LogEventWithPayload(event, nil) } func (s *bridgeSpan) LogEventWithPayload(event string, payload interface{}) { data := ot.LogData{ Event: event, Payload: payload, } s.Log(data) } func (s *bridgeSpan) Log(data ot.LogData) { record := data.ToLogRecord() s.LogFields(record.Fields...) } type bridgeSetTracer struct { isSet bool otelTracer trace.Tracer warningHandler BridgeWarningHandler warnOnce sync.Once } func (s *bridgeSetTracer) tracer() trace.Tracer { if !s.isSet { s.warnOnce.Do(func() { s.warningHandler("The OpenTelemetry tracer is not set, default no-op tracer is used! Call SetOpenTelemetryTracer to set it up.\n") }) } return s.otelTracer } // BridgeWarningHandler is a type of handler that receives warnings // from the BridgeTracer. type BridgeWarningHandler func(msg string) // BridgeTracer is an implementation of the OpenTracing tracer, which // translates the calls to the OpenTracing API into OpenTelemetry // counterparts and calls the underlying OpenTelemetry tracer. type BridgeTracer struct { setTracer bridgeSetTracer warningHandler BridgeWarningHandler warnOnce sync.Once propagator propagation.TextMapPropagator } var ( _ ot.Tracer = &BridgeTracer{} _ ot.TracerContextWithSpanExtension = &BridgeTracer{} ) // NewBridgeTracer creates a new BridgeTracer. The new tracer forwards // the calls to the OpenTelemetry Noop tracer, so it should be // overridden with the SetOpenTelemetryTracer function. The warnings // handler does nothing by default, so to override it use the // SetWarningHandler function. func NewBridgeTracer() *BridgeTracer { return &BridgeTracer{ setTracer: bridgeSetTracer{ warningHandler: func(msg string) {}, otelTracer: noopTracer, }, warningHandler: func(msg string) {}, propagator: nil, } } // SetWarningHandler overrides the warning handler. func (t *BridgeTracer) SetWarningHandler(handler BridgeWarningHandler) { t.setTracer.warningHandler = handler t.warningHandler = handler } // SetOpenTelemetryTracer overrides the underlying OpenTelemetry // tracer. The passed tracer should know how to operate in the // environment that uses OpenTracing API. func (t *BridgeTracer) SetOpenTelemetryTracer(tracer trace.Tracer) { t.setTracer.otelTracer = tracer t.setTracer.isSet = true } // SetTextMapPropagator sets propagator as the TextMapPropagator to use by the // BridgeTracer. func (t *BridgeTracer) SetTextMapPropagator(propagator propagation.TextMapPropagator) { t.propagator = propagator } // NewHookedContext returns a Context that has ctx as its parent and is // wrapped to handle baggage set and get operations. 
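// A minimal sketch, assuming an existing *BridgeTracer named bt and an
// incoming context ctx (names are hypothetical):
//
//	ctx = bt.NewHookedContext(ctx)
//	// Baggage written on ctx through go.opentelemetry.io/otel/baggage is
//	// mirrored onto the active OpenTracing bridge span, and baggage set on
//	// the bridge span becomes visible to OpenTelemetry baggage readers.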
func (t *BridgeTracer) NewHookedContext(ctx context.Context) context.Context { ctx = iBaggage.ContextWithSetHook(ctx, t.baggageSetHook) ctx = iBaggage.ContextWithGetHook(ctx, t.baggageGetHook) return ctx } func (t *BridgeTracer) baggageSetHook(ctx context.Context, list iBaggage.List) context.Context { span := ot.SpanFromContext(ctx) if span == nil { t.warningHandler("No active OpenTracing span, can not propagate the baggage items from OpenTelemetry context\n") return ctx } bSpan, ok := span.(*bridgeSpan) if !ok { t.warningHandler("Encountered a foreign OpenTracing span, will not propagate the baggage items from OpenTelemetry context\n") return ctx } for k, v := range list { bSpan.setBaggageItemOnly(k, v.Value) } return ctx } func (t *BridgeTracer) baggageGetHook(ctx context.Context, list iBaggage.List) iBaggage.List { span := ot.SpanFromContext(ctx) if span == nil { t.warningHandler("No active OpenTracing span, can not propagate the baggage items from OpenTracing span context\n") return list } bSpan, ok := span.(*bridgeSpan) if !ok { t.warningHandler("Encountered a foreign OpenTracing span, will not propagate the baggage items from OpenTracing span context\n") return list } items := bSpan.extraBaggageItems if len(items) == 0 { return list } // Privilege of using the internal representation of Baggage here comes // with the responsibility to make sure we maintain its immutability. We // need to return a copy to ensure this. merged := make(iBaggage.List, len(list)) for k, v := range list { merged[k] = v } for k, v := range items { // Overwrite according to OpenTelemetry specification. merged[k] = iBaggage.Item{Value: v} } return merged } // StartSpan is a part of the implementation of the OpenTracing Tracer // interface. func (t *BridgeTracer) StartSpan(operationName string, opts ...ot.StartSpanOption) ot.Span { sso := ot.StartSpanOptions{} for _, opt := range opts { opt.Apply(&sso) } parentBridgeSC, links := otSpanReferencesToParentAndLinks(sso.References) attributes, kind, hadTrueErrorTag := otTagsToOTelAttributesKindAndError(sso.Tags) checkCtx := migration.WithDeferredSetup(context.Background()) if parentBridgeSC != nil { checkCtx = trace.ContextWithRemoteSpanContext(checkCtx, parentBridgeSC.SpanContext) } checkCtx2, otelSpan := t.setTracer.tracer().Start( checkCtx, operationName, trace.WithAttributes(attributes...), trace.WithTimestamp(sso.StartTime), trace.WithLinks(links...), trace.WithSpanKind(kind), ) if ot.SpanFromContext(checkCtx2) != nil { t.warnOnce.Do(func() { t.warningHandler("SDK should have deferred the context setup, see the documentation of go.opentelemetry.io/otel/bridge/opentracing/migration\n") }) } if hadTrueErrorTag { otelSpan.SetStatus(codes.Error, "") } // One does not simply pass a concrete pointer to function // that takes some interface. In case of passing nil concrete // pointer, we get an interface with non-nil type (because the // pointer type is known) and a nil value. Which means // interface is not nil, but calling some interface function // on it will most likely result in nil pointer dereference. var otSpanContext ot.SpanContext if parentBridgeSC != nil { otSpanContext = parentBridgeSC } sctx := newBridgeSpanContext(otelSpan.SpanContext(), otSpanContext) span := newBridgeSpan(otelSpan, sctx, t) return span } // ContextWithBridgeSpan sets up the context with the passed // OpenTelemetry span as the active OpenTracing span. 
// // This function should be used by the OpenTelemetry tracers that want // to be aware how to operate in the environment using OpenTracing // API. func (t *BridgeTracer) ContextWithBridgeSpan(ctx context.Context, span trace.Span) context.Context { var otSpanContext ot.SpanContext if parentSpan := ot.SpanFromContext(ctx); parentSpan != nil { otSpanContext = parentSpan.Context() } bCtx := newBridgeSpanContext(span.SpanContext(), otSpanContext) bSpan := newBridgeSpan(span, bCtx, t) bSpan.skipDeferHook = true return ot.ContextWithSpan(ctx, bSpan) } // ContextWithSpanHook is an implementation of the OpenTracing tracer // extension interface. It will call the DeferredContextSetupHook // function on the tracer if it implements the // DeferredContextSetupTracerExtension interface. func (t *BridgeTracer) ContextWithSpanHook(ctx context.Context, span ot.Span) context.Context { bSpan, ok := span.(*bridgeSpan) if !ok { t.warningHandler("Encountered a foreign OpenTracing span, will not run a possible deferred context setup hook\n") return ctx } if bSpan.skipDeferHook { return ctx } if tracerWithExtension, ok := bSpan.tracer.setTracer.tracer().(migration.DeferredContextSetupTracerExtension); ok { ctx = tracerWithExtension.DeferredContextSetupHook(ctx, bSpan.otelSpan) } return ctx } func otTagsToOTelAttributesKindAndError(tags map[string]interface{}) ([]attribute.KeyValue, trace.SpanKind, bool) { kind := trace.SpanKindInternal err := false var pairs []attribute.KeyValue for k, v := range tags { switch k { case string(otext.SpanKind): sk := v if s, ok := v.(string); ok { sk = otext.SpanKindEnum(strings.ToLower(s)) } switch sk { case otext.SpanKindRPCClientEnum: kind = trace.SpanKindClient case otext.SpanKindRPCServerEnum: kind = trace.SpanKindServer case otext.SpanKindProducerEnum: kind = trace.SpanKindProducer case otext.SpanKindConsumerEnum: kind = trace.SpanKindConsumer } case string(otext.Error): if b, ok := v.(bool); ok && b { err = true } default: pairs = append(pairs, otTagToOTelAttr(k, v)) } } return pairs, kind, err } // otTagToOTelAttr converts given key-value into attribute.KeyValue. // Note that some conversions are not obvious: // - int -> int64 // - uint -> string // - int32 -> int64 // - uint32 -> int64 // - uint64 -> string // - float32 -> float64 func otTagToOTelAttr(k string, v interface{}) attribute.KeyValue { key := otTagToOTelAttrKey(k) switch val := v.(type) { case bool: return key.Bool(val) case int64: return key.Int64(val) case uint64: return key.String(fmt.Sprintf("%d", val)) case float64: return key.Float64(val) case int8: return key.Int64(int64(val)) case uint8: return key.Int64(int64(val)) case int16: return key.Int64(int64(val)) case uint16: return key.Int64(int64(val)) case int32: return key.Int64(int64(val)) case uint32: return key.Int64(int64(val)) case float32: return key.Float64(float64(val)) case int: return key.Int(val) case uint: return key.String(fmt.Sprintf("%d", val)) case string: return key.String(val) default: return key.String(fmt.Sprint(v)) } } func otTagToOTelAttrKey(k string) attribute.Key { return attribute.Key(k) } func otSpanReferencesToParentAndLinks(references []ot.SpanReference) (*bridgeSpanContext, []trace.Link) { var ( parent *bridgeSpanContext links []trace.Link ) for _, reference := range references { bridgeSC, ok := reference.ReferencedContext.(*bridgeSpanContext) if !ok { // We ignore foreign ot span contexts, // sorry. We have no way of getting any // TraceID and SpanID out of it for form a // OTel SpanContext for OTel Link. 
And // we can't make it a parent - it also needs a // valid OTel SpanContext. continue } if parent != nil { links = append(links, otSpanReferenceToOTelLink(bridgeSC, reference.Type)) } else { if reference.Type == ot.ChildOfRef { parent = bridgeSC } else { links = append(links, otSpanReferenceToOTelLink(bridgeSC, reference.Type)) } } } return parent, links } func otSpanReferenceToOTelLink(bridgeSC *bridgeSpanContext, refType ot.SpanReferenceType) trace.Link { return trace.Link{ SpanContext: bridgeSC.SpanContext, Attributes: otSpanReferenceTypeToOTelLinkAttributes(refType), } } func otSpanReferenceTypeToOTelLinkAttributes(refType ot.SpanReferenceType) []attribute.KeyValue { return []attribute.KeyValue{ attribute.String("ot-span-reference-type", otSpanReferenceTypeToString(refType)), } } func otSpanReferenceTypeToString(refType ot.SpanReferenceType) string { switch refType { case ot.ChildOfRef: // "extra", because first child-of reference is used // as a parent, so this function isn't even called for // it. return "extra-child-of" case ot.FollowsFromRef: return "follows-from-ref" default: return fmt.Sprintf("unknown-%d", int(refType)) } } // fakeSpan is just a holder of span context, nothing more. It's for // propagators, so they can get the span context from Go context. type fakeSpan struct { trace.Span sc trace.SpanContext } func (s fakeSpan) SpanContext() trace.SpanContext { return s.sc } // Inject is a part of the implementation of the OpenTracing Tracer // interface. // // Currently only the HTTPHeaders and TextMap formats are supported. func (t *BridgeTracer) Inject(sm ot.SpanContext, format interface{}, carrier interface{}) error { bridgeSC, ok := sm.(*bridgeSpanContext) if !ok { return ot.ErrInvalidSpanContext } if !bridgeSC.IsValid() { return ot.ErrInvalidSpanContext } builtinFormat, ok := format.(ot.BuiltinFormat) if !ok { return ot.ErrUnsupportedFormat } var textCarrier propagation.TextMapCarrier var err error switch builtinFormat { case ot.HTTPHeaders: if hhcarrier, ok := carrier.(ot.HTTPHeadersCarrier); ok { textCarrier = propagation.HeaderCarrier(hhcarrier) } else { textCarrier, err = newTextMapWrapperForInject(carrier) } case ot.TextMap: if textCarrier, ok = carrier.(propagation.TextMapCarrier); !ok { textCarrier, err = newTextMapWrapperForInject(carrier) } default: err = ot.ErrUnsupportedFormat } if err != nil { return err } fs := fakeSpan{ Span: noopSpan, sc: bridgeSC.SpanContext, } ctx := trace.ContextWithSpan(context.Background(), fs) ctx = baggage.ContextWithBaggage(ctx, bridgeSC.bag) t.getPropagator().Inject(ctx, textCarrier) return nil } // Extract is a part of the implementation of the OpenTracing Tracer // interface. // // Currently only the HTTPHeaders and TextMap formats are supported. 
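//
// A round-trip sketch (illustrative only; it assumes bridge is a
// *BridgeTracer and span is an OpenTracing span started from it):
//
//	carrier := ot.HTTPHeadersCarrier(http.Header{})
//	if err := bridge.Inject(span.Context(), ot.HTTPHeaders, carrier); err != nil {
//		// handle the error
//	}
//	sc, err := bridge.Extract(ot.HTTPHeaders, carrier)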
func (t *BridgeTracer) Extract(format interface{}, carrier interface{}) (ot.SpanContext, error) {
	builtinFormat, ok := format.(ot.BuiltinFormat)
	if !ok {
		return nil, ot.ErrUnsupportedFormat
	}
	var textCarrier propagation.TextMapCarrier
	var err error
	switch builtinFormat {
	case ot.HTTPHeaders:
		if hhcarrier, ok := carrier.(ot.HTTPHeadersCarrier); ok {
			textCarrier = propagation.HeaderCarrier(hhcarrier)
		} else {
			textCarrier, err = newTextMapWrapperForExtract(carrier)
		}
	case ot.TextMap:
		if textCarrier, ok = carrier.(propagation.TextMapCarrier); !ok {
			textCarrier, err = newTextMapWrapperForExtract(carrier)
		}
	default:
		err = ot.ErrUnsupportedFormat
	}
	if err != nil {
		return nil, err
	}
	ctx := t.getPropagator().Extract(context.Background(), textCarrier)
	bag := baggage.FromContext(ctx)
	bridgeSC := &bridgeSpanContext{
		bag:         bag,
		SpanContext: trace.SpanContextFromContext(ctx),
	}
	if !bridgeSC.IsValid() {
		return nil, ot.ErrSpanContextNotFound
	}
	return bridgeSC, nil
}

func (t *BridgeTracer) getPropagator() propagation.TextMapPropagator {
	if t.propagator != nil {
		return t.propagator
	}
	return otel.GetTextMapPropagator()
}

// textMapWrapper adapts an opentracing.TextMapWriter and an
// opentracing.TextMapReader to the propagation.TextMapCarrier interface.
// Usually, the Inject method will only use the write-related interface and
// the Extract method will only use the read-related interface.
// To avoid panics when the carrier implements only one of the two interfaces,
// a default implementation of the missing one is provided (textMapWriter and
// textMapReader).
type textMapWrapper struct {
	ot.TextMapWriter
	ot.TextMapReader
	readerMap map[string]string
}

func (t *textMapWrapper) Get(key string) string {
	if t.readerMap == nil {
		t.loadMap()
	}
	return t.readerMap[key]
}

func (t *textMapWrapper) Set(key string, value string) {
	t.TextMapWriter.Set(key, value)
}

func (t *textMapWrapper) Keys() []string {
	if t.readerMap == nil {
		t.loadMap()
	}
	str := make([]string, 0, len(t.readerMap))
	for key := range t.readerMap {
		str = append(str, key)
	}
	return str
}

func (t *textMapWrapper) loadMap() {
	t.readerMap = make(map[string]string)
	_ = t.ForeachKey(func(key, val string) error {
		t.readerMap[key] = val
		return nil
	})
}

func newTextMapWrapperForExtract(carrier interface{}) (*textMapWrapper, error) {
	t := &textMapWrapper{}
	reader, ok := carrier.(ot.TextMapReader)
	if !ok {
		return nil, ot.ErrInvalidCarrier
	}
	t.TextMapReader = reader
	writer, ok := carrier.(ot.TextMapWriter)
	if ok {
		t.TextMapWriter = writer
	} else {
		t.TextMapWriter = &textMapWriter{}
	}
	return t, nil
}

func newTextMapWrapperForInject(carrier interface{}) (*textMapWrapper, error) {
	t := &textMapWrapper{}
	writer, ok := carrier.(ot.TextMapWriter)
	if !ok {
		return nil, ot.ErrInvalidCarrier
	}
	t.TextMapWriter = writer
	reader, ok := carrier.(ot.TextMapReader)
	if ok {
		t.TextMapReader = reader
	} else {
		t.TextMapReader = &textMapReader{}
	}
	return t, nil
}

type textMapWriter struct{}

func (t *textMapWriter) Set(key string, value string) {
	// maybe print a warning log.
}

type textMapReader struct{}

func (t *textMapReader) ForeachKey(handler func(key, val string) error) error {
	return nil // maybe print a warning log.
}
opentelemetry-go-1.21.0/bridge/opentracing/bridge_test.go000066400000000000000000000360261452547353200235020ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package opentracing import ( "context" "errors" "fmt" "net/http" "reflect" "strings" "testing" ot "github.com/opentracing/opentracing-go" "github.com/opentracing/opentracing-go/ext" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/bridge/opentracing/internal" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" ) type testOnlyTextMapReader struct{} func newTestOnlyTextMapReader() *testOnlyTextMapReader { return &testOnlyTextMapReader{} } func (t *testOnlyTextMapReader) ForeachKey(handler func(key string, val string) error) error { _ = handler("key1", "val1") _ = handler("key2", "val2") return nil } type testOnlyTextMapWriter struct { m map[string]string } func newTestOnlyTextMapWriter() *testOnlyTextMapWriter { return &testOnlyTextMapWriter{m: map[string]string{}} } func (t *testOnlyTextMapWriter) Set(key, val string) { t.m[key] = val } type testTextMapReaderAndWriter struct { *testOnlyTextMapReader *testOnlyTextMapWriter } func newTestTextMapReaderAndWriter() *testTextMapReaderAndWriter { return &testTextMapReaderAndWriter{ testOnlyTextMapReader: newTestOnlyTextMapReader(), testOnlyTextMapWriter: newTestOnlyTextMapWriter(), } } func TestTextMapWrapper_New(t *testing.T) { _, err := newTextMapWrapperForExtract(newTestOnlyTextMapReader()) assert.NoError(t, err) _, err = newTextMapWrapperForExtract(newTestOnlyTextMapWriter()) assert.True(t, errors.Is(err, ot.ErrInvalidCarrier)) _, err = newTextMapWrapperForExtract(newTestTextMapReaderAndWriter()) assert.NoError(t, err) _, err = newTextMapWrapperForInject(newTestOnlyTextMapWriter()) assert.NoError(t, err) _, err = newTextMapWrapperForInject(newTestOnlyTextMapReader()) assert.True(t, errors.Is(err, ot.ErrInvalidCarrier)) _, err = newTextMapWrapperForInject(newTestTextMapReaderAndWriter()) assert.NoError(t, err) } func TestTextMapWrapper_action(t *testing.T) { testExtractFunc := func(carrier propagation.TextMapCarrier) { str := carrier.Keys() assert.Len(t, str, 2) assert.Contains(t, str, "key1", "key2") assert.Equal(t, carrier.Get("key1"), "val1") assert.Equal(t, carrier.Get("key2"), "val2") } testInjectFunc := func(carrier propagation.TextMapCarrier) { carrier.Set("key1", "val1") carrier.Set("key2", "val2") wrap, ok := carrier.(*textMapWrapper) assert.True(t, ok) writer, ok := wrap.TextMapWriter.(*testOnlyTextMapWriter) if ok { assert.Contains(t, writer.m, "key1", "key2", "val1", "val2") return } writer2, ok := wrap.TextMapWriter.(*testTextMapReaderAndWriter) assert.True(t, ok) assert.Contains(t, writer2.m, "key1", "key2", "val1", "val2") } onlyWriter, err := newTextMapWrapperForExtract(newTestOnlyTextMapReader()) assert.NoError(t, err) testExtractFunc(onlyWriter) onlyReader, err := newTextMapWrapperForInject(&testOnlyTextMapWriter{m: map[string]string{}}) assert.NoError(t, err) testInjectFunc(onlyReader) both, err := newTextMapWrapperForExtract(newTestTextMapReaderAndWriter()) assert.NoError(t, err) testExtractFunc(both) both, err = newTextMapWrapperForInject(newTestTextMapReaderAndWriter()) 
assert.NoError(t, err) testInjectFunc(both) } var ( testHeader = "test-trace-id" traceID trace.TraceID = [16]byte{byte(10)} spanID trace.SpanID = [8]byte{byte(11)} ) type testTextMapPropagator struct{} func (t testTextMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) { carrier.Set(testHeader, strings.Join([]string{traceID.String(), spanID.String()}, ":")) // Test for panic _ = carrier.Get("test") _ = carrier.Keys() } func (t testTextMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context { traces := carrier.Get(testHeader) str := strings.Split(traces, ":") if len(str) != 2 { return ctx } exist := false for _, key := range carrier.Keys() { if strings.EqualFold(testHeader, key) { exist = true break } } if !exist { return ctx } var ( traceID, _ = trace.TraceIDFromHex(str[0]) spanID, _ = trace.SpanIDFromHex(str[1]) sc = trace.NewSpanContext(trace.SpanContextConfig{ TraceID: traceID, SpanID: spanID, }) ) // Test for panic carrier.Set("key", "val") return trace.ContextWithRemoteSpanContext(ctx, sc) } func (t testTextMapPropagator) Fields() []string { return []string{"test"} } // textMapCarrier Implemented propagation.TextMapCarrier interface. type textMapCarrier struct { m map[string]string } var _ propagation.TextMapCarrier = (*textMapCarrier)(nil) func newTextCarrier() *textMapCarrier { return &textMapCarrier{m: map[string]string{}} } func (t *textMapCarrier) Get(key string) string { return t.m[key] } func (t *textMapCarrier) Set(key string, value string) { t.m[key] = value } func (t *textMapCarrier) Keys() []string { str := make([]string, 0, len(t.m)) for key := range t.m { str = append(str, key) } return str } // testTextMapReader only implemented opentracing.TextMapReader interface. type testTextMapReader struct { m *map[string]string } func newTestTextMapReader(m *map[string]string) *testTextMapReader { return &testTextMapReader{m: m} } func (t *testTextMapReader) ForeachKey(handler func(key string, val string) error) error { for key, val := range *t.m { if err := handler(key, val); err != nil { return err } } return nil } // testTextMapWriter only implemented opentracing.TextMapWriter interface. 
type testTextMapWriter struct { m *map[string]string } func newTestTextMapWriter(m *map[string]string) *testTextMapWriter { return &testTextMapWriter{m: m} } func (t *testTextMapWriter) Set(key, val string) { (*t.m)[key] = val } type samplable interface { IsSampled() bool } func TestBridgeTracer_ExtractAndInject(t *testing.T) { bridge := NewBridgeTracer() bridge.SetTextMapPropagator(new(testTextMapPropagator)) tmc := newTextCarrier() shareMap := map[string]string{} otTextMap := ot.TextMapCarrier{} httpHeader := ot.HTTPHeadersCarrier(http.Header{}) testCases := []struct { name string injectCarrierType ot.BuiltinFormat extractCarrierType ot.BuiltinFormat extractCarrier interface{} injectCarrier interface{} extractErr error injectErr error }{ { name: "support for propagation.TextMapCarrier", injectCarrierType: ot.TextMap, injectCarrier: tmc, extractCarrierType: ot.TextMap, extractCarrier: tmc, }, { name: "support for opentracing.TextMapReader and opentracing.TextMapWriter", injectCarrierType: ot.TextMap, injectCarrier: otTextMap, extractCarrierType: ot.TextMap, extractCarrier: otTextMap, }, { name: "support for HTTPHeaders", injectCarrierType: ot.HTTPHeaders, injectCarrier: httpHeader, extractCarrierType: ot.HTTPHeaders, extractCarrier: httpHeader, }, { name: "support for opentracing.TextMapReader and opentracing.TextMapWriter,non-same instance", injectCarrierType: ot.TextMap, injectCarrier: newTestTextMapWriter(&shareMap), extractCarrierType: ot.TextMap, extractCarrier: newTestTextMapReader(&shareMap), }, { name: "inject: format type is HTTPHeaders, but carrier is not HTTPHeadersCarrier", injectCarrierType: ot.HTTPHeaders, injectCarrier: struct{}{}, injectErr: ot.ErrInvalidCarrier, }, { name: "extract: format type is HTTPHeaders, but carrier is not HTTPHeadersCarrier", injectCarrierType: ot.HTTPHeaders, injectCarrier: httpHeader, extractCarrierType: ot.HTTPHeaders, extractCarrier: struct{}{}, extractErr: ot.ErrInvalidCarrier, }, { name: "inject: format type is TextMap, but carrier is cannot be wrapped into propagation.TextMapCarrier", injectCarrierType: ot.TextMap, injectCarrier: struct{}{}, injectErr: ot.ErrInvalidCarrier, }, { name: "extract: format type is TextMap, but carrier is cannot be wrapped into propagation.TextMapCarrier", injectCarrierType: ot.TextMap, injectCarrier: otTextMap, extractCarrierType: ot.TextMap, extractCarrier: struct{}{}, extractErr: ot.ErrInvalidCarrier, }, { name: "inject: unsupported format type", injectCarrierType: ot.Binary, injectErr: ot.ErrUnsupportedFormat, }, { name: "extract: unsupported format type", injectCarrierType: ot.TextMap, injectCarrier: otTextMap, extractCarrierType: ot.Binary, extractCarrier: struct{}{}, extractErr: ot.ErrUnsupportedFormat, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { err := bridge.Inject(newBridgeSpanContext(trace.NewSpanContext(trace.SpanContextConfig{ TraceID: [16]byte{byte(1)}, SpanID: [8]byte{byte(2)}, }), nil), tc.injectCarrierType, tc.injectCarrier) assert.Equal(t, tc.injectErr, err) if tc.injectErr == nil { spanContext, err := bridge.Extract(tc.extractCarrierType, tc.extractCarrier) assert.Equal(t, tc.extractErr, err) if tc.extractErr == nil { bsc, ok := spanContext.(*bridgeSpanContext) assert.True(t, ok) require.NotNil(t, bsc) require.NotNil(t, bsc.SpanContext) require.NotNil(t, bsc.SpanID()) require.NotNil(t, bsc.TraceID()) assert.Equal(t, spanID.String(), bsc.SpanID().String()) assert.Equal(t, traceID.String(), bsc.TraceID().String()) } } }) } } type nonDeferWrapperTracer struct { 
*WrapperTracer } func (t *nonDeferWrapperTracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { // Run start on the parent wrapper with a brand new context // so `WithDeferredSetup` hasn't been called, and the OpenTracing context is injected. return t.WrapperTracer.Start(context.Background(), name, opts...) } func TestBridgeTracer_StartSpan(t *testing.T) { testCases := []struct { name string before func(*testing.T, *BridgeTracer) expectWarnings []string }{ { name: "with no option set", expectWarnings: []string{ "The OpenTelemetry tracer is not set, default no-op tracer is used! Call SetOpenTelemetryTracer to set it up.\n", }, }, { name: "with wrapper tracer set", before: func(t *testing.T, bridge *BridgeTracer) { wTracer := NewWrapperTracer(bridge, otel.Tracer("test")) bridge.SetOpenTelemetryTracer(wTracer) }, expectWarnings: []string(nil), }, { name: "with a non-defered wrapper tracer", before: func(t *testing.T, bridge *BridgeTracer) { wTracer := &nonDeferWrapperTracer{ NewWrapperTracer(bridge, otel.Tracer("test")), } bridge.SetOpenTelemetryTracer(wTracer) }, expectWarnings: []string{ "SDK should have deferred the context setup, see the documentation of go.opentelemetry.io/otel/bridge/opentracing/migration\n", }, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { var warningMessages []string bridge := NewBridgeTracer() bridge.SetWarningHandler(func(msg string) { warningMessages = append(warningMessages, msg) }) if tc.before != nil { tc.before(t, bridge) } span := bridge.StartSpan("test") assert.NotNil(t, span) assert.Equal(t, tc.expectWarnings, warningMessages) }) } } func Test_otTagToOTelAttr(t *testing.T) { key := attribute.Key("test") testCases := []struct { value interface{} expected attribute.KeyValue }{ { value: int8(12), expected: key.Int64(int64(12)), }, { value: uint8(12), expected: key.Int64(int64(12)), }, { value: int16(12), expected: key.Int64(int64(12)), }, { value: uint16(12), expected: key.Int64(int64(12)), }, } for _, tc := range testCases { t.Run(fmt.Sprintf("%s %v", reflect.TypeOf(tc.value), tc.value), func(t *testing.T) { att := otTagToOTelAttr(string(key), tc.value) assert.Equal(t, tc.expected, att) }) } } func Test_otTagsToOTelAttributesKindAndError(t *testing.T) { tracer := internal.NewMockTracer() sc := &bridgeSpanContext{} testCases := []struct { name string opt []ot.StartSpanOption expected trace.SpanKind }{ { name: "client", opt: []ot.StartSpanOption{ext.SpanKindRPCClient}, expected: trace.SpanKindClient, }, { name: "server", opt: []ot.StartSpanOption{ext.RPCServerOption(sc)}, expected: trace.SpanKindServer, }, { name: "client string", opt: []ot.StartSpanOption{ot.Tag{Key: "span.kind", Value: "client"}}, expected: trace.SpanKindClient, }, { name: "server string", opt: []ot.StartSpanOption{ot.Tag{Key: "span.kind", Value: "server"}}, expected: trace.SpanKindServer, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { b, _ := NewTracerPair(tracer) s := b.StartSpan(tc.name, tc.opt...) 
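			// The bridge is expected to translate the OpenTracing span.kind
			// tag into the OTel SpanKind recorded on the mock span created above.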
assert.Equal(t, s.(*bridgeSpan).otelSpan.(*internal.MockSpan).SpanKind, tc.expected) }) } } func TestBridge_SpanContext_IsSampled(t *testing.T) { testCases := []struct { name string flags trace.TraceFlags expected bool }{ { name: "not sampled", flags: 0, expected: false, }, { name: "sampled", flags: trace.FlagsSampled, expected: true, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { tracer := internal.NewMockTracer() tracer.TraceFlags = tc.flags b, _ := NewTracerPair(tracer) s := b.StartSpan("abc") sc := s.Context() assert.Equal(t, tc.expected, sc.(samplable).IsSampled()) }) } } func TestBridgeSpanContextPromotedMethods(t *testing.T) { bridge := NewBridgeTracer() bridge.SetTextMapPropagator(new(testTextMapPropagator)) tmc := newTextCarrier() type spanContextProvider interface { HasTraceID() bool TraceID() trace.TraceID HasSpanID() bool SpanID() trace.SpanID } err := bridge.Inject(newBridgeSpanContext(trace.NewSpanContext(trace.SpanContextConfig{ TraceID: [16]byte{byte(1)}, SpanID: [8]byte{byte(2)}, }), nil), ot.TextMap, tmc) assert.NoError(t, err) spanContext, err := bridge.Extract(ot.TextMap, tmc) assert.NoError(t, err) assert.NotPanics(t, func() { assert.Equal(t, spanID.String(), spanContext.(spanContextProvider).SpanID().String()) assert.Equal(t, traceID.String(), spanContext.(spanContextProvider).TraceID().String()) assert.True(t, spanContext.(spanContextProvider).HasSpanID()) assert.True(t, spanContext.(spanContextProvider).HasTraceID()) }) } opentelemetry-go-1.21.0/bridge/opentracing/doc.go000066400000000000000000000131061452547353200217460ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package opentracing implements a bridge that forwards OpenTracing API // calls to the OpenTelemetry SDK. // // To use the bridge, first create an OpenTelemetry tracer of // choice. Then use the NewTracerPair() function to create two tracers // - one implementing OpenTracing API (BridgeTracer) and one that // implements the OpenTelemetry API (WrapperTracer) and mostly // forwards the calls to the OpenTelemetry tracer of choice, but does // some extra steps to make the interaction between both APIs // working. If the OpenTelemetry tracer of choice already knows how to // cooperate with OpenTracing API through the OpenTracing bridge // (explained in detail below), then it is fine to skip the // WrapperTracer by calling the NewBridgeTracer() function to get the // bridge tracer and then passing the chosen OpenTelemetry tracer to // the SetOpenTelemetryTracer() function of the bridge tracer. // // To use an OpenTelemetry span as the parent of an OpenTracing span, // create a context using the ContextWithBridgeSpan() function of // the bridge tracer, and then use the StartSpanFromContext function // of the OpenTracing API. // // Bridge tracer also allows the user to install a warning handler // through the SetWarningHandler() function. 
The warning handler will // be called when there is some misbehavior of the OpenTelemetry // tracer with regard to the cooperation with the OpenTracing API. // // For an OpenTelemetry tracer to cooperate with OpenTracing API // through the BridgeTracer, the OpenTelemetry tracer needs to // (reasoning is below the list): // // 1. Return the same context it received in the Start() function if // migration.SkipContextSetup() returns true. // // 2. Implement the migration.DeferredContextSetupTracerExtension // interface. The implementation should setup the context it would // normally do in the Start() function if the // migration.SkipContextSetup() function returned false. Calling // ContextWithBridgeSpan() is not necessary. // // 3. Have an access to the BridgeTracer instance. // // 4. If the migration.SkipContextSetup() function returned false, the // tracer should use the ContextWithBridgeSpan() function to install the // created span as an active OpenTracing span. // // There are some differences between OpenTracing and OpenTelemetry // APIs, especially with regard to Go context handling. When a span is // created with an OpenTracing API (through the StartSpan() function) // the Go context is not available. BridgeTracer has access to the // OpenTelemetry tracer of choice, so in the StartSpan() function // BridgeTracer translates the parameters to the OpenTelemetry version // and uses the OpenTelemetry tracer's Start() function to actually // create a span. The OpenTelemetry Start() function takes the Go // context as a parameter, so BridgeTracer at this point passes a // temporary context to Start(). All the changes to the temporary // context will be lost at the end of the StartSpan() function, so the // OpenTelemetry tracer of choice should not do anything with the // context. If the returned context is different, BridgeTracer will // warn about it. The OpenTelemetry tracer of choice can learn about // this situation by using the migration.SkipContextSetup() // function. The tracer will receive an opportunity to set up the // context at a later stage. Usually after StartSpan() is finished, // users of the OpenTracing API are calling (either directly or // through the opentracing.StartSpanFromContext() helper function) the // opentracing.ContextWithSpan() function to insert the created // OpenTracing span into the context. At that time, the OpenTelemetry // tracer of choice has a chance of setting up the context through a // hook invoked inside the opentracing.ContextWithSpan() function. For // that to happen, the tracer should implement the // migration.DeferredContextSetupTracerExtension interface. This so // far explains the need for points 1. and 2. // // When the span is created with the OpenTelemetry API (with the // Start() function) then migration.SkipContextSetup() will return // false. This means that the tracer can do the usual setup of the // context, but it also should set up the active OpenTracing span in // the context. This is because OpenTracing API is not used at all in // the creation of the span, but the OpenTracing API may be used // during the time when the created OpenTelemetry span is current. For // this case to work, we need to also set up active OpenTracing span // in the context. This can be done with the ContextWithBridgeSpan() // function. This means that the OpenTelemetry tracer of choice needs // to have an access to the BridgeTracer instance. This should explain // the need for points 3. and 4. 
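//
// A minimal wiring sketch using the tracer pair (illustrative only; the
// tracer name "example" is a placeholder and error handling is omitted):
//
//	bridgeTracer, wrapperProvider := NewTracerPair(otel.Tracer("example"))
//	otel.SetTracerProvider(wrapperProvider)
//	// ot is github.com/opentracing/opentracing-go
//	ot.SetGlobalTracer(bridgeTracer)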
// // Another difference related to the Go context handling is in logging // - OpenTracing API does not take a context parameter in the // LogFields() function, so when the call to the function gets // translated to OpenTelemetry AddEvent() function, an empty context // is passed. package opentracing // import "go.opentelemetry.io/otel/bridge/opentracing" opentelemetry-go-1.21.0/bridge/opentracing/go.mod000066400000000000000000000012401452547353200217540ustar00rootroot00000000000000module go.opentelemetry.io/otel/bridge/opentracing go 1.20 replace go.opentelemetry.io/otel => ../.. replace go.opentelemetry.io/otel/trace => ../../trace require ( github.com/opentracing/opentracing-go v1.2.0 github.com/stretchr/testify v1.8.4 go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/trace v1.21.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) replace go.opentelemetry.io/otel/metric => ../../metric opentelemetry-go-1.21.0/bridge/opentracing/go.sum000066400000000000000000000034541452547353200220120ustar00rootroot00000000000000github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= opentelemetry-go-1.21.0/bridge/opentracing/internal/000077500000000000000000000000001452547353200224655ustar00rootroot00000000000000opentelemetry-go-1.21.0/bridge/opentracing/internal/doc.go000066400000000000000000000012501452547353200235570ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal // import "go.opentelemetry.io/otel/bridge/opentracing/internal" opentelemetry-go-1.21.0/bridge/opentracing/internal/mock.go000066400000000000000000000172711452547353200237550ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal // import "go.opentelemetry.io/otel/bridge/opentracing/internal" import ( "context" "math/rand" "reflect" "sync" "time" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/bridge/opentracing/migration" "go.opentelemetry.io/otel/codes" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" "go.opentelemetry.io/otel/trace/noop" ) //nolint:revive // ignoring missing comments for unexported global variables in an internal package. var ( ComponentKey = attribute.Key("component") ServiceKey = attribute.Key("service") StatusCodeKey = attribute.Key("status.code") StatusMessageKey = attribute.Key("status.message") ErrorKey = attribute.Key("error") NameKey = attribute.Key("name") ) type MockContextKeyValue struct { Key interface{} Value interface{} } type MockTracer struct { embedded.Tracer FinishedSpans []*MockSpan SpareTraceIDs []trace.TraceID SpareSpanIDs []trace.SpanID SpareContextKeyValues []MockContextKeyValue TraceFlags trace.TraceFlags randLock sync.Mutex rand *rand.Rand } var ( _ trace.Tracer = &MockTracer{} _ migration.DeferredContextSetupTracerExtension = &MockTracer{} ) func NewMockTracer() *MockTracer { return &MockTracer{ FinishedSpans: nil, SpareTraceIDs: nil, SpareSpanIDs: nil, SpareContextKeyValues: nil, rand: rand.New(rand.NewSource(time.Now().Unix())), } } func (t *MockTracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { config := trace.NewSpanStartConfig(opts...) 
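	// Honor an explicit start timestamp from the span start options; fall back
	// to time.Now() just below when none was supplied.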
startTime := config.Timestamp() if startTime.IsZero() { startTime = time.Now() } spanContext := trace.NewSpanContext(trace.SpanContextConfig{ TraceID: t.getTraceID(ctx, &config), SpanID: t.getSpanID(), TraceFlags: t.TraceFlags, }) span := &MockSpan{ mockTracer: t, officialTracer: t, spanContext: spanContext, Attributes: config.Attributes(), StartTime: startTime, EndTime: time.Time{}, ParentSpanID: t.getParentSpanID(ctx, &config), Events: nil, SpanKind: trace.ValidateSpanKind(config.SpanKind()), } if !migration.SkipContextSetup(ctx) { ctx = trace.ContextWithSpan(ctx, span) ctx = t.addSpareContextValue(ctx) } return ctx, span } func (t *MockTracer) addSpareContextValue(ctx context.Context) context.Context { if len(t.SpareContextKeyValues) > 0 { pair := t.SpareContextKeyValues[0] t.SpareContextKeyValues[0] = MockContextKeyValue{} t.SpareContextKeyValues = t.SpareContextKeyValues[1:] if len(t.SpareContextKeyValues) == 0 { t.SpareContextKeyValues = nil } ctx = context.WithValue(ctx, pair.Key, pair.Value) } return ctx } func (t *MockTracer) getTraceID(ctx context.Context, config *trace.SpanConfig) trace.TraceID { if parent := t.getParentSpanContext(ctx, config); parent.IsValid() { return parent.TraceID() } if len(t.SpareTraceIDs) > 0 { traceID := t.SpareTraceIDs[0] t.SpareTraceIDs = t.SpareTraceIDs[1:] if len(t.SpareTraceIDs) == 0 { t.SpareTraceIDs = nil } return traceID } return t.getRandTraceID() } func (t *MockTracer) getParentSpanID(ctx context.Context, config *trace.SpanConfig) trace.SpanID { if parent := t.getParentSpanContext(ctx, config); parent.IsValid() { return parent.SpanID() } return trace.SpanID{} } func (t *MockTracer) getParentSpanContext(ctx context.Context, config *trace.SpanConfig) trace.SpanContext { if !config.NewRoot() { return trace.SpanContextFromContext(ctx) } return trace.SpanContext{} } func (t *MockTracer) getSpanID() trace.SpanID { if len(t.SpareSpanIDs) > 0 { spanID := t.SpareSpanIDs[0] t.SpareSpanIDs = t.SpareSpanIDs[1:] if len(t.SpareSpanIDs) == 0 { t.SpareSpanIDs = nil } return spanID } return t.getRandSpanID() } func (t *MockTracer) getRandSpanID() trace.SpanID { t.randLock.Lock() defer t.randLock.Unlock() sid := trace.SpanID{} _, _ = t.rand.Read(sid[:]) return sid } func (t *MockTracer) getRandTraceID() trace.TraceID { t.randLock.Lock() defer t.randLock.Unlock() tid := trace.TraceID{} _, _ = t.rand.Read(tid[:]) return tid } func (t *MockTracer) DeferredContextSetupHook(ctx context.Context, span trace.Span) context.Context { return t.addSpareContextValue(ctx) } type MockEvent struct { Timestamp time.Time Name string Attributes []attribute.KeyValue } type MockSpan struct { embedded.Span mockTracer *MockTracer officialTracer trace.Tracer spanContext trace.SpanContext SpanKind trace.SpanKind recording bool Attributes []attribute.KeyValue StartTime time.Time EndTime time.Time ParentSpanID trace.SpanID Events []MockEvent } var ( _ trace.Span = &MockSpan{} _ migration.OverrideTracerSpanExtension = &MockSpan{} ) func (s *MockSpan) SpanContext() trace.SpanContext { return s.spanContext } func (s *MockSpan) IsRecording() bool { return s.recording } func (s *MockSpan) SetStatus(code codes.Code, msg string) { s.SetAttributes(StatusCodeKey.Int(int(code)), StatusMessageKey.String(msg)) } func (s *MockSpan) SetName(name string) { s.SetAttributes(NameKey.String(name)) } func (s *MockSpan) SetError(v bool) { s.SetAttributes(ErrorKey.Bool(v)) } func (s *MockSpan) SetAttributes(attributes ...attribute.KeyValue) { s.applyUpdate(attributes) } func (s *MockSpan) applyUpdate(update 
[]attribute.KeyValue) { updateM := make(map[attribute.Key]attribute.Value, len(update)) for _, kv := range update { updateM[kv.Key] = kv.Value } seen := make(map[attribute.Key]struct{}) for i, kv := range s.Attributes { if v, ok := updateM[kv.Key]; ok { s.Attributes[i].Value = v seen[kv.Key] = struct{}{} } } for k, v := range updateM { if _, ok := seen[k]; ok { continue } s.Attributes = append(s.Attributes, attribute.KeyValue{Key: k, Value: v}) } } func (s *MockSpan) End(options ...trace.SpanEndOption) { if !s.EndTime.IsZero() { return // already finished } config := trace.NewSpanEndConfig(options...) endTime := config.Timestamp() if endTime.IsZero() { endTime = time.Now() } s.EndTime = endTime s.mockTracer.FinishedSpans = append(s.mockTracer.FinishedSpans, s) } func (s *MockSpan) RecordError(err error, opts ...trace.EventOption) { if err == nil { return // no-op on nil error } if !s.EndTime.IsZero() { return // already finished } s.SetStatus(codes.Error, "") opts = append(opts, trace.WithAttributes( semconv.ExceptionType(reflect.TypeOf(err).String()), semconv.ExceptionMessage(err.Error()), )) s.AddEvent(semconv.ExceptionEventName, opts...) } func (s *MockSpan) Tracer() trace.Tracer { return s.officialTracer } func (s *MockSpan) AddEvent(name string, o ...trace.EventOption) { c := trace.NewEventConfig(o...) s.Events = append(s.Events, MockEvent{ Timestamp: c.Timestamp(), Name: name, Attributes: c.Attributes(), }) } func (s *MockSpan) OverrideTracer(tracer trace.Tracer) { s.officialTracer = tracer } func (s *MockSpan) TracerProvider() trace.TracerProvider { return noop.NewTracerProvider() } opentelemetry-go-1.21.0/bridge/opentracing/migration/000077500000000000000000000000001452547353200226425ustar00rootroot00000000000000opentelemetry-go-1.21.0/bridge/opentracing/migration/api.go000066400000000000000000000065271452547353200237540ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package migration provides interfaces and functions that are useful for // providing a cooperation of the OpenTelemetry tracers with the // OpenTracing API. package migration // import "go.opentelemetry.io/otel/bridge/opentracing/migration" import ( "context" "go.opentelemetry.io/otel/trace" ) // DeferredContextSetupTracerExtension is an interface an // OpenTelemetry tracer may implement in order to cooperate with the // calls to the OpenTracing API. // // Tracers implementing this interface should also use the // SkipContextSetup() function during creation of the span in the // Start() function to skip the configuration of the context. type DeferredContextSetupTracerExtension interface { // DeferredContextSetupHook is called by the bridge // OpenTracing tracer when opentracing.ContextWithSpan is // called. This allows the OpenTelemetry tracer to set up the // context in a way it would normally do during the Start() // function. 
Since OpenTracing API does not support // configuration of the context during span creation, it needs // to be deferred until the call to the // opentracing.ContextWithSpan happens. When bridge // OpenTracing tracer calls OpenTelemetry tracer's Start() // function, it passes a context that shouldn't be modified. DeferredContextSetupHook(ctx context.Context, span trace.Span) context.Context } // OverrideTracerSpanExtension is an interface an OpenTelemetry span // may implement in order to cooperate with the calls to the // OpenTracing API. // // TODO(krnowak): I'm actually not so sold on the idea… The reason for // introducing this interface was to have a span "created" by the // WrapperTracer return WrapperTracer from the Tracer() function, not // the real OpenTelemetry tracer that actually created the span. I'm // thinking that I could create a wrapperSpan type that wraps an // OpenTelemetry Span object and have WrapperTracer to alter the // current OpenTelemetry span in the context so it points to the // wrapped object, so the code in the tracer like // `trace.SpanFromContent().(*realSpan)` would still work. Another // argument for getting rid of this interface is that is only called // by the WrapperTracer - WrapperTracer likely shouldn't require any // changes in the underlying OpenTelemetry tracer to have things // somewhat working. // // See the "tracer mess" test in mix_test.go. type OverrideTracerSpanExtension interface { // OverrideTracer makes the span to return the passed tracer // from its Tracer() function. // // You don't need to implement this function if your // OpenTelemetry tracer cooperates well with the OpenTracing // API calls. In such case, there is no need to use the // WrapperTracer and thus no need to override the result of // the Tracer() function. OverrideTracer(tracer trace.Tracer) } opentelemetry-go-1.21.0/bridge/opentracing/migration/defer.go000066400000000000000000000026551452547353200242660ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package migration // import "go.opentelemetry.io/otel/bridge/opentracing/migration" import ( "context" ) type doDeferredContextSetupType struct{} var ( doDeferredContextSetupTypeKey = doDeferredContextSetupType{} doDeferredContextSetupTypeValue = doDeferredContextSetupType{} ) // WithDeferredSetup returns a context that can tell the OpenTelemetry // tracer to skip the context setup in the Start() function. func WithDeferredSetup(ctx context.Context) context.Context { return context.WithValue(ctx, doDeferredContextSetupTypeKey, doDeferredContextSetupTypeValue) } // SkipContextSetup can tell the OpenTelemetry tracer to skip the // context setup during the span creation in the Start() function. 
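//
// A sketch of how a cooperating tracer's Start() might use it (illustrative
// only; myTracer and newSpan are hypothetical names):
//
//	func (t *myTracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
//		span := t.newSpan(ctx, name, opts...)
//		if !SkipContextSetup(ctx) {
//			ctx = trace.ContextWithSpan(ctx, span)
//		}
//		return ctx, span
//	}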
func SkipContextSetup(ctx context.Context) bool { _, ok := ctx.Value(doDeferredContextSetupTypeKey).(doDeferredContextSetupType) return ok } opentelemetry-go-1.21.0/bridge/opentracing/mix_test.go000066400000000000000000000510161452547353200230370ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package opentracing import ( "context" "fmt" "testing" ot "github.com/opentracing/opentracing-go" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/baggage" "go.opentelemetry.io/otel/bridge/opentracing/internal" "go.opentelemetry.io/otel/trace" ) type mixedAPIsTestCase struct { desc string setup func(*testing.T, *internal.MockTracer) run func(*testing.T, context.Context) check func(*testing.T, *internal.MockTracer) } func getMixedAPIsTestCases() []mixedAPIsTestCase { st := newSimpleTest() cast := newCurrentActiveSpanTest() coin := newContextIntactTest() bip := newBaggageItemsPreservationTest() bio := newBaggageInteroperationTest() return []mixedAPIsTestCase{ { desc: "simple otel -> ot -> otel", setup: st.setup, run: st.runOtelOTOtel, check: st.check, }, { desc: "simple ot -> otel -> ot", setup: st.setup, run: st.runOTOtelOT, check: st.check, }, { desc: "current/active span otel -> ot -> otel", setup: cast.setup, run: cast.runOtelOTOtel, check: cast.check, }, { desc: "current/active span ot -> otel -> ot", setup: cast.setup, run: cast.runOTOtelOT, check: cast.check, }, { desc: "context intact otel -> ot -> otel", setup: coin.setup, run: coin.runOtelOTOtel, check: coin.check, }, { desc: "context intact ot -> otel -> ot", setup: coin.setup, run: coin.runOTOtelOT, check: coin.check, }, { desc: "baggage items preservation across layers otel -> ot -> otel", setup: bip.setup, run: bip.runOtelOTOtel, check: bip.check, }, { desc: "baggage items preservation across layers ot -> otel -> ot", setup: bip.setup, run: bip.runOTOtelOT, check: bip.check, }, { desc: "baggage items interoperation across layers ot -> otel -> ot", setup: bio.setup, run: bio.runOTOtelOT, check: bio.check, }, { desc: "baggage items interoperation across layers otel -> ot -> otel", setup: bio.setup, run: bio.runOtelOTOtel, check: bio.check, }, } } func TestMixedAPIs(t *testing.T) { for idx, tc := range getMixedAPIsTestCases() { t.Logf("Running test case %d: %s", idx, tc.desc) mockOtelTracer := internal.NewMockTracer() ctx, otTracer, otelProvider := NewTracerPairWithContext(context.Background(), mockOtelTracer) otTracer.SetWarningHandler(func(msg string) { t.Log(msg) }) otel.SetTracerProvider(otelProvider) ot.SetGlobalTracer(otTracer) tc.setup(t, mockOtelTracer) tc.run(t, ctx) tc.check(t, mockOtelTracer) } } // simple test type simpleTest struct { traceID trace.TraceID spanIDs []trace.SpanID } func newSimpleTest() *simpleTest { return &simpleTest{ traceID: simpleTraceID(), spanIDs: simpleSpanIDs(3), } } func (st *simpleTest) setup(t *testing.T, tracer *internal.MockTracer) { tracer.SpareTraceIDs = append(tracer.SpareTraceIDs, st.traceID) 
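	// Also pre-seed deterministic span IDs so that check() can match the
	// parent/child relationships of the finished spans.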
tracer.SpareSpanIDs = append(tracer.SpareSpanIDs, st.spanIDs...) } func (st *simpleTest) check(t *testing.T, tracer *internal.MockTracer) { checkTraceAndSpans(t, tracer, st.traceID, st.spanIDs) } func (st *simpleTest) runOtelOTOtel(t *testing.T, ctx context.Context) { runOtelOTOtel(t, ctx, "simple", st.noop) } func (st *simpleTest) runOTOtelOT(t *testing.T, ctx context.Context) { runOTOtelOT(t, ctx, "simple", st.noop) } func (st *simpleTest) noop(t *testing.T, ctx context.Context) context.Context { return ctx } // current/active span test type currentActiveSpanTest struct { traceID trace.TraceID spanIDs []trace.SpanID recordedCurrentOtelSpanIDs []trace.SpanID recordedActiveOTSpanIDs []trace.SpanID } func newCurrentActiveSpanTest() *currentActiveSpanTest { return ¤tActiveSpanTest{ traceID: simpleTraceID(), spanIDs: simpleSpanIDs(3), } } func (cast *currentActiveSpanTest) setup(t *testing.T, tracer *internal.MockTracer) { tracer.SpareTraceIDs = append(tracer.SpareTraceIDs, cast.traceID) tracer.SpareSpanIDs = append(tracer.SpareSpanIDs, cast.spanIDs...) cast.recordedCurrentOtelSpanIDs = nil cast.recordedActiveOTSpanIDs = nil } func (cast *currentActiveSpanTest) check(t *testing.T, tracer *internal.MockTracer) { checkTraceAndSpans(t, tracer, cast.traceID, cast.spanIDs) if len(cast.recordedCurrentOtelSpanIDs) != len(cast.spanIDs) { t.Errorf("Expected to have %d recorded Otel current spans, got %d", len(cast.spanIDs), len(cast.recordedCurrentOtelSpanIDs)) } if len(cast.recordedActiveOTSpanIDs) != len(cast.spanIDs) { t.Errorf("Expected to have %d recorded OT active spans, got %d", len(cast.spanIDs), len(cast.recordedActiveOTSpanIDs)) } minLen := min(len(cast.recordedCurrentOtelSpanIDs), len(cast.spanIDs)) minLen = min(minLen, len(cast.recordedActiveOTSpanIDs)) for i := 0; i < minLen; i++ { if cast.recordedCurrentOtelSpanIDs[i] != cast.spanIDs[i] { t.Errorf("Expected span idx %d (%d) to be recorded as current span in Otel, got %d", i, cast.spanIDs[i], cast.recordedCurrentOtelSpanIDs[i]) } if cast.recordedActiveOTSpanIDs[i] != cast.spanIDs[i] { t.Errorf("Expected span idx %d (%d) to be recorded as active span in OT, got %d", i, cast.spanIDs[i], cast.recordedActiveOTSpanIDs[i]) } } } func (cast *currentActiveSpanTest) runOtelOTOtel(t *testing.T, ctx context.Context) { runOtelOTOtel(t, ctx, "cast", cast.recordSpans) } func (cast *currentActiveSpanTest) runOTOtelOT(t *testing.T, ctx context.Context) { runOTOtelOT(t, ctx, "cast", cast.recordSpans) } func (cast *currentActiveSpanTest) recordSpans(t *testing.T, ctx context.Context) context.Context { spanID := trace.SpanContextFromContext(ctx).SpanID() cast.recordedCurrentOtelSpanIDs = append(cast.recordedCurrentOtelSpanIDs, spanID) spanID = trace.SpanID{} if bridgeSpan, ok := ot.SpanFromContext(ctx).(*bridgeSpan); ok { spanID = bridgeSpan.otelSpan.SpanContext().SpanID() } cast.recordedActiveOTSpanIDs = append(cast.recordedActiveOTSpanIDs, spanID) return ctx } // context intact test type contextIntactTest struct { contextKeyValues []internal.MockContextKeyValue recordedContextValues []interface{} recordIdx int } type coin1Key struct{} type coin1Value struct{} type coin2Key struct{} type coin2Value struct{} type coin3Key struct{} type coin3Value struct{} func newContextIntactTest() *contextIntactTest { return &contextIntactTest{ contextKeyValues: []internal.MockContextKeyValue{ { Key: coin1Key{}, Value: coin1Value{}, }, { Key: coin2Key{}, Value: coin2Value{}, }, { Key: coin3Key{}, Value: coin3Value{}, }, }, } } func (coin *contextIntactTest) setup(t 
*testing.T, tracer *internal.MockTracer) { tracer.SpareContextKeyValues = append(tracer.SpareContextKeyValues, coin.contextKeyValues...) coin.recordedContextValues = nil coin.recordIdx = 0 } func (coin *contextIntactTest) check(t *testing.T, tracer *internal.MockTracer) { if len(coin.recordedContextValues) != len(coin.contextKeyValues) { t.Errorf("Expected to have %d recorded context values, got %d", len(coin.contextKeyValues), len(coin.recordedContextValues)) } minLen := min(len(coin.recordedContextValues), len(coin.contextKeyValues)) for i := 0; i < minLen; i++ { key := coin.contextKeyValues[i].Key value := coin.contextKeyValues[i].Value gotValue := coin.recordedContextValues[i] if value != gotValue { t.Errorf("Expected value %#v for key %#v, got %#v", value, key, gotValue) } } } func (coin *contextIntactTest) runOtelOTOtel(t *testing.T, ctx context.Context) { runOtelOTOtel(t, ctx, "coin", coin.recordValue) } func (coin *contextIntactTest) runOTOtelOT(t *testing.T, ctx context.Context) { runOTOtelOT(t, ctx, "coin", coin.recordValue) } func (coin *contextIntactTest) recordValue(t *testing.T, ctx context.Context) context.Context { if coin.recordIdx >= len(coin.contextKeyValues) { t.Errorf("Too many steps?") return ctx } key := coin.contextKeyValues[coin.recordIdx].Key coin.recordIdx++ coin.recordedContextValues = append(coin.recordedContextValues, ctx.Value(key)) return ctx } // baggage items preservation test type bipBaggage struct { key string value string } type baggageItemsPreservationTest struct { baggageItems []bipBaggage step int recordedBaggage []map[string]string } func newBaggageItemsPreservationTest() *baggageItemsPreservationTest { return &baggageItemsPreservationTest{ baggageItems: []bipBaggage{ { key: "First", value: "one", }, { key: "Second", value: "two", }, { key: "Third", value: "three", }, }, } } func (bip *baggageItemsPreservationTest) setup(t *testing.T, tracer *internal.MockTracer) { bip.step = 0 bip.recordedBaggage = nil } func (bip *baggageItemsPreservationTest) check(t *testing.T, tracer *internal.MockTracer) { if len(bip.recordedBaggage) != len(bip.baggageItems) { t.Errorf("Expected %d recordings, got %d", len(bip.baggageItems), len(bip.recordedBaggage)) } minLen := min(len(bip.recordedBaggage), len(bip.baggageItems)) for i := 0; i < minLen; i++ { recordedItems := bip.recordedBaggage[i] if len(recordedItems) != i+1 { t.Errorf("Expected %d recorded baggage items in recording %d, got %d", i+1, i+1, len(bip.recordedBaggage[i])) } minItemLen := min(len(bip.baggageItems), i+1) for j := 0; j < minItemLen; j++ { expectedItem := bip.baggageItems[j] if gotValue, ok := recordedItems[expectedItem.key]; !ok { t.Errorf("Missing baggage item %q in recording %d", expectedItem.key, i+1) } else if gotValue != expectedItem.value { t.Errorf("Expected recorded baggage item %q in recording %d + 1to be %q, got %q", expectedItem.key, i, expectedItem.value, gotValue) } else { delete(recordedItems, expectedItem.key) } } for key, value := range recordedItems { t.Errorf("Unexpected baggage item in recording %d: %q -> %q", i+1, key, value) } } } func (bip *baggageItemsPreservationTest) runOtelOTOtel(t *testing.T, ctx context.Context) { runOtelOTOtel(t, ctx, "bip", bip.addAndRecordBaggage) } func (bip *baggageItemsPreservationTest) runOTOtelOT(t *testing.T, ctx context.Context) { runOTOtelOT(t, ctx, "bip", bip.addAndRecordBaggage) } func (bip *baggageItemsPreservationTest) addAndRecordBaggage(t *testing.T, ctx context.Context) context.Context { if bip.step >= len(bip.baggageItems) { 
t.Errorf("Too many steps?") return ctx } span := ot.SpanFromContext(ctx) if span == nil { t.Errorf("No active OpenTracing span") return ctx } idx := bip.step bip.step++ span.SetBaggageItem(bip.baggageItems[idx].key, bip.baggageItems[idx].value) sctx := span.Context() recording := make(map[string]string) sctx.ForeachBaggageItem(func(key, value string) bool { recording[key] = value return true }) bip.recordedBaggage = append(bip.recordedBaggage, recording) return ctx } // baggage interoperation test type baggageInteroperationTest struct { baggageItems []bipBaggage step int recordedOTBaggage []map[string]string recordedOtelBaggage []map[string]string } func newBaggageInteroperationTest() *baggageInteroperationTest { return &baggageInteroperationTest{ baggageItems: []bipBaggage{ { key: "First", value: "one", }, { key: "Second", value: "two", }, { key: "Third", value: "three", }, }, } } func (bio *baggageInteroperationTest) setup(t *testing.T, tracer *internal.MockTracer) { bio.step = 0 bio.recordedOTBaggage = nil bio.recordedOtelBaggage = nil } func (bio *baggageInteroperationTest) check(t *testing.T, tracer *internal.MockTracer) { checkBIORecording(t, "OT", bio.baggageItems, bio.recordedOTBaggage) checkBIORecording(t, "Otel", bio.baggageItems, bio.recordedOtelBaggage) } func checkBIORecording(t *testing.T, apiDesc string, initialItems []bipBaggage, recordings []map[string]string) { // expect recordings count to equal the number of initial // items // each recording should have a duplicated item from initial // items, one with OT suffix, another one with Otel suffix // expect each subsequent recording to have two more items, up // to double of the count of the initial items if len(initialItems) != len(recordings) { t.Errorf("Expected %d recordings from %s, got %d", len(initialItems), apiDesc, len(recordings)) } minRecLen := min(len(initialItems), len(recordings)) for i := 0; i < minRecLen; i++ { recordedItems := recordings[i] expectedItemsInStep := (i + 1) * 2 if expectedItemsInStep != len(recordedItems) { t.Errorf("Expected %d recorded items in recording %d from %s, got %d", expectedItemsInStep, i, apiDesc, len(recordedItems)) } recordedItemsCopy := make(map[string]string, len(recordedItems)) for k, v := range recordedItems { recordedItemsCopy[k] = v } for j := 0; j < i+1; j++ { otKey, otelKey := generateBaggageKeys(initialItems[j].key) value := initialItems[j].value for _, k := range []string{otKey, otelKey} { if v, ok := recordedItemsCopy[k]; ok { if value != v { t.Errorf("Expected value %s under key %s in recording %d from %s, got %s", value, k, i, apiDesc, v) } delete(recordedItemsCopy, k) } else { t.Errorf("Missing key %s in recording %d from %s", k, i, apiDesc) } } } for k, v := range recordedItemsCopy { t.Errorf("Unexpected key-value pair %s = %s in recording %d from %s", k, v, i, apiDesc) } } } func (bio *baggageInteroperationTest) runOtelOTOtel(t *testing.T, ctx context.Context) { runOtelOTOtel(t, ctx, "bio", bio.addAndRecordBaggage) } func (bio *baggageInteroperationTest) runOTOtelOT(t *testing.T, ctx context.Context) { runOTOtelOT(t, ctx, "bio", bio.addAndRecordBaggage) } func (bio *baggageInteroperationTest) addAndRecordBaggage(t *testing.T, ctx context.Context) context.Context { if bio.step >= len(bio.baggageItems) { t.Errorf("Too many steps?") return ctx } otSpan := ot.SpanFromContext(ctx) if otSpan == nil { t.Errorf("No active OpenTracing span") return ctx } idx := bio.step bio.step++ key := bio.baggageItems[idx].key otKey, otelKey := generateBaggageKeys(key) value := 
bio.baggageItems[idx].value otSpan.SetBaggageItem(otKey, value) m, err := baggage.NewMember(otelKey, value) if err != nil { t.Error(err) return ctx } b, err := baggage.FromContext(ctx).SetMember(m) if err != nil { t.Error(err) return ctx } ctx = baggage.ContextWithBaggage(ctx, b) otRecording := make(map[string]string) otSpan.Context().ForeachBaggageItem(func(key, value string) bool { otRecording[key] = value return true }) otelRecording := make(map[string]string) for _, m := range baggage.FromContext(ctx).Members() { otelRecording[m.Key()] = m.Value() } bio.recordedOTBaggage = append(bio.recordedOTBaggage, otRecording) bio.recordedOtelBaggage = append(bio.recordedOtelBaggage, otelRecording) return ctx } func generateBaggageKeys(key string) (otKey, otelKey string) { otKey, otelKey = key+"-Ot", key+"-Otel" return } // helpers func checkTraceAndSpans(t *testing.T, tracer *internal.MockTracer, expectedTraceID trace.TraceID, expectedSpanIDs []trace.SpanID) { expectedSpanCount := len(expectedSpanIDs) // reverse spanIDs, since first span ID belongs to root, that // finishes last spanIDs := make([]trace.SpanID, len(expectedSpanIDs)) copy(spanIDs, expectedSpanIDs) reverse(len(spanIDs), func(i, j int) { spanIDs[i], spanIDs[j] = spanIDs[j], spanIDs[i] }) // the last finished span has no parent parentSpanIDs := append(spanIDs[1:], trace.SpanID{}) sks := map[trace.SpanID]trace.SpanKind{ {125}: trace.SpanKindProducer, {124}: trace.SpanKindInternal, {123}: trace.SpanKindClient, } if len(tracer.FinishedSpans) != expectedSpanCount { t.Errorf("Expected %d finished spans, got %d", expectedSpanCount, len(tracer.FinishedSpans)) } for idx, span := range tracer.FinishedSpans { sctx := span.SpanContext() if sctx.TraceID() != expectedTraceID { t.Errorf("Expected trace ID %v in span %d (%d), got %v", expectedTraceID, idx, sctx.SpanID(), sctx.TraceID()) } expectedSpanID := spanIDs[idx] expectedParentSpanID := parentSpanIDs[idx] if sctx.SpanID() != expectedSpanID { t.Errorf("Expected finished span %d to have span ID %d, but got %d", idx, expectedSpanID, sctx.SpanID()) } if span.ParentSpanID != expectedParentSpanID { t.Errorf("Expected finished span %d (span ID: %d) to have parent span ID %d, but got %d", idx, sctx.SpanID(), expectedParentSpanID, span.ParentSpanID) } if span.SpanKind != sks[span.SpanContext().SpanID()] { t.Errorf("Expected finished span %d (span ID: %d) to have span.kind to be '%v' but was '%v'", idx, sctx.SpanID(), sks[span.SpanContext().SpanID()], span.SpanKind) } } } func reverse(length int, swap func(i, j int)) { for left, right := 0, length-1; left < right; left, right = left+1, right-1 { swap(left, right) } } func simpleTraceID() trace.TraceID { return [16]byte{123, 42} } func simpleSpanIDs(count int) []trace.SpanID { base := []trace.SpanID{ {123}, {124}, {125}, {126}, {127}, {128}, } return base[:count] } func min(a, b int) int { if a > b { return b } return a } func runOtelOTOtel(t *testing.T, ctx context.Context, name string, callback func(*testing.T, context.Context) context.Context) { tr := otel.Tracer("") ctx, span := tr.Start(ctx, fmt.Sprintf("%s_Otel_OTOtel", name), trace.WithSpanKind(trace.SpanKindClient)) defer span.End() ctx = callback(t, ctx) func(ctx2 context.Context) { span, ctx2 := ot.StartSpanFromContext(ctx2, fmt.Sprintf("%sOtel_OT_Otel", name)) defer span.Finish() ctx2 = callback(t, ctx2) func(ctx3 context.Context) { ctx3, span := tr.Start(ctx3, fmt.Sprintf("%sOtelOT_Otel_", name), trace.WithSpanKind(trace.SpanKindProducer)) defer span.End() _ = callback(t, ctx3) }(ctx2) }(ctx) 
} func runOTOtelOT(t *testing.T, ctx context.Context, name string, callback func(*testing.T, context.Context) context.Context) { tr := otel.Tracer("") span, ctx := ot.StartSpanFromContext(ctx, fmt.Sprintf("%s_OT_OtelOT", name), ot.Tag{Key: "span.kind", Value: "client"}) defer span.Finish() ctx = callback(t, ctx) func(ctx2 context.Context) { ctx2, span := tr.Start(ctx2, fmt.Sprintf("%sOT_Otel_OT", name)) defer span.End() ctx2 = callback(t, ctx2) func(ctx3 context.Context) { span, ctx3 := ot.StartSpanFromContext(ctx3, fmt.Sprintf("%sOTOtel_OT_", name), ot.Tag{Key: "span.kind", Value: "producer"}) defer span.Finish() _ = callback(t, ctx3) }(ctx2) }(ctx) } func TestOtTagToOTelAttrCheckTypeConversions(t *testing.T) { tableTest := []struct { key string value interface{} expectedValueType attribute.Type }{ { key: "bool to bool", value: true, expectedValueType: attribute.BOOL, }, { key: "int to int64", value: 123, expectedValueType: attribute.INT64, }, { key: "uint to string", value: uint(1234), expectedValueType: attribute.STRING, }, { key: "int32 to int64", value: int32(12345), expectedValueType: attribute.INT64, }, { key: "uint32 to int64", value: uint32(123456), expectedValueType: attribute.INT64, }, { key: "int64 to int64", value: int64(1234567), expectedValueType: attribute.INT64, }, { key: "uint64 to string", value: uint64(12345678), expectedValueType: attribute.STRING, }, { key: "float32 to float64", value: float32(3.14), expectedValueType: attribute.FLOAT64, }, { key: "float64 to float64", value: float64(3.14), expectedValueType: attribute.FLOAT64, }, { key: "string to string", value: "string_value", expectedValueType: attribute.STRING, }, { key: "unexpected type to string", value: struct{}{}, expectedValueType: attribute.STRING, }, } for _, test := range tableTest { got := otTagToOTelAttr(test.key, test.value) if test.expectedValueType != got.Value.Type() { t.Errorf("Expected type %s, but got %s after conversion '%v' value", test.expectedValueType, got.Value.Type(), test.value) } } } opentelemetry-go-1.21.0/bridge/opentracing/provider.go000066400000000000000000000041361452547353200230360ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package opentracing // import "go.opentelemetry.io/otel/bridge/opentracing" import ( "sync" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) // TracerProvider is an OpenTelemetry TracerProvider that wraps an OpenTracing // Tracer. type TracerProvider struct { embedded.TracerProvider bridge *BridgeTracer provider trace.TracerProvider tracers map[wrappedTracerKey]*WrapperTracer mtx sync.Mutex } var _ trace.TracerProvider = (*TracerProvider)(nil) // NewTracerProvider returns a new TracerProvider that creates new instances of // WrapperTracer from the given TracerProvider. 
func NewTracerProvider(bridge *BridgeTracer, provider trace.TracerProvider) *TracerProvider { return &TracerProvider{ bridge: bridge, provider: provider, tracers: make(map[wrappedTracerKey]*WrapperTracer), } } type wrappedTracerKey struct { name string version string } // Tracer creates a WrappedTracer that wraps the OpenTelemetry tracer for each call to // Tracer(). Repeated calls to Tracer() with the same configuration will look up and // return an existing instance of WrapperTracer. func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { p.mtx.Lock() defer p.mtx.Unlock() c := trace.NewTracerConfig(opts...) key := wrappedTracerKey{ name: name, version: c.InstrumentationVersion(), } if t, ok := p.tracers[key]; ok { return t } wrapper := NewWrapperTracer(p.bridge, p.provider.Tracer(name, opts...)) p.tracers[key] = wrapper return wrapper } opentelemetry-go-1.21.0/bridge/opentracing/provider_test.go000066400000000000000000000051761452547353200241020ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package opentracing import ( "testing" "go.opentelemetry.io/otel/bridge/opentracing/internal" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) type namedMockTracer struct { name string *internal.MockTracer } type namedMockTracerProvider struct{ embedded.TracerProvider } var _ trace.TracerProvider = (*namedMockTracerProvider)(nil) // Tracer returns the WrapperTracer associated with the WrapperTracerProvider. 
func (p *namedMockTracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { return &namedMockTracer{ name: name, MockTracer: internal.NewMockTracer(), } } func TestTracerProvider(t *testing.T) { // assertMockTracerName casts tracer into a named mock tracer provided by // namedMockTracerProvider, and asserts against its name assertMockTracerName := func(t *testing.T, tracer trace.Tracer, name string) { // Unwrap the tracer wrapped := tracer.(*WrapperTracer) tracer = wrapped.tracer // Cast into the underlying type and assert if mock, ok := tracer.(*namedMockTracer); ok { if name != mock.name { t.Errorf("expected name %q, got %q", name, mock.name) } } else if !ok { t.Errorf("expected *namedMockTracer, got %T", mock) } } var ( foobar = "foobar" bazbar = "bazbar" provider = NewTracerProvider(nil, &namedMockTracerProvider{}) ) t.Run("Tracers should be created with foobar from provider", func(t *testing.T) { tracer := provider.Tracer(foobar) assertMockTracerName(t, tracer, foobar) }) t.Run("Repeated requests to create a tracer should provide the existing tracer", func(t *testing.T) { tracer1 := provider.Tracer(foobar) assertMockTracerName(t, tracer1, foobar) tracer2 := provider.Tracer(foobar) assertMockTracerName(t, tracer2, foobar) tracer3 := provider.Tracer(bazbar) assertMockTracerName(t, tracer3, bazbar) if tracer1 != tracer2 { t.Errorf("expected the same tracer, got different tracers") } if tracer1 == tracer3 || tracer2 == tracer3 { t.Errorf("expected different tracers, got the same tracer") } }) } opentelemetry-go-1.21.0/bridge/opentracing/test/000077500000000000000000000000001452547353200216305ustar00rootroot00000000000000opentelemetry-go-1.21.0/bridge/opentracing/test/bridge_grpc_test.go000066400000000000000000000061311452547353200254660ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
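// The TracerProvider above wraps an OpenTelemetry trace.TracerProvider so that
// every Tracer it hands out is a WrapperTracer tied to one BridgeTracer, cached
// per name/version pair. A minimal wiring sketch, assuming an SDK provider from
// go.opentelemetry.io/otel/sdk/trace is available; the import aliases and names
// below are illustrative only, not part of this package:
//
//	sdk := sdktrace.NewTracerProvider()                       // assumed SDK setup
//	bridge := otelbridge.NewBridgeTracer()
//	provider := otelbridge.NewTracerProvider(bridge, sdk)
//	bridge.SetOpenTelemetryTracer(provider.Tracer("bridge"))
//	ot.SetGlobalTracer(bridge)        // OpenTracing callers go through the bridge
//	otel.SetTracerProvider(provider)  // OpenTelemetry callers get WrapperTracers
//
// Repeated calls to provider.Tracer with the same name and version return the
// cached WrapperTracer, which is the behavior asserted by TestTracerProvider above.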
package test import ( "context" "net" "testing" "time" otgrpc "github.com/opentracing-contrib/go-grpc" testpb "github.com/opentracing-contrib/go-grpc/test/otgrpc_testing" ot "github.com/opentracing/opentracing-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" ototel "go.opentelemetry.io/otel/bridge/opentracing" "go.opentelemetry.io/otel/bridge/opentracing/internal" "go.opentelemetry.io/otel/propagation" ) type testGRPCServer struct{} func (*testGRPCServer) UnaryCall(ctx context.Context, r *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { return &testpb.SimpleResponse{Payload: r.Payload * 2}, nil } func (*testGRPCServer) StreamingOutputCall(*testpb.SimpleRequest, testpb.TestService_StreamingOutputCallServer) error { return nil } func (*testGRPCServer) StreamingInputCall(testpb.TestService_StreamingInputCallServer) error { return nil } func (*testGRPCServer) StreamingBidirectionalCall(testpb.TestService_StreamingBidirectionalCallServer) error { return nil } func startTestGRPCServer(t *testing.T, tracer ot.Tracer) (*grpc.Server, net.Addr) { lis, _ := net.Listen("tcp", ":0") server := grpc.NewServer( grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(tracer)), ) testpb.RegisterTestServiceServer(server, &testGRPCServer{}) go func() { err := server.Serve(lis) require.NoError(t, err) }() return server, lis.Addr() } func TestBridgeTracer_ExtractAndInject_gRPC(t *testing.T) { tracer := internal.NewMockTracer() bridge := ototel.NewBridgeTracer() bridge.SetOpenTelemetryTracer(tracer) bridge.SetTextMapPropagator(propagation.TraceContext{}) srv, addr := startTestGRPCServer(t, bridge) defer srv.Stop() conn, err := grpc.Dial( addr.String(), grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(bridge)), ) require.NoError(t, err) cli := testpb.NewTestServiceClient(conn) ctx, cx := context.WithTimeout(context.Background(), 10*time.Second) defer cx() res, err := cli.UnaryCall(ctx, &testpb.SimpleRequest{Payload: 42}) require.NoError(t, err) assert.EqualValues(t, 84, res.Payload) checkSpans := func() bool { return len(tracer.FinishedSpans) == 2 } require.Eventuallyf(t, checkSpans, 5*time.Second, 5*time.Millisecond, "expecting two spans") assert.Equal(t, tracer.FinishedSpans[0].SpanContext().TraceID(), tracer.FinishedSpans[1].SpanContext().TraceID(), "expecting same trace ID", ) } opentelemetry-go-1.21.0/bridge/opentracing/test/go.mod000066400000000000000000000023021452547353200227330ustar00rootroot00000000000000module go.opentelemetry.io/otel/bridge/opentracing/test go 1.20 replace go.opentelemetry.io/otel => ../../.. 
replace go.opentelemetry.io/otel/bridge/opentracing => ../ replace go.opentelemetry.io/otel/trace => ../../../trace require ( github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e github.com/opentracing/opentracing-go v1.2.0 github.com/stretchr/testify v1.8.4 go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/bridge/opentracing v1.21.0 google.golang.org/grpc v1.59.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/otel/trace v1.21.0 // indirect golang.org/x/net v0.17.0 // indirect golang.org/x/sys v0.14.0 // indirect golang.org/x/text v0.13.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) replace go.opentelemetry.io/otel/metric => ../../../metric opentelemetry-go-1.21.0/bridge/opentracing/test/go.sum000066400000000000000000000143421452547353200227670ustar00rootroot00000000000000cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= 
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= opentelemetry-go-1.21.0/bridge/opentracing/util.go000066400000000000000000000036331452547353200221620ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package opentracing // import "go.opentelemetry.io/otel/bridge/opentracing" import ( "context" "go.opentelemetry.io/otel/trace" ) // NewTracerPair is a utility function that creates a BridgeTracer and a // WrapperTracerProvider. WrapperTracerProvider creates a single instance of // WrapperTracer. The BridgeTracer forwards the calls to the WrapperTracer // that wraps the passed tracer. BridgeTracer and WrapperTracerProvider are // returned to the caller and the caller is expected to register BridgeTracer // with opentracing and WrapperTracerProvider with opentelemetry. func NewTracerPair(tracer trace.Tracer) (*BridgeTracer, *WrapperTracerProvider) { bridgeTracer := NewBridgeTracer() wrapperProvider := NewWrappedTracerProvider(bridgeTracer, tracer) bridgeTracer.SetOpenTelemetryTracer(wrapperProvider.Tracer("")) return bridgeTracer, wrapperProvider } // NewTracerPairWithContext is a convenience function. It calls NewTracerPair // and returns a hooked version of ctx with the created BridgeTracer along // with the BridgeTracer and WrapperTracerProvider. func NewTracerPairWithContext(ctx context.Context, tracer trace.Tracer) (context.Context, *BridgeTracer, *WrapperTracerProvider) { bridgeTracer, wrapperProvider := NewTracerPair(tracer) ctx = bridgeTracer.NewHookedContext(ctx) return ctx, bridgeTracer, wrapperProvider } opentelemetry-go-1.21.0/bridge/opentracing/wrapper.go000066400000000000000000000075401452547353200226660ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
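// A short usage sketch for the NewTracerPair helpers defined in util.go above,
// assuming an OpenTelemetry tracer is already configured (for example from the
// SDK); the identifiers here are illustrative, not prescribed by this package:
//
//	tp := sdktrace.NewTracerProvider()                          // assumed setup
//	ctx, bridgeTracer, wrapperProvider := opentracing.NewTracerPairWithContext(
//		context.Background(), tp.Tracer("app"))
//	ot.SetGlobalTracer(bridgeTracer)         // existing OpenTracing call sites
//	otel.SetTracerProvider(wrapperProvider)  // new OpenTelemetry call sites
//	_ = ctx // pass the hooked context to code that mixes both APIs
//
// NewTracerPair provides the same pair without hooking a context.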
package opentracing // import "go.opentelemetry.io/otel/bridge/opentracing" import ( "context" "go.opentelemetry.io/otel/bridge/opentracing/migration" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) // WrapperTracerProvider is an OpenTelemetry TracerProvider that wraps an // OpenTracing Tracer, created by the deprecated NewWrappedTracerProvider. // // Deprecated: Use the TracerProvider from NewTracerProvider(...) instead. type WrapperTracerProvider struct { embedded.TracerProvider wTracer *WrapperTracer } var _ trace.TracerProvider = (*WrapperTracerProvider)(nil) // Tracer returns the WrapperTracer associated with the WrapperTracerProvider. func (p *WrapperTracerProvider) Tracer(_ string, _ ...trace.TracerOption) trace.Tracer { return p.wTracer } // NewWrappedTracerProvider creates a new trace provider that creates a single // instance of WrapperTracer that wraps OpenTelemetry tracer, and always returns // it unmodified from Tracer(). // // Deprecated: Use NewTracerProvider(...) instead. func NewWrappedTracerProvider(bridge *BridgeTracer, tracer trace.Tracer) *WrapperTracerProvider { return &WrapperTracerProvider{ wTracer: NewWrapperTracer(bridge, tracer), } } // WrapperTracer is a wrapper around an OpenTelemetry tracer. It // mostly forwards the calls to the wrapped tracer, but also does some // extra steps like setting up a context with the active OpenTracing // span. // // It does not need to be used when the OpenTelemetry tracer is also // aware how to operate in environment where OpenTracing API is also // used. type WrapperTracer struct { embedded.Tracer bridge *BridgeTracer tracer trace.Tracer } var ( _ trace.Tracer = &WrapperTracer{} _ migration.DeferredContextSetupTracerExtension = &WrapperTracer{} ) // NewWrapperTracer wraps the passed tracer and also talks to the // passed bridge tracer when setting up the context with the new // active OpenTracing span. func NewWrapperTracer(bridge *BridgeTracer, tracer trace.Tracer) *WrapperTracer { return &WrapperTracer{ bridge: bridge, tracer: tracer, } } func (t *WrapperTracer) otelTracer() trace.Tracer { return t.tracer } // Start forwards the call to the wrapped tracer. It also tries to // override the tracer of the returned span if the span implements the // OverrideTracerSpanExtension interface. func (t *WrapperTracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { ctx, span := t.otelTracer().Start(ctx, name, opts...) if spanWithExtension, ok := span.(migration.OverrideTracerSpanExtension); ok { spanWithExtension.OverrideTracer(t) } if !migration.SkipContextSetup(ctx) { ctx = t.bridge.ContextWithBridgeSpan(ctx, span) } return ctx, span } // DeferredContextSetupHook is a part of the implementation of the // DeferredContextSetupTracerExtension interface. It will try to // forward the call to the wrapped tracer if it implements the // interface. 
func (t *WrapperTracer) DeferredContextSetupHook(ctx context.Context, span trace.Span) context.Context { if tracerWithExtension, ok := t.otelTracer().(migration.DeferredContextSetupTracerExtension); ok { ctx = tracerWithExtension.DeferredContextSetupHook(ctx, span) } ctx = trace.ContextWithSpan(ctx, span) return ctx } opentelemetry-go-1.21.0/codes/000077500000000000000000000000001452547353200162015ustar00rootroot00000000000000opentelemetry-go-1.21.0/codes/codes.go000066400000000000000000000056351452547353200176360ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package codes // import "go.opentelemetry.io/otel/codes" import ( "encoding/json" "fmt" "strconv" ) const ( // Unset is the default status code. Unset Code = 0 // Error indicates the operation contains an error. // // NOTE: The error code in OTLP is 2. // The value of this enum is only relevant to the internals // of the Go SDK. Error Code = 1 // Ok indicates operation has been validated by an Application developers // or Operator to have completed successfully, or contain no error. // // NOTE: The Ok code in OTLP is 1. // The value of this enum is only relevant to the internals // of the Go SDK. Ok Code = 2 maxCode = 3 ) // Code is an 32-bit representation of a status state. type Code uint32 var codeToStr = map[Code]string{ Unset: "Unset", Error: "Error", Ok: "Ok", } var strToCode = map[string]Code{ `"Unset"`: Unset, `"Error"`: Error, `"Ok"`: Ok, } // String returns the Code as a string. func (c Code) String() string { return codeToStr[c] } // UnmarshalJSON unmarshals b into the Code. // // This is based on the functionality in the gRPC codes package: // https://github.com/grpc/grpc-go/blob/bb64fee312b46ebee26be43364a7a966033521b1/codes/codes.go#L218-L244 func (c *Code) UnmarshalJSON(b []byte) error { // From json.Unmarshaler: By convention, to approximate the behavior of // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as // a no-op. if string(b) == "null" { return nil } if c == nil { return fmt.Errorf("nil receiver passed to UnmarshalJSON") } var x interface{} if err := json.Unmarshal(b, &x); err != nil { return err } switch x.(type) { case string: if jc, ok := strToCode[string(b)]; ok { *c = jc return nil } return fmt.Errorf("invalid code: %q", string(b)) case float64: if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { if ci >= maxCode { return fmt.Errorf("invalid code: %q", ci) } *c = Code(ci) return nil } return fmt.Errorf("invalid code: %q", string(b)) default: return fmt.Errorf("invalid code: %q", string(b)) } } // MarshalJSON returns c as the JSON encoding of c. 
func (c *Code) MarshalJSON() ([]byte, error) { if c == nil { return []byte("null"), nil } str, ok := codeToStr[*c] if !ok { return nil, fmt.Errorf("invalid code: %d", *c) } return []byte(fmt.Sprintf("%q", str)), nil } opentelemetry-go-1.21.0/codes/codes_test.go000066400000000000000000000073651452547353200206770ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package codes import ( "bytes" "encoding/json" "fmt" "testing" ) func TestCodeString(t *testing.T) { tests := []struct { code Code want string }{ {Unset, "Unset"}, {Error, "Error"}, {Ok, "Ok"}, } for _, test := range tests { if got := test.code.String(); got != test.want { t.Errorf("String of code %d %q, want %q", test.code, got, test.want) } } } func TestCodeUnmarshalJSONNull(t *testing.T) { c := new(Code) orig := c if err := c.UnmarshalJSON([]byte("null")); err != nil { t.Fatalf("Code.UnmarshalJSON(\"null\") errored: %v", err) } if orig != c { t.Error("Code.UnmarshalJSON(\"null\") should not decode a value") } } func TestCodeUnmarshalJSONNil(t *testing.T) { c := (*Code)(nil) if err := c.UnmarshalJSON([]byte{}); err == nil { t.Fatalf("Code(nil).UnmarshalJSON() did not error") } } func TestCodeUnmarshalJSON(t *testing.T) { tests := []struct { input string want Code }{ {"0", Unset}, {`"Unset"`, Unset}, {"1", Error}, {`"Error"`, Error}, {"2", Ok}, {`"Ok"`, Ok}, } for _, test := range tests { c := new(Code) *c = Code(maxCode) if err := json.Unmarshal([]byte(test.input), c); err != nil { t.Fatalf("json.Unmarshal(%q, Code) errored: %v", test.input, err) } if *c != test.want { t.Errorf("failed to unmarshal %q as %v", test.input, test.want) } } } func TestCodeUnmarshalJSONErrorInvalidData(t *testing.T) { tests := []string{ fmt.Sprintf("%d", maxCode), "Not a code", "Unset", "true", `"Not existing"`, "", } c := new(Code) for _, test := range tests { if err := json.Unmarshal([]byte(test), c); err == nil { t.Fatalf("json.Unmarshal(%q, Code) did not error", test) } } } func TestCodeMarshalJSONNil(t *testing.T) { c := (*Code)(nil) b, err := c.MarshalJSON() if err != nil { t.Fatalf("Code(nil).MarshalJSON() errored: %v", err) } if !bytes.Equal(b, []byte("null")) { t.Errorf("Code(nil).MarshalJSON() returned %s, want \"null\"", string(b)) } } func TestCodeMarshalJSON(t *testing.T) { tests := []struct { code Code want string }{ {Unset, `"Unset"`}, {Error, `"Error"`}, {Ok, `"Ok"`}, } for _, test := range tests { b, err := test.code.MarshalJSON() if err != nil { t.Fatalf("Code(%s).MarshalJSON() errored: %v", test.code, err) } if !bytes.Equal(b, []byte(test.want)) { t.Errorf("Code(%s).MarshalJSON() returned %s, want %s", test.code, string(b), test.want) } } } func TestCodeMarshalJSONErrorInvalid(t *testing.T) { c := new(Code) *c = Code(maxCode) if b, err := c.MarshalJSON(); err == nil { t.Fatalf("Code(maxCode).MarshalJSON() did not error") } else if b != nil { t.Fatal("Code(maxCode).MarshalJSON() returned non-nil value") } } func TestRoundTripCodes(t *testing.T) { tests := []struct { input Code 
}{ {Unset}, {Error}, {Ok}, } for _, test := range tests { c := test.input out := new(Code) b, err := c.MarshalJSON() if err != nil { t.Fatalf("Code(%s).MarshalJSON() errored: %v", test.input, err) } if err := out.UnmarshalJSON(b); err != nil { t.Fatalf("Code.UnmarshalJSON(%q) errored: %v", c, err) } if *out != test.input { t.Errorf("failed to round trip %q, output was %v", test.input, out) } } } opentelemetry-go-1.21.0/codes/doc.go000066400000000000000000000016011452547353200172730ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* Package codes defines the canonical error codes used by OpenTelemetry. It conforms to [the OpenTelemetry specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/api.md#set-status). */ package codes // import "go.opentelemetry.io/otel/codes" opentelemetry-go-1.21.0/doc.go000066400000000000000000000027341452547353200162060ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* Package otel provides global access to the OpenTelemetry API. The subpackages of the otel package provide an implementation of the OpenTelemetry API. The provided API is used to instrument code and measure data about that code's performance and operation. The measured data, by default, is not processed or transmitted anywhere. An implementation of the OpenTelemetry SDK, like the default SDK implementation (go.opentelemetry.io/otel/sdk), and associated exporters are used to process and transport this data. To read the getting started guide, see https://opentelemetry.io/docs/go/getting-started/. To read more about tracing, see go.opentelemetry.io/otel/trace. To read more about metrics, see go.opentelemetry.io/otel/metric. To read more about propagation, see go.opentelemetry.io/otel/propagation and go.opentelemetry.io/otel/baggage. */ package otel // import "go.opentelemetry.io/otel" opentelemetry-go-1.21.0/error_handler.go000066400000000000000000000025661452547353200202720ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otel // import "go.opentelemetry.io/otel" // ErrorHandler handles irremediable events. type ErrorHandler interface { // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. // Handle handles any error deemed irremediable by an OpenTelemetry // component. Handle(error) // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. } // ErrorHandlerFunc is a convenience adapter to allow the use of a function // as an ErrorHandler. type ErrorHandlerFunc func(error) var _ ErrorHandler = ErrorHandlerFunc(nil) // Handle handles the irremediable error by calling the ErrorHandlerFunc itself. func (f ErrorHandlerFunc) Handle(err error) { f(err) } opentelemetry-go-1.21.0/example/000077500000000000000000000000001452547353200165375ustar00rootroot00000000000000opentelemetry-go-1.21.0/example/dice/000077500000000000000000000000001452547353200174435ustar00rootroot00000000000000opentelemetry-go-1.21.0/example/dice/doc.go000066400000000000000000000012471452547353200205430ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Dice is the "Roll the dice" getting started example application. package main opentelemetry-go-1.21.0/example/dice/go.mod000066400000000000000000000021621452547353200205520ustar00rootroot00000000000000module go.opentelemetry.io/otel/example/dice go 1.20 require ( go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0 go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.44.0 go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0 go.opentelemetry.io/otel/metric v1.21.0 go.opentelemetry.io/otel/sdk v1.21.0 go.opentelemetry.io/otel/sdk/metric v1.21.0 ) require ( github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect go.opentelemetry.io/otel/trace v1.21.0 // indirect golang.org/x/sys v0.14.0 // indirect ) replace go.opentelemetry.io/otel/exporters/stdout/stdouttrace => ../../exporters/stdout/stdouttrace replace go.opentelemetry.io/otel/exporters/stdout/stdoutmetric => ../../exporters/stdout/stdoutmetric replace go.opentelemetry.io/otel => ../.. 
replace go.opentelemetry.io/otel/trace => ../../trace replace go.opentelemetry.io/otel/metric => ../../metric replace go.opentelemetry.io/otel/sdk/metric => ../../sdk/metric replace go.opentelemetry.io/otel/sdk => ../../sdk opentelemetry-go-1.21.0/example/dice/go.sum000066400000000000000000000025551452547353200206050ustar00rootroot00000000000000github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0 h1:1eHu3/pUSWaOgltNK3WJFaywKsTIr/PwvHyDmi0lQA0= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0/go.mod h1:HyABWq60Uy1kjJSa2BVOxUVao8Cdick5AWSKPutqy6U= golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= opentelemetry-go-1.21.0/example/dice/main.go000066400000000000000000000050651452547353200207240ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "context" "errors" "log" "net" "net/http" "os" "os/signal" "time" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" ) func main() { if err := run(); err != nil { log.Fatalln(err) } } func run() (err error) { // Handle SIGINT (CTRL+C) gracefully. ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt) defer stop() // Set up OpenTelemetry. serviceName := "dice" serviceVersion := "0.1.0" otelShutdown, err := setupOTelSDK(ctx, serviceName, serviceVersion) if err != nil { return } // Handle shutdown properly so nothing leaks. defer func() { err = errors.Join(err, otelShutdown(context.Background())) }() // Start HTTP server. srv := &http.Server{ Addr: ":8080", BaseContext: func(_ net.Listener) context.Context { return ctx }, ReadTimeout: time.Second, WriteTimeout: 10 * time.Second, Handler: newHTTPHandler(), } srvErr := make(chan error, 1) go func() { srvErr <- srv.ListenAndServe() }() // Wait for interruption. select { case err = <-srvErr: // Error when starting HTTP server. return case <-ctx.Done(): // Wait for first CTRL+C. 
// Stop receiving signal notifications as soon as possible. stop() } // When Shutdown is called, ListenAndServe immediately returns ErrServerClosed. err = srv.Shutdown(context.Background()) return } func newHTTPHandler() http.Handler { mux := http.NewServeMux() // handleFunc is a replacement for mux.HandleFunc // which enriches the handler's HTTP instrumentation with the pattern as the http.route. handleFunc := func(pattern string, handlerFunc func(http.ResponseWriter, *http.Request)) { // Configure the "http.route" for the HTTP instrumentation. handler := otelhttp.WithRouteTag(pattern, http.HandlerFunc(handlerFunc)) mux.Handle(pattern, handler) } // Register handlers. handleFunc("/rolldice", rolldice) // Add HTTP instrumentation for the whole server. handler := otelhttp.NewHandler(mux, "/") return handler } opentelemetry-go-1.21.0/example/dice/otel.go000066400000000000000000000072631452547353200207450ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "context" "errors" "time" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/resource" "go.opentelemetry.io/otel/sdk/trace" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" ) // setupOTelSDK bootstraps the OpenTelemetry pipeline. // If it does not return an error, make sure to call shutdown for proper cleanup. func setupOTelSDK(ctx context.Context, serviceName, serviceVersion string) (shutdown func(context.Context) error, err error) { var shutdownFuncs []func(context.Context) error // shutdown calls cleanup functions registered via shutdownFuncs. // The errors from the calls are joined. // Each registered cleanup will be invoked once. shutdown = func(ctx context.Context) error { var err error for _, fn := range shutdownFuncs { err = errors.Join(err, fn(ctx)) } shutdownFuncs = nil return err } // handleErr calls shutdown for cleanup and makes sure that all errors are returned. handleErr := func(inErr error) { err = errors.Join(inErr, shutdown(ctx)) } // Set up resource. res, err := newResource(serviceName, serviceVersion) if err != nil { handleErr(err) return } // Set up propagator. prop := newPropagator() otel.SetTextMapPropagator(prop) // Set up trace provider. tracerProvider, err := newTraceProvider(res) if err != nil { handleErr(err) return } shutdownFuncs = append(shutdownFuncs, tracerProvider.Shutdown) otel.SetTracerProvider(tracerProvider) // Set up meter provider. 
meterProvider, err := newMeterProvider(res) if err != nil { handleErr(err) return } shutdownFuncs = append(shutdownFuncs, meterProvider.Shutdown) otel.SetMeterProvider(meterProvider) return } func newResource(serviceName, serviceVersion string) (*resource.Resource, error) { return resource.Merge(resource.Default(), resource.NewWithAttributes(semconv.SchemaURL, semconv.ServiceName(serviceName), semconv.ServiceVersion(serviceVersion), )) } func newPropagator() propagation.TextMapPropagator { return propagation.NewCompositeTextMapPropagator( propagation.TraceContext{}, propagation.Baggage{}, ) } func newTraceProvider(res *resource.Resource) (*trace.TracerProvider, error) { traceExporter, err := stdouttrace.New( stdouttrace.WithPrettyPrint()) if err != nil { return nil, err } traceProvider := trace.NewTracerProvider( trace.WithBatcher(traceExporter, // Default is 5s. Set to 1s for demonstrative purposes. trace.WithBatchTimeout(time.Second)), trace.WithResource(res), ) return traceProvider, nil } func newMeterProvider(res *resource.Resource) (*metric.MeterProvider, error) { metricExporter, err := stdoutmetric.New() if err != nil { return nil, err } meterProvider := metric.NewMeterProvider( metric.WithResource(res), metric.WithReader(metric.NewPeriodicReader(metricExporter, // Default is 1m. Set to 3s for demonstrative purposes. metric.WithInterval(3*time.Second))), ) return meterProvider, nil } opentelemetry-go-1.21.0/example/dice/rolldice.go000066400000000000000000000027551452547353200216000ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
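// The rolldice handler below depends on the pipeline assembled by setupOTelSDK
// in otel.go. A condensed sketch of that contract, mirroring the call in main.go
// above (error handling trimmed for brevity; not a drop-in replacement):
//
//	shutdown, err := setupOTelSDK(ctx, "dice", "0.1.0")
//	if err != nil {
//		log.Fatalln(err)
//	}
//	defer func() {
//		// Each registered cleanup (tracer and meter provider) runs once;
//		// their errors are joined into the single returned error.
//		if err := shutdown(context.Background()); err != nil {
//			log.Println("shutdown:", err)
//		}
//	}()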
package main import ( "io" "log" "math/rand" "net/http" "strconv" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" ) var ( tracer = otel.Tracer("rolldice") meter = otel.Meter("rolldice") rollCnt metric.Int64Counter ) func init() { var err error rollCnt, err = meter.Int64Counter("dice.rolls", metric.WithDescription("The number of rolls by roll value"), metric.WithUnit("{roll}")) if err != nil { panic(err) } } func rolldice(w http.ResponseWriter, r *http.Request) { ctx, span := tracer.Start(r.Context(), "roll") defer span.End() roll := 1 + rand.Intn(6) rollValueAttr := attribute.Int("roll.value", roll) span.SetAttributes(rollValueAttr) rollCnt.Add(ctx, 1, metric.WithAttributes(rollValueAttr)) resp := strconv.Itoa(roll) + "\n" if _, err := io.WriteString(w, resp); err != nil { log.Printf("Write failed: %v\n", err) } } opentelemetry-go-1.21.0/example/namedtracer/000077500000000000000000000000001452547353200210245ustar00rootroot00000000000000opentelemetry-go-1.21.0/example/namedtracer/foo/000077500000000000000000000000001452547353200216075ustar00rootroot00000000000000opentelemetry-go-1.21.0/example/namedtracer/foo/foo.go000066400000000000000000000025401452547353200227220ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package foo // import "go.opentelemetry.io/otel/example/namedtracer/foo" import ( "context" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" ) var lemonsKey = attribute.Key("ex.com/lemons") // SubOperation is an example to demonstrate the use of named tracer. // It creates a named tracer with its package path. func SubOperation(ctx context.Context) error { // Using global provider. Alternative is to have application provide a getter // for its component to get the instance of the provider. tr := otel.Tracer("example/namedtracer/foo") var span trace.Span _, span = tr.Start(ctx, "Sub operation...") defer span.End() span.SetAttributes(lemonsKey.String("five")) span.AddEvent("Sub span event") return nil } opentelemetry-go-1.21.0/example/namedtracer/go.mod000066400000000000000000000013351452547353200221340ustar00rootroot00000000000000module go.opentelemetry.io/otel/example/namedtracer go 1.20 replace ( go.opentelemetry.io/otel => ../.. 
go.opentelemetry.io/otel/sdk => ../../sdk ) require ( github.com/go-logr/stdr v1.2.2 go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0 go.opentelemetry.io/otel/sdk v1.21.0 go.opentelemetry.io/otel/trace v1.21.0 ) require ( github.com/go-logr/logr v1.3.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect golang.org/x/sys v0.14.0 // indirect ) replace go.opentelemetry.io/otel/trace => ../../trace replace go.opentelemetry.io/otel/exporters/stdout/stdouttrace => ../../exporters/stdout/stdouttrace replace go.opentelemetry.io/otel/metric => ../../metric opentelemetry-go-1.21.0/example/namedtracer/go.sum000066400000000000000000000017131452547353200221610ustar00rootroot00000000000000github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= opentelemetry-go-1.21.0/example/namedtracer/main.go000066400000000000000000000045501452547353200223030ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "context" "fmt" "log" "github.com/go-logr/stdr" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/baggage" "go.opentelemetry.io/otel/example/namedtracer/foo" "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" sdktrace "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/trace" ) var ( fooKey = attribute.Key("ex.com/foo") barKey = attribute.Key("ex.com/bar") anotherKey = attribute.Key("ex.com/another") ) var tp *sdktrace.TracerProvider // initTracer creates and registers trace provider instance. func initTracer() error { exp, err := stdouttrace.New(stdouttrace.WithPrettyPrint()) if err != nil { return fmt.Errorf("failed to initialize stdouttrace exporter: %w", err) } bsp := sdktrace.NewBatchSpanProcessor(exp) tp = sdktrace.NewTracerProvider( sdktrace.WithSampler(sdktrace.AlwaysSample()), sdktrace.WithSpanProcessor(bsp), ) otel.SetTracerProvider(tp) return nil } func main() { // Set logging level to info to see SDK status messages stdr.SetVerbosity(5) // initialize trace provider. 
if err := initTracer(); err != nil { log.Panic(err) } // Create a named tracer with package path as its name. tracer := tp.Tracer("example/namedtracer/main") ctx := context.Background() defer func() { _ = tp.Shutdown(ctx) }() m0, _ := baggage.NewMember(string(fooKey), "foo1") m1, _ := baggage.NewMember(string(barKey), "bar1") b, _ := baggage.New(m0, m1) ctx = baggage.ContextWithBaggage(ctx, b) var span trace.Span ctx, span = tracer.Start(ctx, "operation") defer span.End() span.AddEvent("Nice operation!", trace.WithAttributes(attribute.Int("bogons", 100))) span.SetAttributes(anotherKey.String("yes")) if err := foo.SubOperation(ctx); err != nil { panic(err) } } opentelemetry-go-1.21.0/example/opencensus/000077500000000000000000000000001452547353200207215ustar00rootroot00000000000000opentelemetry-go-1.21.0/example/opencensus/go.mod000066400000000000000000000023531452547353200220320ustar00rootroot00000000000000module go.opentelemetry.io/otel/example/opencensus go 1.20 replace ( go.opentelemetry.io/otel => ../.. go.opentelemetry.io/otel/bridge/opencensus => ../../bridge/opencensus go.opentelemetry.io/otel/sdk => ../../sdk ) require ( go.opencensus.io v0.24.0 go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/bridge/opencensus v0.44.0 go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.44.0 go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0 go.opentelemetry.io/otel/sdk v1.21.0 go.opentelemetry.io/otel/sdk/metric v1.21.0 ) require ( github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/otel/trace v1.21.0 // indirect golang.org/x/sys v0.14.0 // indirect ) replace go.opentelemetry.io/otel/metric => ../../metric replace go.opentelemetry.io/otel/sdk/metric => ../../sdk/metric replace go.opentelemetry.io/otel/trace => ../../trace replace go.opentelemetry.io/otel/exporters/stdout/stdoutmetric => ../../exporters/stdout/stdoutmetric replace go.opentelemetry.io/otel/exporters/stdout/stdouttrace => ../../exporters/stdout/stdouttrace opentelemetry-go-1.21.0/example/opencensus/go.sum000066400000000000000000000240601452547353200220560ustar00rootroot00000000000000cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto 
v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc 
v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= opentelemetry-go-1.21.0/example/opencensus/main.go000066400000000000000000000123121452547353200221730ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "context" "fmt" "log" "time" ocmetric "go.opencensus.io/metric" "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricproducer" "go.opencensus.io/stats" "go.opencensus.io/stats/view" "go.opencensus.io/tag" octrace "go.opencensus.io/trace" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/bridge/opencensus" "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" "go.opentelemetry.io/otel/sdk/metric" sdktrace "go.opentelemetry.io/otel/sdk/trace" ) var ( // instrumenttype differentiates between our gauge and view metrics. keyType = tag.MustNewKey("instrumenttype") // Counts the number of lines read in from standard input. 
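	// In this example the measure is recorded once per loop iteration in
	// monitoring below.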
countMeasure = stats.Int64("test_count", "A count of something", stats.UnitDimensionless) countView = &view.View{ Name: "test_count", Measure: countMeasure, Description: "A count of something", Aggregation: view.Count(), TagKeys: []tag.Key{keyType}, } ) func main() { log.Println("Using OpenTelemetry stdout exporters.") traceExporter, err := stdouttrace.New(stdouttrace.WithPrettyPrint()) if err != nil { log.Fatal(fmt.Errorf("error creating trace exporter: %w", err)) } metricsExporter, err := stdoutmetric.New() if err != nil { log.Fatal(fmt.Errorf("error creating metric exporter: %w", err)) } tracing(traceExporter) if err := monitoring(metricsExporter); err != nil { log.Fatal(err) } } // tracing demonstrates overriding the OpenCensus DefaultTracer to send spans // to the OpenTelemetry exporter by calling OpenCensus APIs. func tracing(otExporter sdktrace.SpanExporter) { ctx := context.Background() log.Println("Configuring OpenCensus. Not Registering any OpenCensus exporters.") octrace.ApplyConfig(octrace.Config{DefaultSampler: octrace.AlwaysSample()}) tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(otExporter)) otel.SetTracerProvider(tp) log.Println("Installing the OpenCensus bridge to make OpenCensus libraries write spans using OpenTelemetry.") opencensus.InstallTraceBridge() tp.ForceFlush(ctx) log.Println("Creating OpenCensus span, which should be printed out using the OpenTelemetry stdouttrace exporter.\n-- It should have no parent, since it is the first span.") ctx, outerOCSpan := octrace.StartSpan(ctx, "OpenCensusOuterSpan") outerOCSpan.End() tp.ForceFlush(ctx) log.Println("Creating OpenTelemetry span\n-- It should have the OpenCensus span as a parent, since the OpenCensus span was written with using OpenTelemetry APIs.") ctx, otspan := tp.Tracer("simple").Start(ctx, "OpenTelemetrySpan") otspan.End() tp.ForceFlush(ctx) log.Println("Creating OpenCensus span, which should be printed out using the OpenTelemetry stdouttrace exporter.\n-- It should have the OpenTelemetry span as a parent, since it was written using OpenTelemetry APIs") _, innerOCSpan := octrace.StartSpan(ctx, "OpenCensusInnerSpan") innerOCSpan.End() tp.ForceFlush(ctx) } // monitoring demonstrates creating an IntervalReader using the OpenTelemetry // exporter to send metrics to the exporter by using either an OpenCensus // registry or an OpenCensus view. func monitoring(exporter metric.Exporter) error { log.Println("Adding the OpenCensus metric Producer to an OpenTelemetry Reader to export OpenCensus metrics using the OpenTelemetry stdout exporter.") // Register the OpenCensus metric Producer to add metrics from OpenCensus to the output. 
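	// The bridge producer exposes metrics gathered through the OpenCensus
	// global metric producer manager (the registry and view registered below),
	// so the periodic reader can export them via the OpenTelemetry exporter.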
reader := metric.NewPeriodicReader(exporter, metric.WithProducer(opencensus.NewMetricProducer())) metric.NewMeterProvider(metric.WithReader(reader)) log.Println("Registering a gauge metric using an OpenCensus registry.") r := ocmetric.NewRegistry() metricproducer.GlobalManager().AddProducer(r) gauge, err := r.AddInt64Gauge( "test_gauge", ocmetric.WithDescription("A gauge for testing"), ocmetric.WithConstLabel(map[metricdata.LabelKey]metricdata.LabelValue{ {Key: keyType.Name()}: metricdata.NewLabelValue("gauge"), }), ) if err != nil { return fmt.Errorf("failed to add gauge: %w", err) } entry, err := gauge.GetEntry() if err != nil { return fmt.Errorf("failed to get gauge entry: %w", err) } log.Println("Registering a cumulative metric using an OpenCensus view.") if err := view.Register(countView); err != nil { return fmt.Errorf("failed to register views: %w", err) } ctx, err := tag.New(context.Background(), tag.Insert(keyType, "view")) if err != nil { return fmt.Errorf("failed to set tag: %w", err) } for i := int64(1); true; i++ { // update stats for our gauge entry.Set(i) // update stats for our view stats.Record(ctx, countMeasure.M(1)) time.Sleep(time.Second) } return nil } opentelemetry-go-1.21.0/example/otel-collector/000077500000000000000000000000001452547353200214665ustar00rootroot00000000000000opentelemetry-go-1.21.0/example/otel-collector/Makefile000066400000000000000000000016611452547353200231320ustar00rootroot00000000000000JAEGER_OPERATOR_VERSION = v1.36.0 namespace-k8s: kubectl apply -f k8s/namespace.yaml jaeger-operator-k8s: # Create the jaeger operator and necessary artifacts in ns observability kubectl create -n observability -f https://github.com/jaegertracing/jaeger-operator/releases/download/$(JAEGER_OPERATOR_VERSION)/jaeger-operator.yaml jaeger-k8s: kubectl apply -f k8s/jaeger.yaml prometheus-k8s: kubectl apply -f k8s/prometheus-service.yaml # Prometheus instance kubectl apply -f k8s/prometheus-monitor.yaml # Service monitor otel-collector-k8s: kubectl apply -f k8s/otel-collector.yaml clean-k8s: - kubectl delete -f k8s/otel-collector.yaml - kubectl delete -f k8s/prometheus-monitor.yaml - kubectl delete -f k8s/prometheus-service.yaml - kubectl delete -f k8s/jaeger.yaml - kubectl delete -n observability -f https://github.com/jaegertracing/jaeger-operator/releases/download/$(JAEGER_OPERATOR_VERSION)/jaeger-operator.yaml opentelemetry-go-1.21.0/example/otel-collector/README.md000066400000000000000000000154511452547353200227530ustar00rootroot00000000000000# OpenTelemetry Collector Traces Example This example illustrates how to export trace and metric data from the OpenTelemetry-Go SDK to the OpenTelemetry Collector. From there, we bring the trace data to Jaeger and the metric data to Prometheus The complete flow is: ``` -----> Jaeger (trace) App + SDK ---> OpenTelemetry Collector ---| -----> Prometheus (metrics) ``` # Prerequisites You will need access to a Kubernetes cluster for this demo. We use a local instance of [microk8s](https://microk8s.io/), but please feel free to pick your favorite. If you do decide to use microk8s, please ensure that dns and storage addons are enabled ```bash microk8s enable dns storage ``` For simplicity, the demo application is not part of the k8s cluster, and will access the OpenTelemetry Collector through a NodePort on the cluster. Note that the NodePort opened by this demo is not secured. 
Ideally you'd want to either have your application running as part of the kubernetes cluster, or use a secured connection (NodePort/LoadBalancer with TLS or an ingress extension). If not using microk8s, ensure that cert-manager is installed by following [the instructions here](https://cert-manager.io/docs/installation/). # Deploying to Kubernetes All the necessary Kubernetes deployment files are available in this demo, in the [k8s](./k8s) folder. For your convenience, we assembled a [makefile](./Makefile) with deployment commands (see below). For those with subtly different systems, you are, of course, welcome to poke inside the Makefile and run the commands manually. If you use microk8s and alias `microk8s kubectl` to `kubectl`, the Makefile will not recognize the alias, and so the commands will have to be run manually. ## Setting up the Prometheus operator If you're using microk8s like us, simply do ```bash microk8s enable prometheus ``` and you're good to go. Move on to [Using the makefile](#using-the-makefile). Otherwise, obtain a copy of the Prometheus Operator stack from [prometheus-operator](https://github.com/prometheus-operator/kube-prometheus): ```bash git clone https://github.com/prometheus-operator/kube-prometheus.git cd kube-prometheus kubectl create -f manifests/setup # wait for namespaces and CRDs to become available, then kubectl create -f manifests/ ``` And to tear down the stack when you're finished: ```bash kubectl delete --ignore-not-found=true -f manifests/ -f manifests/setup ``` ## Using the makefile Next, we can deploy our Jaeger instance, Prometheus monitor, and Collector using the [makefile](./Makefile). ```bash # Create the namespace make namespace-k8s # Deploy Jaeger operator make jaeger-operator-k8s # After the operator is deployed, create the Jaeger instance make jaeger-k8s # Then the Prometheus instance. Ensure you have enabled a Prometheus operator # before executing (see above). make prometheus-k8s # Finally, deploy the OpenTelemetry Collector make otel-collector-k8s ``` If you want to clean up after this, you can use the `make clean-k8s` to delete all the resources created above. Note that this will not remove the namespace. Because Kubernetes sometimes gets stuck when removing namespaces, please remove this namespace manually after all the resources inside have been deleted, for example with ```bash kubectl delete namespaces observability ``` # Configuring the OpenTelemetry Collector Although the above steps should deploy and configure everything, let's spend some time on the [configuration](./k8s/otel-collector.yaml) of the Collector. One important part here is that, in order to enable our application to send data to the OpenTelemetry Collector, we need to first configure the `otlp` receiver: ```yml ... otel-collector-config: | receivers: # Make sure to add the otlp receiver. # This will open up the receiver on port 4317. otlp: protocols: grpc: endpoint: "0.0.0.0:4317" processors: ... ``` This will create the receiver on the Collector side, and open up port `4317` for receiving traces. The rest of the configuration is quite standard, with the only mention that we need to create the Jaeger and Prometheus exporters: ```yml ... exporters: jaeger: endpoint: "jaeger-collector.observability.svc.cluster.local:14250" prometheus: endpoint: 0.0.0.0:8889 namespace: "testapp" ... 
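  # For reference, these exporters are wired to the otlp receiver by the
  # service pipelines section of the full config (see k8s/otel-collector.yaml):
  #
  #  service:
  #    pipelines:
  #      traces:
  #        receivers: [otlp]
  #        processors: []
  #        exporters: [jaeger]
  #      metrics:
  #        receivers: [otlp]
  #        processors: []
  #        exporters: [prometheus, logging]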
``` ## OpenTelemetry Collector service One more aspect in the OpenTelemetry Collector [configuration](./k8s/otel-collector.yaml) worth looking at is the NodePort service used for accessing it: ```yaml apiVersion: v1 kind: Service metadata: ... spec: ports: - name: otlp # Default endpoint for otlp receiver. port: 4317 protocol: TCP targetPort: 4317 nodePort: 30080 - name: metrics # Endpoint for metrics from our app. port: 8889 protocol: TCP targetPort: 8889 selector: component: otel-collector type: NodePort ``` This service will bind the `4317` port used to access the otlp receiver to port `30080` on your cluster's node. By doing so, it makes it possible for us to access the Collector by using the static address `:30080`. In case you are running a local cluster, this will be `localhost:30080`. Note that you can also change this to a LoadBalancer or have an ingress extension for accessing the service. # Running the code You can find the complete code for this example in the [main.go](./main.go) file. To run it, ensure you have a somewhat recent version of Go (preferably >= 1.13) and do ```bash go run main.go ``` The example simulates an application, hard at work, computing for ten seconds then finishing. # Viewing instrumentation data Now the exciting part! Let's check out the telemetry data generated by our sample application ## Jaeger UI First, we need to enable an ingress provider. If you've been using microk8s, do ```bash microk8s enable ingress ``` Then find out where the Jaeger console is living: ```bash kubectl get ingress --all-namespaces ``` For us, we get the output ``` NAMESPACE NAME CLASS HOSTS ADDRESS PORTS AGE observability jaeger-query * 127.0.0.1 80 5h40m ``` indicating that the Jaeger UI is available at [http://localhost:80](http://localhost:80). Navigate there in your favorite web-browser to view the generated traces. ## Prometheus Unfortunately, the Prometheus operator doesn't provide a convenient out-of-the-box ingress route for us to use, so we'll use port-forwarding instead. Note: this is a quick-and-dirty solution for the sake of example. You *will* be attacked by shady people if you do this in production! ```bash kubectl --namespace monitoring port-forward svc/prometheus-k8s 9090 ``` Then navigate to [http://localhost:9090](http://localhost:9090) to view the Prometheus dashboard. opentelemetry-go-1.21.0/example/otel-collector/go.mod000066400000000000000000000027231452547353200226000ustar00rootroot00000000000000module go.opentelemetry.io/otel/example/otel-collector go 1.20 replace ( go.opentelemetry.io/otel => ../.. 
go.opentelemetry.io/otel/sdk => ../../sdk ) require ( go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 go.opentelemetry.io/otel/sdk v1.21.0 go.opentelemetry.io/otel/trace v1.21.0 google.golang.org/grpc v1.59.0 ) require ( github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect golang.org/x/net v0.17.0 // indirect golang.org/x/sys v0.14.0 // indirect golang.org/x/text v0.13.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/protobuf v1.31.0 // indirect ) replace go.opentelemetry.io/otel/trace => ../../trace replace go.opentelemetry.io/otel/exporters/otlp/otlptrace => ../../exporters/otlp/otlptrace replace go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => ../../exporters/otlp/otlptrace/otlptracegrpc replace go.opentelemetry.io/otel/metric => ../../metric opentelemetry-go-1.21.0/example/otel-collector/go.sum000066400000000000000000000070151452547353200226240ustar00rootroot00000000000000github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= golang.org/x/sys v0.14.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= opentelemetry-go-1.21.0/example/otel-collector/k8s/000077500000000000000000000000001452547353200221735ustar00rootroot00000000000000opentelemetry-go-1.21.0/example/otel-collector/k8s/jaeger.yaml000066400000000000000000000012511452547353200243130ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: jaegertracing.io/v1 kind: Jaeger metadata: name: jaeger namespace: observabilityopentelemetry-go-1.21.0/example/otel-collector/k8s/namespace.yaml000066400000000000000000000012101452547353200250050ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
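# The observability namespace holds the Jaeger instance, the OpenTelemetry
# Collector, and its ServiceMonitor used by this example; it is created by the
# `make namespace-k8s` target before the other resources are applied.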
apiVersion: v1 kind: Namespace metadata: name: observability opentelemetry-go-1.21.0/example/otel-collector/k8s/otel-collector.yaml000066400000000000000000000101511452547353200260040ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: v1 kind: ConfigMap metadata: name: otel-collector-conf namespace: observability labels: app: opentelemetry component: otel-collector-conf data: otel-collector-config: | receivers: # Make sure to add the otlp receiver. # This will open up the receiver on port 4317 otlp: protocols: grpc: endpoint: "0.0.0.0:4317" processors: extensions: health_check: {} exporters: jaeger: endpoint: "jaeger-collector.observability.svc.cluster.local:14250" insecure: true prometheus: endpoint: 0.0.0.0:8889 namespace: "testapp" logging: service: extensions: [health_check] pipelines: traces: receivers: [otlp] processors: [] exporters: [jaeger] metrics: receivers: [otlp] processors: [] exporters: [prometheus, logging] --- apiVersion: v1 kind: Service metadata: name: otel-collector namespace: observability labels: app: opentelemetry component: otel-collector spec: ports: - name: otlp # Default endpoint for otlp receiver. port: 4317 protocol: TCP targetPort: 4317 nodePort: 30080 - name: metrics # Default endpoint for metrics. port: 8889 protocol: TCP targetPort: 8889 selector: component: otel-collector type: NodePort --- apiVersion: apps/v1 kind: Deployment metadata: name: otel-collector namespace: observability labels: app: opentelemetry component: otel-collector spec: selector: matchLabels: app: opentelemetry component: otel-collector minReadySeconds: 5 progressDeadlineSeconds: 120 replicas: 1 #TODO - adjust this to your own requirements template: metadata: annotations: prometheus.io/path: "/metrics" prometheus.io/port: "8889" prometheus.io/scrape: "true" labels: app: opentelemetry component: otel-collector spec: containers: - command: - "/otelcol" - "--config=/conf/otel-collector-config.yaml" # Memory Ballast size should be max 1/3 to 1/2 of memory. - "--mem-ballast-size-mib=683" env: - name: GOGC value: "80" image: otel/opentelemetry-collector:0.6.0 name: otel-collector resources: limits: cpu: 1 memory: 2Gi requests: cpu: 200m memory: 400Mi ports: - containerPort: 4317 # Default endpoint for otlp receiver. - containerPort: 8889 # Default endpoint for querying metrics. volumeMounts: - name: otel-collector-config-vol mountPath: /conf # - name: otel-collector-secrets # mountPath: /secrets livenessProbe: httpGet: path: / port: 13133 # Health Check extension default port. readinessProbe: httpGet: path: / port: 13133 # Health Check extension default port. 
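        # Both probes query the health_check extension enabled in the
        # ConfigMap above, which listens on its default port 13133.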
volumes: - configMap: name: otel-collector-conf items: - key: otel-collector-config path: otel-collector-config.yaml name: otel-collector-config-vol # - secret: # name: otel-collector-secrets # items: # - key: cert.pem # path: cert.pem # - key: key.pem # path: key.pem opentelemetry-go-1.21.0/example/otel-collector/k8s/prometheus-monitor.yaml000066400000000000000000000023071452547353200267410ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: monitoring.coreos.com/v1 kind: Prometheus metadata: labels: app: prometheus prometheus: service-prometheus name: service-prometheus namespace: monitoring spec: alerting: alertmanagers: - name: alertmanager-main namespace: monitoring port: web baseImage: quay.io/prometheus/prometheus logLevel: info paused: false replicas: 2 retention: 2d routePrefix: / ruleSelector: matchLabels: prometheus: service-prometheus role: alert-rules serviceAccountName: prometheus-k8s serviceMonitorSelector: matchExpressions: - key: serviceapp operator: Exists opentelemetry-go-1.21.0/example/otel-collector/k8s/prometheus-service.yaml000066400000000000000000000017271452547353200267170ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: labels: serviceapp: otel-collector name: otel-collector namespace: observability spec: endpoints: - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token interval: 30s port: metrics namespaceSelector: matchNames: - observability selector: matchLabels: app: opentelemetry opentelemetry-go-1.21.0/example/otel-collector/main.go000066400000000000000000000103411452547353200227400ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Example using OTLP exporters + collector + third-party backends. 
For // information about using the exporter, see: // https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp?tab=doc#example-package-Insecure package main import ( "context" "fmt" "log" "os" "os/signal" "time" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" "go.opentelemetry.io/otel/trace" ) // Initializes an OTLP exporter, and configures the corresponding trace // provider. func initProvider() (func(context.Context) error, error) { ctx := context.Background() res, err := resource.New(ctx, resource.WithAttributes( // the service name used to display traces in backends semconv.ServiceName("test-service"), ), ) if err != nil { return nil, fmt.Errorf("failed to create resource: %w", err) } // If the OpenTelemetry Collector is running on a local cluster (minikube or // microk8s), it should be accessible through the NodePort service at the // `localhost:30080` endpoint. Otherwise, replace `localhost` with the // endpoint of your cluster. If you run the app inside k8s, then you can // probably connect directly to the service through dns. ctx, cancel := context.WithTimeout(ctx, time.Second) defer cancel() conn, err := grpc.DialContext(ctx, "localhost:30080", // Note the use of insecure transport here. TLS is recommended in production. grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithBlock(), ) if err != nil { return nil, fmt.Errorf("failed to create gRPC connection to collector: %w", err) } // Set up a trace exporter traceExporter, err := otlptracegrpc.New(ctx, otlptracegrpc.WithGRPCConn(conn)) if err != nil { return nil, fmt.Errorf("failed to create trace exporter: %w", err) } // Register the trace exporter with a TracerProvider, using a batch // span processor to aggregate spans before export. bsp := sdktrace.NewBatchSpanProcessor(traceExporter) tracerProvider := sdktrace.NewTracerProvider( sdktrace.WithSampler(sdktrace.AlwaysSample()), sdktrace.WithResource(res), sdktrace.WithSpanProcessor(bsp), ) otel.SetTracerProvider(tracerProvider) // set global propagator to tracecontext (the default is no-op). otel.SetTextMapPropagator(propagation.TraceContext{}) // Shutdown will flush any remaining spans and shut down the exporter. return tracerProvider.Shutdown, nil } func main() { log.Printf("Waiting for connection...") ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt) defer cancel() shutdown, err := initProvider() if err != nil { log.Fatal(err) } defer func() { if err := shutdown(ctx); err != nil { log.Fatalf("failed to shutdown TracerProvider: %v", err) } }() tracer := otel.Tracer("test-tracer") // Attributes represent additional key-value descriptors that can be bound // to a metric observer or recorder.
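	// In this example they are attached to the parent span below via
	// trace.WithAttributes.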
commonAttrs := []attribute.KeyValue{ attribute.String("attrA", "chocolate"), attribute.String("attrB", "raspberry"), attribute.String("attrC", "vanilla"), } // work begins ctx, span := tracer.Start( ctx, "CollectorExporter-Example", trace.WithAttributes(commonAttrs...)) defer span.End() for i := 0; i < 10; i++ { _, iSpan := tracer.Start(ctx, fmt.Sprintf("Sample-%d", i)) log.Printf("Doing really hard work (%d / 10)\n", i+1) <-time.After(time.Second) iSpan.End() } log.Printf("Done!") } opentelemetry-go-1.21.0/example/passthrough/000077500000000000000000000000001452547353200211065ustar00rootroot00000000000000opentelemetry-go-1.21.0/example/passthrough/README.md000066400000000000000000000030251452547353200223650ustar00rootroot00000000000000# "Passthrough" setup for OpenTelemetry Some Go programs may wish to propagate context without recording spans. To do this in OpenTelemetry, simply install `TextMapPropagators`, but do not install a TracerProvider using the SDK. This works because the default TracerProvider implementation returns a "Non-Recording" span that keeps the context of the caller but does not record spans. For example, when you initialize your global settings, the following will propagate context without recording spans: ```golang // Setup Propagators only otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{})) ``` But the following will propagate context _and_ create new, potentially recorded spans: ```golang // Setup SDK exp, _ := stdout.New(stdout.WithPrettyPrint()) tp = sdktrace.NewTracerProvider( sdktrace.WithBatcher(exp), ) otel.SetTracerProvider(tp) // Setup Propagators otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{})) ``` ## The Demo The demo has the following call structure: `Outer -> Passthrough -> Inner` If all components had both an SDK and propagators registered, we would expect the trace to look like: ``` |-------outer---------| |-Passthrough recv-| |Passthrough send| |---inner---| ``` However, in this demo, only the outer and inner have TracerProvider backed by the SDK. All components have Propagators set. In this case, we expect to see: ``` |-------outer---------| |---inner---| ``` opentelemetry-go-1.21.0/example/passthrough/go.mod000066400000000000000000000013411452547353200222130ustar00rootroot00000000000000module go.opentelemetry.io/otel/example/passthrough go 1.20 require ( go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0 go.opentelemetry.io/otel/sdk v1.21.0 go.opentelemetry.io/otel/trace v1.21.0 ) require ( github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect golang.org/x/sys v0.14.0 // indirect ) replace ( go.opentelemetry.io/otel => ../.. 
go.opentelemetry.io/otel/sdk => ../../sdk go.opentelemetry.io/otel/trace => ../../trace ) replace go.opentelemetry.io/otel/exporters/stdout/stdouttrace => ../../exporters/stdout/stdouttrace replace go.opentelemetry.io/otel/metric => ../../metric opentelemetry-go-1.21.0/example/passthrough/go.sum000066400000000000000000000017131452547353200222430ustar00rootroot00000000000000github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= opentelemetry-go-1.21.0/example/passthrough/handler/000077500000000000000000000000001452547353200225235ustar00rootroot00000000000000opentelemetry-go-1.21.0/example/passthrough/handler/handler.go000066400000000000000000000052341452547353200244730ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package handler // import "go.opentelemetry.io/otel/example/passthrough/handler" import ( "context" "log" "net/http" "time" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" ) // Handler is a minimal implementation of the handler and client from // go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp for demonstration purposes. // It handles an incoming http request, and makes an outgoing http request. type Handler struct { propagators propagation.TextMapPropagator tracer trace.Tracer next func(r *http.Request) } // New returns a new Handler that will trace requests before handing them off // to next. func New(next func(r *http.Request)) *Handler { // Like most instrumentation packages, this handler defaults to using the // global progatators and tracer providers. return &Handler{ propagators: otel.GetTextMapPropagator(), tracer: otel.Tracer("examples/passthrough/handler"), next: next, } } // HandleHTTPReq mimics what an instrumented http server does. 
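// It extracts the propagated trace context from the incoming request headers,
// starts a span (which stays non-recording here because no SDK-backed
// TracerProvider is registered), does some pretend work, and then makes an
// instrumented outgoing request.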
func (h *Handler) HandleHTTPReq(r *http.Request) { ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) var span trace.Span log.Println("The \"handle passthrough request\" span should NOT be recorded, because it is recorded by a TracerProvider not backed by the SDK.") ctx, span = h.tracer.Start(ctx, "handle passthrough request") defer span.End() // Pretend to do work time.Sleep(time.Second) h.makeOutgoingRequest(ctx) } // makeOutgoingRequest mimics what an instrumented http client does. func (h *Handler) makeOutgoingRequest(ctx context.Context) { // make a new http request r, err := http.NewRequest("", "", nil) if err != nil { panic(err) } log.Println("The \"make outgoing request from passthrough\" span should NOT be recorded, because it is recorded by a TracerProvider not backed by the SDK.") ctx, span := h.tracer.Start(ctx, "make outgoing request from passthrough") defer span.End() r = r.WithContext(ctx) h.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header)) h.next(r) } opentelemetry-go-1.21.0/example/passthrough/main.go000066400000000000000000000065031452547353200223650ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "context" "fmt" "log" "net/http" "time" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/example/passthrough/handler" "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" "go.opentelemetry.io/otel/propagation" sdktrace "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/trace" ) func main() { ctx := context.Background() initPassthroughGlobals() tp, err := nonGlobalTracer() if err != nil { log.Fatal(err) } defer func() { _ = tp.Shutdown(ctx) }() // make an initial http request r, err := http.NewRequest("", "", nil) if err != nil { panic(err) } // This is roughly what an instrumented http client does. log.Println("The \"make outer request\" span should be recorded, because it is recorded with a Tracer from the SDK TracerProvider") var span trace.Span ctx, span = tp.Tracer("example/passthrough/outer").Start(ctx, "make outer request") defer span.End() r = r.WithContext(ctx) otel.GetTextMapPropagator().Inject(ctx, propagation.HeaderCarrier(r.Header)) backendFunc := func(r *http.Request) { // This is roughly what an instrumented http server does. ctx := otel.GetTextMapPropagator().Extract(r.Context(), propagation.HeaderCarrier(r.Header)) log.Println("The \"handle inner request\" span should be recorded, because it is recorded with a Tracer from the SDK TracerProvider") _, span := tp.Tracer("example/passthrough/inner").Start(ctx, "handle inner request") defer span.End() // Do "backend work" time.Sleep(time.Second) } // This handler will be a passthrough, since we didn't set a global TracerProvider passthroughHandler := handler.New(backendFunc) passthroughHandler.HandleHTTPReq(r) } func initPassthroughGlobals() { // We explicitly DO NOT set the global TracerProvider using otel.SetTracerProvider(). 
// The unset TracerProvider returns a "non-recording" span, but still passes through context. log.Println("Register a global TextMapPropagator, but do not register a global TracerProvider to be in \"passthrough\" mode.") log.Println("The \"passthrough\" mode propagates the TraceContext and Baggage, but does not record spans.") otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{})) } // nonGlobalTracer creates a trace provider instance for testing, but doesn't // set it as the global tracer provider. func nonGlobalTracer() (*sdktrace.TracerProvider, error) { exp, err := stdouttrace.New(stdouttrace.WithPrettyPrint()) if err != nil { return nil, fmt.Errorf("failed to initialize stdouttrace exporter: %w", err) } bsp := sdktrace.NewBatchSpanProcessor(exp) tp := sdktrace.NewTracerProvider( sdktrace.WithSampler(sdktrace.AlwaysSample()), sdktrace.WithSpanProcessor(bsp), ) return tp, nil } opentelemetry-go-1.21.0/example/prometheus/000077500000000000000000000000001452547353200207325ustar00rootroot00000000000000opentelemetry-go-1.21.0/example/prometheus/doc.go000066400000000000000000000012461452547353200220310ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package main provides a code sample of the Prometheus exporter. package main opentelemetry-go-1.21.0/example/prometheus/go.mod000066400000000000000000000024261452547353200220440ustar00rootroot00000000000000module go.opentelemetry.io/otel/example/prometheus go 1.20 require ( github.com/prometheus/client_golang v1.17.0 go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/exporters/prometheus v0.44.0 go.opentelemetry.io/otel/metric v1.21.0 go.opentelemetry.io/otel/sdk/metric v1.21.0 ) require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/common v0.44.0 // indirect github.com/prometheus/procfs v0.11.1 // indirect go.opentelemetry.io/otel/sdk v1.21.0 // indirect go.opentelemetry.io/otel/trace v1.21.0 // indirect golang.org/x/sys v0.14.0 // indirect google.golang.org/protobuf v1.31.0 // indirect ) replace go.opentelemetry.io/otel => ../.. 
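// The replace directives in this file point the example at the modules in
// this repository checkout instead of the released versions.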
replace go.opentelemetry.io/otel/exporters/prometheus => ../../exporters/prometheus replace go.opentelemetry.io/otel/sdk => ../../sdk replace go.opentelemetry.io/otel/sdk/metric => ../../sdk/metric replace go.opentelemetry.io/otel/metric => ../../metric replace go.opentelemetry.io/otel/trace => ../../trace opentelemetry-go-1.21.0/example/prometheus/go.sum000066400000000000000000000063071452547353200220730ustar00rootroot00000000000000github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= 
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= opentelemetry-go-1.21.0/example/prometheus/main.go000066400000000000000000000057311452547353200222130ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "context" "fmt" "log" "math/rand" "net/http" "os" "os/signal" "time" "github.com/prometheus/client_golang/prometheus/promhttp" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/exporters/prometheus" api "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/sdk/metric" ) const meterName = "github.com/open-telemetry/opentelemetry-go/example/prometheus" func main() { rng := rand.New(rand.NewSource(time.Now().UnixNano())) ctx := context.Background() // The exporter embeds a default OpenTelemetry Reader and // implements prometheus.Collector, allowing it to be used as // both a Reader and Collector. exporter, err := prometheus.New() if err != nil { log.Fatal(err) } provider := metric.NewMeterProvider(metric.WithReader(exporter)) meter := provider.Meter(meterName) // Start the prometheus HTTP server and pass the exporter Collector to it go serveMetrics() opt := api.WithAttributes( attribute.Key("A").String("B"), attribute.Key("C").String("D"), ) // This is the equivalent of prometheus.NewCounterVec counter, err := meter.Float64Counter("foo", api.WithDescription("a simple counter")) if err != nil { log.Fatal(err) } counter.Add(ctx, 5, opt) gauge, err := meter.Float64ObservableGauge("bar", api.WithDescription("a fun little gauge")) if err != nil { log.Fatal(err) } _, err = meter.RegisterCallback(func(_ context.Context, o api.Observer) error { n := -10. + rng.Float64()*(90.) // [-10, 100) o.ObserveFloat64(gauge, n, opt) return nil }, gauge) if err != nil { log.Fatal(err) } // This is the equivalent of prometheus.NewHistogramVec histogram, err := meter.Float64Histogram( "baz", api.WithDescription("a histogram with custom buckets and rename"), api.WithExplicitBucketBoundaries(64, 128, 256, 512, 1024, 2048, 4096), ) if err != nil { log.Fatal(err) } histogram.Record(ctx, 136, opt) histogram.Record(ctx, 64, opt) histogram.Record(ctx, 701, opt) histogram.Record(ctx, 830, opt) ctx, _ = signal.NotifyContext(ctx, os.Interrupt) <-ctx.Done() } func serveMetrics() { log.Printf("serving metrics at localhost:2223/metrics") http.Handle("/metrics", promhttp.Handler()) err := http.ListenAndServe(":2223", nil) //nolint:gosec // Ignoring G114: Use of net/http serve function that has no support for setting timeouts. 
if err != nil { fmt.Printf("error serving http: %v", err) return } } opentelemetry-go-1.21.0/example/zipkin/000077500000000000000000000000001452547353200200435ustar00rootroot00000000000000opentelemetry-go-1.21.0/example/zipkin/Dockerfile000066400000000000000000000014241452547353200220360ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. FROM golang:1.21-alpine COPY . /go/src/github.com/open-telemetry/opentelemetry-go/ WORKDIR /go/src/github.com/open-telemetry/opentelemetry-go/example/zipkin/ RUN go install ./main.go CMD ["/go/bin/main"] opentelemetry-go-1.21.0/example/zipkin/README.md000066400000000000000000000017211452547353200213230ustar00rootroot00000000000000# Zipkin Exporter Example Send an example span to a [Zipkin](https://zipkin.io/) service. These instructions expect you have [docker-compose](https://docs.docker.com/compose/) installed. Bring up the `zipkin-collector` service and example `zipkin-client` service to send an example trace: ```sh docker-compose up --detach zipkin-collector zipkin-client ``` The `zipkin-client` service sends just one trace and exits. Retrieve the `traceId` generated by the `zipkin-client` service; should be the last line in the logs: ```sh docker-compose logs --tail=1 zipkin-client ``` With the `traceId` you can view the trace from the `zipkin-collector` service UI hosted on port `9411`, e.g. with `traceId` of `f5695ba3b2ed00ea583fa4fa0badbeef`: [http://localhost:9411/zipkin/traces/f5695ba3b2ed00ea583fa4fa0badbeef](http://localhost:9411/zipkin/traces/f5695ba3b2ed00ea583fa4fa0badbeef) Shut down the services when you are finished with the example: ```sh docker-compose down ``` opentelemetry-go-1.21.0/example/zipkin/docker-compose.yml000066400000000000000000000022011452547353200234730ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. version: "3.7" services: zipkin-collector: image: openzipkin/zipkin-slim:latest ports: - "9411:9411" networks: - example zipkin-client: build: dockerfile: $PWD/Dockerfile context: ../.. command: - "/bin/sh" - "-c" - "while ! 
nc -w 1 -z zipkin-collector 9411; do echo sleep for 1s waiting for zipkin-collector to become available; sleep 1; done && /go/bin/main -zipkin http://zipkin-collector:9411/api/v2/spans" networks: - example depends_on: - zipkin-collector networks: example: opentelemetry-go-1.21.0/example/zipkin/go.mod000066400000000000000000000013541452547353200211540ustar00rootroot00000000000000module go.opentelemetry.io/otel/example/zipkin go 1.20 replace ( go.opentelemetry.io/otel => ../.. go.opentelemetry.io/otel/exporters/zipkin => ../../exporters/zipkin go.opentelemetry.io/otel/sdk => ../../sdk ) require ( go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/exporters/zipkin v1.21.0 go.opentelemetry.io/otel/sdk v1.21.0 go.opentelemetry.io/otel/trace v1.21.0 ) require ( github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/openzipkin/zipkin-go v0.4.2 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect golang.org/x/sys v0.14.0 // indirect ) replace go.opentelemetry.io/otel/trace => ../../trace replace go.opentelemetry.io/otel/metric => ../../metric opentelemetry-go-1.21.0/example/zipkin/go.sum000066400000000000000000000022001452547353200211700ustar00rootroot00000000000000github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/openzipkin/zipkin-go v0.4.2 h1:zjqfqHjUpPmB3c1GlCvvgsM1G4LkvqQbBDueDOCg/jA= github.com/openzipkin/zipkin-go v0.4.2/go.mod h1:ZeVkFjuuBiSy13y8vpSDCjMi9GoI3hPpCJSBx/EYFhY= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= opentelemetry-go-1.21.0/example/zipkin/main.go000066400000000000000000000052621452547353200213230ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Command zipkin is an example program that creates spans // and uploads to openzipkin collector. 
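//
// The collector endpoint defaults to http://localhost:9411/api/v2/spans and
// can be overridden with the -zipkin flag; the docker-compose.yml in this
// directory starts a zipkin-collector service and points the example at it
// this way.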
package main import ( "context" "flag" "log" "os" "os/signal" "time" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/zipkin" "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" "go.opentelemetry.io/otel/trace" ) var logger = log.New(os.Stderr, "zipkin-example", log.Ldate|log.Ltime|log.Llongfile) // initTracer creates a new trace provider instance and registers it as global trace provider. func initTracer(url string) (func(context.Context) error, error) { // Create Zipkin Exporter and install it as a global tracer. // // For demoing purposes, always sample. In a production application, you should // configure the sampler to a trace.ParentBased(trace.TraceIDRatioBased) set at the desired // ratio. exporter, err := zipkin.New( url, zipkin.WithLogger(logger), ) if err != nil { return nil, err } batcher := sdktrace.NewBatchSpanProcessor(exporter) tp := sdktrace.NewTracerProvider( sdktrace.WithSpanProcessor(batcher), sdktrace.WithResource(resource.NewWithAttributes( semconv.SchemaURL, semconv.ServiceName("zipkin-test"), )), ) otel.SetTracerProvider(tp) return tp.Shutdown, nil } func main() { url := flag.String("zipkin", "http://localhost:9411/api/v2/spans", "zipkin url") flag.Parse() ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt) defer cancel() shutdown, err := initTracer(*url) if err != nil { log.Fatal(err) } defer func() { if err := shutdown(ctx); err != nil { log.Fatal("failed to shutdown TracerProvider: %w", err) } }() tr := otel.GetTracerProvider().Tracer("component-main") ctx, span := tr.Start(ctx, "foo", trace.WithSpanKind(trace.SpanKindServer)) <-time.After(6 * time.Millisecond) bar(ctx) <-time.After(6 * time.Millisecond) span.End() } func bar(ctx context.Context) { tr := otel.GetTracerProvider().Tracer("component-bar") _, span := tr.Start(ctx, "bar") <-time.After(6 * time.Millisecond) span.End() } opentelemetry-go-1.21.0/exporters/000077500000000000000000000000001452547353200171375ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/README.md000066400000000000000000000033031452547353200204150ustar00rootroot00000000000000# OpenTelemetry Exporters Once the OpenTelemetry SDK has created and processed telemetry, it needs to be exported. This package contains exporters for this purpose. ## Exporter Packages The following exporter packages are provided with the following OpenTelemetry signal support. | Exporter Package | Metrics | Traces | |:-----------------------------------------------------------------------------------------------------:|:-------:|:------:| | [go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc](./otlp/otlpmetric/otlpmetricgrpc) | ✓ | | | [go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp](./otlp/otlpmetric/otlpmetrichttp) | ✓ | | | [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc](./otlp/otlptrace/otlptracegrpc) | | ✓ | | [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp](./otlp/otlptrace/otlptracehttp) | | ✓ | | [go.opentelemetry.io/otel/exporters/prometheus](./prometheus) | ✓ | | | [go.opentelemetry.io/otel/exporters/stdout/stdoutmetric](./stdout/stdoutmetric) | ✓ | | | [go.opentelemetry.io/otel/exporters/stdout/stdouttrace](./stdout/stdouttrace) | | ✓ | | [go.opentelemetry.io/otel/exporters/zipkin](./zipkin) | | ✓ | See the [OpenTelemetry registry] for 3rd-party exporters compatible with this project. 
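Each exporter package follows the same wiring pattern: construct the exporter, hand it to the matching SDK span processor or metric reader, and register the resulting provider. The sketch below illustrates that pattern with the stdout trace exporter, mirroring the examples elsewhere in this repository; the constructor options and the processor/reader choice differ per package, so adapt accordingly.

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// Construct the exporter.
	exp, err := stdouttrace.New(stdouttrace.WithPrettyPrint())
	if err != nil {
		log.Fatalf("failed to create exporter: %v", err)
	}

	// Hand it to the SDK via a batch span processor and register the provider.
	bsp := sdktrace.NewBatchSpanProcessor(exp)
	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(bsp))
	defer func() {
		if err := tp.Shutdown(context.Background()); err != nil {
			log.Printf("failed to shut down tracer provider: %v", err)
		}
	}()
	otel.SetTracerProvider(tp)

	// Tracers obtained from otel.Tracer now export through exp.
}
```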
[OpenTelemetry registry]: https://opentelemetry.io/registry/?language=go&component=exporter opentelemetry-go-1.21.0/exporters/otlp/000077500000000000000000000000001452547353200201155ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/000077500000000000000000000000001452547353200222775ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/000077500000000000000000000000001452547353200253355ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go000066400000000000000000000151041452547353200271430ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" import ( "context" "time" "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry" colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" ) type client struct { metadata metadata.MD exportTimeout time.Duration requestFunc retry.RequestFunc // ourConn keeps track of where conn was created: true if created here in // NewClient, or false if passed with an option. This is important on // Shutdown as the conn should only be closed if we created it. Otherwise, // it is up to the processes that passed the conn to close it. ourConn bool conn *grpc.ClientConn msc colmetricpb.MetricsServiceClient } // newClient creates a new gRPC metric client. func newClient(ctx context.Context, cfg oconf.Config) (*client, error) { c := &client{ exportTimeout: cfg.Metrics.Timeout, requestFunc: cfg.RetryConfig.RequestFunc(retryable), conn: cfg.GRPCConn, } if len(cfg.Metrics.Headers) > 0 { c.metadata = metadata.New(cfg.Metrics.Headers) } if c.conn == nil { // If the caller did not provide a ClientConn when the client was // created, create one using the configuration they did provide. userAgent := "OTel Go OTLP over gRPC metrics exporter/" + Version() dialOpts := []grpc.DialOption{grpc.WithUserAgent(userAgent)} dialOpts = append(dialOpts, cfg.DialOptions...) conn, err := grpc.DialContext(ctx, cfg.Metrics.Endpoint, dialOpts...) if err != nil { return nil, err } // Keep track that we own the lifecycle of this conn and need to close // it on Shutdown. c.ourConn = true c.conn = conn } c.msc = colmetricpb.NewMetricsServiceClient(c.conn) return c, nil } // Shutdown shuts down the client, freeing all resource. // // Any active connections to a remote endpoint are closed if they were created // by the client. 
Any gRPC connection passed during creation using // WithGRPCConn will not be closed. It is the caller's responsibility to // handle cleanup of that resource. func (c *client) Shutdown(ctx context.Context) error { // The otlpmetric.Exporter synchronizes access to client methods and // ensures this is called only once. The only thing that needs to be done // here is to release any computational resources the client holds. c.metadata = nil c.requestFunc = nil c.msc = nil err := ctx.Err() if c.ourConn { closeErr := c.conn.Close() // A context timeout error takes precedence over this error. if err == nil && closeErr != nil { err = closeErr } } c.conn = nil return err } // UploadMetrics sends protoMetrics to connected endpoint. // // Retryable errors from the server will be handled according to any // RetryConfig the client was created with. func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error { // The otlpmetric.Exporter synchronizes access to client methods, and // ensures this is not called after the Exporter is shutdown. Only thing // to do here is send data. select { case <-ctx.Done(): // Do not upload if the context is already expired. return ctx.Err() default: } ctx, cancel := c.exportContext(ctx) defer cancel() return c.requestFunc(ctx, func(iCtx context.Context) error { resp, err := c.msc.Export(iCtx, &colmetricpb.ExportMetricsServiceRequest{ ResourceMetrics: []*metricpb.ResourceMetrics{protoMetrics}, }) if resp != nil && resp.PartialSuccess != nil { msg := resp.PartialSuccess.GetErrorMessage() n := resp.PartialSuccess.GetRejectedDataPoints() if n != 0 || msg != "" { err := internal.MetricPartialSuccessError(n, msg) otel.Handle(err) } } // nil is converted to OK. if status.Code(err) == codes.OK { // Success. return nil } return err }) } // exportContext returns a copy of parent with an appropriate deadline and // cancellation function based on the clients configured export timeout. // // It is the callers responsibility to cancel the returned context once its // use is complete, via the parent or directly with the returned CancelFunc, to // ensure all resources are correctly released. func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) { var ( ctx context.Context cancel context.CancelFunc ) if c.exportTimeout > 0 { ctx, cancel = context.WithTimeout(parent, c.exportTimeout) } else { ctx, cancel = context.WithCancel(parent) } if c.metadata.Len() > 0 { ctx = metadata.NewOutgoingContext(ctx, c.metadata) } return ctx, cancel } // retryable returns if err identifies a request that can be retried and a // duration to wait for if an explicit throttle time is included in err. func retryable(err error) (bool, time.Duration) { s := status.Convert(err) return retryableGRPCStatus(s) } func retryableGRPCStatus(s *status.Status) (bool, time.Duration) { switch s.Code() { case codes.Canceled, codes.DeadlineExceeded, codes.Aborted, codes.OutOfRange, codes.Unavailable, codes.DataLoss: // Additionally, handle RetryInfo. _, d := throttleDelay(s) return true, d case codes.ResourceExhausted: // Retry only if the server signals that the recovery from resource exhaustion is possible. return throttleDelay(s) } // Not a retry-able error. return false, 0 } // throttleDelay returns if the status is RetryInfo // and the duration to wait for if an explicit throttle time is included. 
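//
// For example, a status carrying a single errdetails.RetryInfo detail with a
// 15ms RetryDelay yields (true, 15*time.Millisecond), while a status without
// any RetryInfo detail yields (false, 0); see TestThrottleDelay for more
// cases.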
func throttleDelay(s *status.Status) (bool, time.Duration) { for _, detail := range s.Details() { if t, ok := detail.(*errdetails.RetryInfo); ok { return true, t.RetryDelay.AsDuration() } } return false, 0 } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/client_test.go000066400000000000000000000156141452547353200302100ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlpmetricgrpc import ( "context" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/durationpb" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/otest" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) func TestThrottleDelay(t *testing.T) { c := codes.ResourceExhausted testcases := []struct { status *status.Status wantOK bool wantDuration time.Duration }{ { status: status.New(c, "NoRetryInfo"), wantOK: false, wantDuration: 0, }, { status: func() *status.Status { s, err := status.New(c, "SingleRetryInfo").WithDetails( &errdetails.RetryInfo{ RetryDelay: durationpb.New(15 * time.Millisecond), }, ) require.NoError(t, err) return s }(), wantOK: true, wantDuration: 15 * time.Millisecond, }, { status: func() *status.Status { s, err := status.New(c, "ErrorInfo").WithDetails( &errdetails.ErrorInfo{Reason: "no throttle detail"}, ) require.NoError(t, err) return s }(), wantOK: false, wantDuration: 0, }, { status: func() *status.Status { s, err := status.New(c, "ErrorAndRetryInfo").WithDetails( &errdetails.ErrorInfo{Reason: "with throttle detail"}, &errdetails.RetryInfo{ RetryDelay: durationpb.New(13 * time.Minute), }, ) require.NoError(t, err) return s }(), wantOK: true, wantDuration: 13 * time.Minute, }, { status: func() *status.Status { s, err := status.New(c, "DoubleRetryInfo").WithDetails( &errdetails.RetryInfo{ RetryDelay: durationpb.New(13 * time.Minute), }, &errdetails.RetryInfo{ RetryDelay: durationpb.New(15 * time.Minute), }, ) require.NoError(t, err) return s }(), wantOK: true, wantDuration: 13 * time.Minute, }, } for _, tc := range testcases { t.Run(tc.status.Message(), func(t *testing.T) { ok, d := throttleDelay(tc.status) assert.Equal(t, tc.wantOK, ok) assert.Equal(t, tc.wantDuration, d) }) } } func TestRetryable(t *testing.T) { retryableCodes := map[codes.Code]bool{ codes.OK: false, codes.Canceled: true, codes.Unknown: false, codes.InvalidArgument: false, codes.DeadlineExceeded: true, codes.NotFound: false, codes.AlreadyExists: false, codes.PermissionDenied: false, codes.ResourceExhausted: false, codes.FailedPrecondition: false, codes.Aborted: true, codes.OutOfRange: true, codes.Unimplemented: false, codes.Internal: false, codes.Unavailable: true, codes.DataLoss: true, 
codes.Unauthenticated: false, } for c, want := range retryableCodes { got, _ := retryable(status.Error(c, "")) assert.Equalf(t, want, got, "evaluate(%s)", c) } } func TestRetryableGRPCStatusResourceExhaustedWithRetryInfo(t *testing.T) { delay := 15 * time.Millisecond s, err := status.New(codes.ResourceExhausted, "WithRetryInfo").WithDetails( &errdetails.RetryInfo{ RetryDelay: durationpb.New(delay), }, ) require.NoError(t, err) ok, d := retryableGRPCStatus(s) assert.True(t, ok) assert.Equal(t, delay, d) } type clientShim struct { *client } func (clientShim) Temporality(metric.InstrumentKind) metricdata.Temporality { return metricdata.CumulativeTemporality } func (clientShim) Aggregation(metric.InstrumentKind) metric.Aggregation { return nil } func (clientShim) ForceFlush(ctx context.Context) error { return ctx.Err() } func TestClient(t *testing.T) { factory := func(rCh <-chan otest.ExportResult) (otest.Client, otest.Collector) { coll, err := otest.NewGRPCCollector("", rCh) require.NoError(t, err) ctx := context.Background() addr := coll.Addr().String() opts := []Option{WithEndpoint(addr), WithInsecure()} cfg := oconf.NewGRPCConfig(asGRPCOptions(opts)...) client, err := newClient(ctx, cfg) require.NoError(t, err) return clientShim{client}, coll } t.Run("Integration", otest.RunClientTests(factory)) } func TestConfig(t *testing.T) { factoryFunc := func(rCh <-chan otest.ExportResult, o ...Option) (metric.Exporter, *otest.GRPCCollector) { coll, err := otest.NewGRPCCollector("", rCh) require.NoError(t, err) ctx := context.Background() opts := append([]Option{ WithEndpoint(coll.Addr().String()), WithInsecure(), }, o...) exp, err := New(ctx, opts...) require.NoError(t, err) return exp, coll } t.Run("WithHeaders", func(t *testing.T) { key := "my-custom-header" headers := map[string]string{key: "custom-value"} exp, coll := factoryFunc(nil, WithHeaders(headers)) t.Cleanup(coll.Shutdown) ctx := context.Background() require.NoError(t, exp.Export(ctx, &metricdata.ResourceMetrics{})) // Ensure everything is flushed. require.NoError(t, exp.Shutdown(ctx)) got := coll.Headers() require.Regexp(t, "OTel Go OTLP over gRPC metrics exporter/[01]\\..*", got) require.Contains(t, got, key) assert.Equal(t, got[key], []string{headers[key]}) }) t.Run("WithTimeout", func(t *testing.T) { // Do not send on rCh so the Collector never responds to the client. rCh := make(chan otest.ExportResult) t.Cleanup(func() { close(rCh) }) exp, coll := factoryFunc( rCh, WithTimeout(time.Millisecond), WithRetry(RetryConfig{Enabled: false}), ) t.Cleanup(coll.Shutdown) ctx := context.Background() t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) }) err := exp.Export(ctx, &metricdata.ResourceMetrics{}) assert.ErrorContains(t, err, context.DeadlineExceeded.Error()) }) t.Run("WithCustomUserAgent", func(t *testing.T) { key := "user-agent" customerUserAgent := "custom-user-agent" exp, coll := factoryFunc(nil, WithDialOption(grpc.WithUserAgent(customerUserAgent))) t.Cleanup(coll.Shutdown) ctx := context.Background() require.NoError(t, exp.Export(ctx, &metricdata.ResourceMetrics{})) // Ensure everything is flushed. 
require.NoError(t, exp.Shutdown(ctx)) got := coll.Headers() assert.Contains(t, got[key][0], customerUserAgent) }) } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go000066400000000000000000000233661452547353200271430ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" import ( "fmt" "time" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry" "go.opentelemetry.io/otel/sdk/metric" ) // Option applies a configuration option to the Exporter. type Option interface { applyGRPCOption(oconf.Config) oconf.Config } func asGRPCOptions(opts []Option) []oconf.GRPCOption { converted := make([]oconf.GRPCOption, len(opts)) for i, o := range opts { converted[i] = oconf.NewGRPCOption(o.applyGRPCOption) } return converted } // RetryConfig defines configuration for retrying the export of metric data // that failed. // // This configuration does not define any network retry strategy. That is // entirely handled by the gRPC ClientConn. type RetryConfig retry.Config type wrappedOption struct { oconf.GRPCOption } func (w wrappedOption) applyGRPCOption(cfg oconf.Config) oconf.Config { return w.ApplyGRPCOption(cfg) } // WithInsecure disables client transport security for the Exporter's gRPC // connection, just like grpc.WithInsecure() // (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does. // // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT // environment variable is set, and this option is not passed, that variable // value will be used to determine client security. If the endpoint has a // scheme of "http" or "unix" client security will be disabled. If both are // set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence. // // By default, if an environment variable is not set, and this option is not // passed, client security will be used. // // This option has no effect if WithGRPCConn is used. func WithInsecure() Option { return wrappedOption{oconf.WithInsecure()} } // WithEndpoint sets the target endpoint the Exporter will connect to. // // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT // environment variable is set, and this option is not passed, that variable // value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT // will take precedence. // // By default, if an environment variable is not set, and this option is not // passed, "localhost:4317" will be used. // // This option has no effect if WithGRPCConn is used. func WithEndpoint(endpoint string) Option { return wrappedOption{oconf.WithEndpoint(endpoint)} } // WithReconnectionPeriod set the minimum amount of time between connection // attempts to the target endpoint. 
// // This option has no effect if WithGRPCConn is used. func WithReconnectionPeriod(rp time.Duration) Option { return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config { cfg.ReconnectionPeriod = rp return cfg })} } func compressorToCompression(compressor string) oconf.Compression { if compressor == "gzip" { return oconf.GzipCompression } otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor)) return oconf.NoCompression } // WithCompressor sets the compressor the gRPC client uses. // Supported compressor values: "gzip". // // If the OTEL_EXPORTER_OTLP_COMPRESSION or // OTEL_EXPORTER_OTLP_METRICS_COMPRESSION environment variable is set, and // this option is not passed, that variable value will be used. That value can // be either "none" or "gzip". If both are set, // OTEL_EXPORTER_OTLP_METRICS_COMPRESSION will take precedence. // // By default, if an environment variable is not set, and this option is not // passed, no compressor will be used. // // This option has no effect if WithGRPCConn is used. func WithCompressor(compressor string) Option { return wrappedOption{oconf.WithCompression(compressorToCompression(compressor))} } // WithHeaders will send the provided headers with each gRPC requests. // // If the OTEL_EXPORTER_OTLP_HEADERS or OTEL_EXPORTER_OTLP_METRICS_HEADERS // environment variable is set, and this option is not passed, that variable // value will be used. The value will be parsed as a list of key value pairs. // These pairs are expected to be in the W3C Correlation-Context format // without additional semi-colon delimited metadata (i.e. "k1=v1,k2=v2"). If // both are set, OTEL_EXPORTER_OTLP_METRICS_HEADERS will take precedence. // // By default, if an environment variable is not set, and this option is not // passed, no user headers will be set. func WithHeaders(headers map[string]string) Option { return wrappedOption{oconf.WithHeaders(headers)} } // WithTLSCredentials sets the gRPC connection to use creds. // // If the OTEL_EXPORTER_OTLP_CERTIFICATE or // OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE environment variable is set, and // this option is not passed, that variable value will be used. The value will // be parsed the filepath of the TLS certificate chain to use. If both are // set, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE will take precedence. // // By default, if an environment variable is not set, and this option is not // passed, no TLS credentials will be used. // // This option has no effect if WithGRPCConn is used. func WithTLSCredentials(creds credentials.TransportCredentials) Option { return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config { cfg.Metrics.GRPCCredentials = creds return cfg })} } // WithServiceConfig defines the default gRPC service config used. // // This option has no effect if WithGRPCConn is used. func WithServiceConfig(serviceConfig string) Option { return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config { cfg.ServiceConfig = serviceConfig return cfg })} } // WithDialOption sets explicit grpc.DialOptions to use when establishing a // gRPC connection. The options here are appended to the internal grpc.DialOptions // used so they will take precedence over any other internal grpc.DialOptions // they might conflict with. // // This option has no effect if WithGRPCConn is used. 
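//
// For example (illustrative values), a custom user agent can be appended to
// the dial options with:
//
//	WithDialOption(grpc.WithUserAgent("my-service/1.0"))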
func WithDialOption(opts ...grpc.DialOption) Option { return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config { cfg.DialOptions = opts return cfg })} } // WithGRPCConn sets conn as the gRPC ClientConn used for all communication. // // This option takes precedence over any other option that relates to // establishing or persisting a gRPC connection to a target endpoint. Any // other option of those types passed will be ignored. // // It is the callers responsibility to close the passed conn. The Exporter // Shutdown method will not close this connection. func WithGRPCConn(conn *grpc.ClientConn) Option { return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config { cfg.GRPCConn = conn return cfg })} } // WithTimeout sets the max amount of time an Exporter will attempt an export. // // This takes precedence over any retry settings defined by WithRetry. Once // this time limit has been reached the export is abandoned and the metric // data is dropped. // // If the OTEL_EXPORTER_OTLP_TIMEOUT or OTEL_EXPORTER_OTLP_METRICS_TIMEOUT // environment variable is set, and this option is not passed, that variable // value will be used. The value will be parsed as an integer representing the // timeout in milliseconds. If both are set, // OTEL_EXPORTER_OTLP_METRICS_TIMEOUT will take precedence. // // By default, if an environment variable is not set, and this option is not // passed, a timeout of 10 seconds will be used. func WithTimeout(duration time.Duration) Option { return wrappedOption{oconf.WithTimeout(duration)} } // WithRetry sets the retry policy for transient retryable errors that are // returned by the target endpoint. // // If the target endpoint responds with not only a retryable error, but // explicitly returns a backoff time in the response, that time will take // precedence over these settings. // // These settings do not define any network retry strategy. That is entirely // handled by the gRPC ClientConn. // // If unset, the default retry policy will be used. It will retry the export // 5 seconds after receiving a retryable error and increase exponentially // after each error for no more than a total time of 1 minute. func WithRetry(settings RetryConfig) Option { return wrappedOption{oconf.WithRetry(retry.Config(settings))} } // WithTemporalitySelector sets the TemporalitySelector the client will use to // determine the Temporality of an instrument based on its kind. If this option // is not used, the client will use the DefaultTemporalitySelector from the // go.opentelemetry.io/otel/sdk/metric package. func WithTemporalitySelector(selector metric.TemporalitySelector) Option { return wrappedOption{oconf.WithTemporalitySelector(selector)} } // WithAggregationSelector sets the AggregationSelector the client will use to // determine the aggregation to use for an instrument based on its kind. If // this option is not used, the reader will use the DefaultAggregationSelector // from the go.opentelemetry.io/otel/sdk/metric package, or the aggregation // explicitly passed for a view matching an instrument. 
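//
// For example (an illustrative sketch, not the only valid shape), a selector
// that keeps the default behavior but switches histograms to a base2
// exponential bucket aggregation could look like:
//
//	func(ik metric.InstrumentKind) metric.Aggregation {
//		if ik == metric.InstrumentKindHistogram {
//			return metric.AggregationBase2ExponentialHistogram{MaxSize: 160, MaxScale: 20}
//		}
//		return metric.DefaultAggregationSelector(ik)
//	}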
func WithAggregationSelector(selector metric.AggregationSelector) Option { return wrappedOption{oconf.WithAggregationSelector(selector)} } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go000066400000000000000000000137471452547353200264450ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* Package otlpmetricgrpc provides an OTLP metrics exporter using gRPC. By default the telemetry is sent to https://localhost:4317. Exporter should be created using [New] and used with a [metric.PeriodicReader]. The environment variables described below can be used for configuration. OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT (default: "https://localhost:4317") - target to which the exporter sends telemetry. The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md. The value must contain a host. The value may additionally a port, a scheme, and a path. The value accepts "http" and "https" scheme. The value should not contain a query string or fragment. OTEL_EXPORTER_OTLP_METRICS_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT. The configuration can be overridden by [WithEndpoint], [WithInsecure], [WithGRPCConn] options. OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_METRICS_INSECURE (default: "false") - setting "true" disables client transport security for the exporter's gRPC connection. You can use this only when an endpoint is provided without the http or https scheme. OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT setting overrides the scheme defined via OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT. OTEL_EXPORTER_OTLP_METRICS_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE. The configuration can be overridden by [WithInsecure], [WithGRPCConn] options. OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_METRICS_HEADERS (default: none) - key-value pairs used as gRPC metadata associated with gRPC requests. The value is expected to be represented in a format matching to the [W3C Baggage HTTP Header Content Format], except that additional semi-colon delimited metadata is not supported. Example value: "key1=value1,key2=value2". OTEL_EXPORTER_OTLP_METRICS_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS. The configuration can be overridden by [WithHeaders] option. OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_METRICS_TIMEOUT (default: "10000") - maximum time in milliseconds the OTLP exporter waits for each batch export. OTEL_EXPORTER_OTLP_METRICS_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT. The configuration can be overridden by [WithTimeout] option. OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_METRICS_COMPRESSION (default: none) - the gRPC compressor the exporter uses. Supported value: "gzip". OTEL_EXPORTER_OTLP_METRICS_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION. The configuration can be overridden by [WithCompressor], [WithGRPCConn] options. 
OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE (default: none) - the filepath to the trusted certificate to use when verifying a server's TLS credentials. OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE. The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options. OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE (default: none) - the filepath to the client certificate/chain trust for clients private key to use in mTLS communication in PEM format. OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE. The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options. OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY (default: none) - the filepath to the clients private key to use in mTLS communication in PEM format. OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY. The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] option. OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE (default: "cumulative") - aggregation temporality to use on the basis of instrument kind. Supported values: - "cumulative" - Cumulative aggregation temporality for all instrument kinds, - "delta" - Delta aggregation temporality for Counter, Asynchronous Counter and Histogram instrument kinds; Cumulative aggregation for UpDownCounter and Asynchronous UpDownCounter instrument kinds, - "lowmemory" - Delta aggregation temporality for Synchronous Counter and Histogram instrument kinds; Cumulative aggregation temporality for Synchronous UpDownCounter, Asynchronous Counter, and Asynchronous UpDownCounter instrument kinds. The configuration can be overridden by [WithTemporalitySelector] option. OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION (default: "explicit_bucket_histogram") - default aggregation to use for histogram instruments. Supported values: - "explicit_bucket_histogram" - [Explicit Bucket Histogram Aggregation], - "base2_exponential_bucket_histogram" - [Base2 Exponential Bucket Histogram Aggregation]. The configuration can be overridden by [WithAggregationSelector] option. [W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content [Explicit Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#explicit-bucket-histogram-aggregation [Base2 Exponential Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#base2-exponential-bucket-histogram-aggregation */ package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/example_test.go000066400000000000000000000023041452547353200303550ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package otlpmetricgrpc_test import ( "context" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" "go.opentelemetry.io/otel/sdk/metric" ) func Example() { ctx := context.Background() exp, err := otlpmetricgrpc.New(ctx) if err != nil { panic(err) } meterProvider := metric.NewMeterProvider(metric.WithReader(metric.NewPeriodicReader(exp))) defer func() { if err := meterProvider.Shutdown(ctx); err != nil { panic(err) } }() otel.SetMeterProvider(meterProvider) // From here, the meterProvider can be used by instrumentation to collect // telemetry. } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go000066400000000000000000000122161452547353200275360ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" import ( "context" "fmt" "sync" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" ) // Exporter is a OpenTelemetry metric Exporter using gRPC. type Exporter struct { // Ensure synchronous access to the client across all functionality. clientMu sync.Mutex client interface { UploadMetrics(context.Context, *metricpb.ResourceMetrics) error Shutdown(context.Context) error } temporalitySelector metric.TemporalitySelector aggregationSelector metric.AggregationSelector shutdownOnce sync.Once } func newExporter(c *client, cfg oconf.Config) (*Exporter, error) { ts := cfg.Metrics.TemporalitySelector if ts == nil { ts = func(metric.InstrumentKind) metricdata.Temporality { return metricdata.CumulativeTemporality } } as := cfg.Metrics.AggregationSelector if as == nil { as = metric.DefaultAggregationSelector } return &Exporter{ client: c, temporalitySelector: ts, aggregationSelector: as, }, nil } // Temporality returns the Temporality to use for an instrument kind. func (e *Exporter) Temporality(k metric.InstrumentKind) metricdata.Temporality { return e.temporalitySelector(k) } // Aggregation returns the Aggregation to use for an instrument kind. func (e *Exporter) Aggregation(k metric.InstrumentKind) metric.Aggregation { return e.aggregationSelector(k) } // Export transforms and transmits metric data to an OTLP receiver. // // This method returns an error if called after Shutdown. // This method returns an error if the method is canceled by the passed context. 
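//
// Access to the underlying client is guarded by an internal mutex, so a slow
// upload also delays a concurrent Shutdown until the upload returns;
// Temporality and Aggregation lookups are never blocked by an in-flight
// export (see the exporter tests).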
func (e *Exporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error { defer global.Debug("OTLP/gRPC exporter export", "Data", rm) otlpRm, err := transform.ResourceMetrics(rm) // Best effort upload of transformable metrics. e.clientMu.Lock() upErr := e.client.UploadMetrics(ctx, otlpRm) e.clientMu.Unlock() if upErr != nil { if err == nil { return fmt.Errorf("failed to upload metrics: %w", upErr) } // Merge the two errors. return fmt.Errorf("failed to upload incomplete metrics (%s): %w", err, upErr) } return err } // ForceFlush flushes any metric data held by an exporter. // // This method returns an error if called after Shutdown. // This method returns an error if the method is canceled by the passed context. // // This method is safe to call concurrently. func (e *Exporter) ForceFlush(ctx context.Context) error { // The exporter and client hold no state, nothing to flush. return ctx.Err() } // Shutdown flushes all metric data held by an exporter and releases any held // computational resources. // // This method returns an error if called after Shutdown. // This method returns an error if the method is canceled by the passed context. // // This method is safe to call concurrently. func (e *Exporter) Shutdown(ctx context.Context) error { err := errShutdown e.shutdownOnce.Do(func() { e.clientMu.Lock() client := e.client e.client = shutdownClient{} e.clientMu.Unlock() err = client.Shutdown(ctx) }) return err } var errShutdown = fmt.Errorf("gRPC exporter is shutdown") type shutdownClient struct{} func (c shutdownClient) err(ctx context.Context) error { if err := ctx.Err(); err != nil { return err } return errShutdown } func (c shutdownClient) UploadMetrics(ctx context.Context, _ *metricpb.ResourceMetrics) error { return c.err(ctx) } func (c shutdownClient) Shutdown(ctx context.Context) error { return c.err(ctx) } // MarshalLog returns logging data about the Exporter. func (e *Exporter) MarshalLog() interface{} { return struct{ Type string }{Type: "OTLP/gRPC"} } // New returns an OpenTelemetry metric Exporter. The Exporter can be used with // a PeriodicReader to export OpenTelemetry metric data to an OTLP receiving // endpoint using gRPC. // // If an already established gRPC ClientConn is not passed in options using // WithGRPCConn, a connection to the OTLP endpoint will be established based // on options. If a connection cannot be establishes in the lifetime of ctx, // an error will be returned. func New(ctx context.Context, options ...Option) (*Exporter, error) { cfg := oconf.NewGRPCConfig(asGRPCOptions(options)...) c, err := newClient(ctx, cfg) if err != nil { return nil, err } return newExporter(c, cfg) } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter_test.go000066400000000000000000000062301452547353200305740ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" import ( "context" "sync" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/otest" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) func TestExporterClientConcurrentSafe(t *testing.T) { const goroutines = 5 coll, err := otest.NewGRPCCollector("", nil) require.NoError(t, err) ctx := context.Background() addr := coll.Addr().String() opts := []Option{WithEndpoint(addr), WithInsecure()} cfg := oconf.NewGRPCConfig(asGRPCOptions(opts)...) client, err := newClient(ctx, cfg) require.NoError(t, err) exp, err := newExporter(client, oconf.Config{}) require.NoError(t, err) rm := new(metricdata.ResourceMetrics) done := make(chan struct{}) var wg, someWork sync.WaitGroup for i := 0; i < goroutines; i++ { wg.Add(1) someWork.Add(1) go func() { defer wg.Done() assert.NoError(t, exp.Export(ctx, rm)) assert.NoError(t, exp.ForceFlush(ctx)) // Ensure some work is done before shutting down. someWork.Done() for { _ = exp.Export(ctx, rm) _ = exp.ForceFlush(ctx) select { case <-done: return default: } } }() } someWork.Wait() assert.NoError(t, exp.Shutdown(ctx)) assert.ErrorIs(t, exp.Shutdown(ctx), errShutdown) close(done) wg.Wait() } func TestExporterDoesNotBlockTemporalityAndAggregation(t *testing.T) { rCh := make(chan otest.ExportResult, 1) coll, err := otest.NewGRPCCollector("", rCh) require.NoError(t, err) ctx := context.Background() addr := coll.Addr().String() opts := []Option{WithEndpoint(addr), WithInsecure()} cfg := oconf.NewGRPCConfig(asGRPCOptions(opts)...) client, err := newClient(ctx, cfg) require.NoError(t, err) exp, err := newExporter(client, oconf.Config{}) require.NoError(t, err) var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() rm := new(metricdata.ResourceMetrics) t.Log("starting export") require.NoError(t, exp.Export(ctx, rm)) t.Log("export complete") }() assert.Eventually(t, func() bool { const inst = metric.InstrumentKindCounter // These should not be blocked. t.Log("getting temporality") _ = exp.Temporality(inst) t.Log("getting aggregation") _ = exp.Aggregation(inst) return true }, time.Second, 10*time.Millisecond) // Clear the export. rCh <- otest.ExportResult{} close(rCh) wg.Wait() } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod000066400000000000000000000031111452547353200264370ustar00rootroot00000000000000module go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc go 1.20 retract v0.32.2 // Contains unresolvable dependencies. 
require ( github.com/cenkalti/backoff/v4 v4.2.1 github.com/google/go-cmp v0.6.0 github.com/stretchr/testify v1.8.4 go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/sdk v1.21.0 go.opentelemetry.io/otel/sdk/metric v1.21.0 go.opentelemetry.io/proto/otlp v1.0.0 google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.31.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/kr/text v0.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.11.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/otel/trace v1.21.0 // indirect golang.org/x/net v0.17.0 // indirect golang.org/x/sys v0.14.0 // indirect golang.org/x/text v0.13.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) replace go.opentelemetry.io/otel => ../../../.. replace go.opentelemetry.io/otel/sdk => ../../../../sdk replace go.opentelemetry.io/otel/sdk/metric => ../../../../sdk/metric replace go.opentelemetry.io/otel/metric => ../../../../metric replace go.opentelemetry.io/otel/trace => ../../../../trace opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/go.sum000066400000000000000000000110731452547353200264720ustar00rootroot00000000000000github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/000077500000000000000000000000001452547353200271515ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/000077500000000000000000000000001452547353200311275ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go000066400000000000000000000131721452547353200334400ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/envconfig/envconfig.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig" import ( "crypto/tls" "crypto/x509" "errors" "fmt" "net/url" "strconv" "strings" "time" "go.opentelemetry.io/otel/internal/global" ) // ConfigFn is the generic function used to set a config. type ConfigFn func(*EnvOptionsReader) // EnvOptionsReader reads the required environment variables. type EnvOptionsReader struct { GetEnv func(string) string ReadFile func(string) ([]byte, error) Namespace string } // Apply runs every ConfigFn. func (e *EnvOptionsReader) Apply(opts ...ConfigFn) { for _, o := range opts { o(e) } } // GetEnvValue gets an OTLP environment variable value of the specified key // using the GetEnv function. // This function prepends the OTLP specified namespace to all key lookups. func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) { v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key))) return v, v != "" } // WithString retrieves the specified config and passes it to ConfigFn as a string. func WithString(n string, fn func(string)) func(e *EnvOptionsReader) { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { fn(v) } } } // WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn. func WithBool(n string, fn func(bool)) ConfigFn { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { b := strings.ToLower(v) == "true" fn(b) } } } // WithDuration retrieves the specified config and passes it to ConfigFn as a duration. func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { d, err := strconv.Atoi(v) if err != nil { global.Error(err, "parse duration", "input", v) return } fn(time.Duration(d) * time.Millisecond) } } } // WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers. func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { fn(stringToHeader(v)) } } } // WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL. func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { u, err := url.Parse(v) if err != nil { global.Error(err, "parse url", "input", v) return } fn(u) } } } // WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn. 
func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { b, err := e.ReadFile(v) if err != nil { global.Error(err, "read tls ca cert file", "file", v) return } c, err := createCertPool(b) if err != nil { global.Error(err, "create tls cert pool") return } fn(c) } } } // WithClientCert returns a ConfigFn that reads the environment variables nc and nk as filepaths to a client certificate and key pair. If they exist, they are parsed as a crypto/tls.Certificate and the result is passed to fn. func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn { return func(e *EnvOptionsReader) { vc, okc := e.GetEnvValue(nc) vk, okk := e.GetEnvValue(nk) if !okc || !okk { return } cert, err := e.ReadFile(vc) if err != nil { global.Error(err, "read tls client cert", "file", vc) return } key, err := e.ReadFile(vk) if err != nil { global.Error(err, "read tls client key", "file", vk) return } crt, err := tls.X509KeyPair(cert, key) if err != nil { global.Error(err, "create tls client key pair") return } fn(crt) } } func keyWithNamespace(ns, key string) string { if ns == "" { return key } return fmt.Sprintf("%s_%s", ns, key) } func stringToHeader(value string) map[string]string { headersPairs := strings.Split(value, ",") headers := make(map[string]string) for _, header := range headersPairs { n, v, found := strings.Cut(header, "=") if !found { global.Error(errors.New("missing '='"), "parse headers", "input", header) continue } name, err := url.PathUnescape(n) if err != nil { global.Error(err, "escape header key", "key", n) continue } trimmedName := strings.TrimSpace(name) value, err := url.PathUnescape(v) if err != nil { global.Error(err, "escape header value", "value", v) continue } trimmedValue := strings.TrimSpace(value) headers[trimmedName] = trimmedValue } return headers } func createCertPool(certBytes []byte) (*x509.CertPool, error) { cp := x509.NewCertPool() if ok := cp.AppendCertsFromPEM(certBytes); !ok { return nil, errors.New("failed to append certificate to the cert pool") } return cp, nil } envconfig_test.go000066400000000000000000000260371452547353200344240ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/envconfig/envconfig_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License.
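// Editor's note (illustrative sketch, not part of the generated source): the
// helpers defined above are meant to be composed through EnvOptionsReader.Apply.
// Assuming a hypothetical "MY_EXPORTER" namespace, usage would look roughly like:
//
//	reader := EnvOptionsReader{
//		GetEnv:    os.Getenv,
//		ReadFile:  os.ReadFile,
//		Namespace: "MY_EXPORTER",
//	}
//	reader.Apply(
//		WithString("ENDPOINT", func(v string) { /* receives MY_EXPORTER_ENDPOINT */ }),
//		WithDuration("TIMEOUT", func(d time.Duration) { /* value parsed as milliseconds */ }),
//	)
//
// Each With* helper only invokes its callback when the (namespaced) variable is
// set to a non-empty value, since GetEnvValue trims whitespace before checking.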
package envconfig import ( "crypto/tls" "crypto/x509" "errors" "net/url" "testing" "time" "github.com/stretchr/testify/assert" ) const WeakKey = ` -----BEGIN EC PRIVATE KEY----- MHcCAQEEIEbrSPmnlSOXvVzxCyv+VR3a0HDeUTvOcqrdssZ2k4gFoAoGCCqGSM49 AwEHoUQDQgAEDMTfv75J315C3K9faptS9iythKOMEeV/Eep73nWX531YAkmmwBSB 2dXRD/brsgLnfG57WEpxZuY7dPRbxu33BA== -----END EC PRIVATE KEY----- ` const WeakCertificate = ` -----BEGIN CERTIFICATE----- MIIBjjCCATWgAwIBAgIUKQSMC66MUw+kPp954ZYOcyKAQDswCgYIKoZIzj0EAwIw EjEQMA4GA1UECgwHb3RlbC1nbzAeFw0yMjEwMTkwMDA5MTlaFw0yMzEwMTkwMDA5 MTlaMBIxEDAOBgNVBAoMB290ZWwtZ28wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNC AAQMxN+/vknfXkLcr19qm1L2LK2Eo4wR5X8R6nvedZfnfVgCSabAFIHZ1dEP9uuy Aud8bntYSnFm5jt09FvG7fcEo2kwZzAdBgNVHQ4EFgQUicGuhnTTkYLZwofXMNLK SHFeCWgwHwYDVR0jBBgwFoAUicGuhnTTkYLZwofXMNLKSHFeCWgwDwYDVR0TAQH/ BAUwAwEB/zAUBgNVHREEDTALgglsb2NhbGhvc3QwCgYIKoZIzj0EAwIDRwAwRAIg Lfma8FnnxeSOi6223AsFfYwsNZ2RderNsQrS0PjEHb0CIBkrWacqARUAu7uT4cGu jVcIxYQqhId5L8p/mAv2PWZS -----END CERTIFICATE----- ` type testOption struct { TestString string TestBool bool TestDuration time.Duration TestHeaders map[string]string TestURL *url.URL TestTLS *tls.Config } func TestEnvConfig(t *testing.T) { parsedURL, err := url.Parse("https://example.com") assert.NoError(t, err) options := []testOption{} for _, testcase := range []struct { name string reader EnvOptionsReader configs []ConfigFn expectedOptions []testOption }{ { name: "with no namespace and a matching key", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithString("HELLO", func(v string) { options = append(options, testOption{TestString: v}) }), }, expectedOptions: []testOption{ { TestString: "world", }, }, }, { name: "with no namespace and a non-matching key", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithString("HOLA", func(v string) { options = append(options, testOption{TestString: v}) }), }, expectedOptions: []testOption{}, }, { name: "with a namespace and a matching key", reader: EnvOptionsReader{ Namespace: "MY_NAMESPACE", GetEnv: func(n string) string { if n == "MY_NAMESPACE_HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithString("HELLO", func(v string) { options = append(options, testOption{TestString: v}) }), }, expectedOptions: []testOption{ { TestString: "world", }, }, }, { name: "with no namespace and a non-matching key", reader: EnvOptionsReader{ Namespace: "MY_NAMESPACE", GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithString("HELLO", func(v string) { options = append(options, testOption{TestString: v}) }), }, expectedOptions: []testOption{}, }, { name: "with a bool config", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "true" } else if n == "WORLD" { return "false" } return "" }, }, configs: []ConfigFn{ WithBool("HELLO", func(b bool) { options = append(options, testOption{TestBool: b}) }), WithBool("WORLD", func(b bool) { options = append(options, testOption{TestBool: b}) }), }, expectedOptions: []testOption{ { TestBool: true, }, { TestBool: false, }, }, }, { name: "with an invalid bool config", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithBool("HELLO", func(b bool) { options = append(options, testOption{TestBool: b}) }), }, expectedOptions: []testOption{ { TestBool: false, 
}, }, }, { name: "with a duration config", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "60" } return "" }, }, configs: []ConfigFn{ WithDuration("HELLO", func(v time.Duration) { options = append(options, testOption{TestDuration: v}) }), }, expectedOptions: []testOption{ { TestDuration: 60_000_000, // 60 milliseconds }, }, }, { name: "with an invalid duration config", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithDuration("HELLO", func(v time.Duration) { options = append(options, testOption{TestDuration: v}) }), }, expectedOptions: []testOption{}, }, { name: "with headers", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "userId=42,userName=alice" } return "" }, }, configs: []ConfigFn{ WithHeaders("HELLO", func(v map[string]string) { options = append(options, testOption{TestHeaders: v}) }), }, expectedOptions: []testOption{ { TestHeaders: map[string]string{ "userId": "42", "userName": "alice", }, }, }, }, { name: "with invalid headers", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithHeaders("HELLO", func(v map[string]string) { options = append(options, testOption{TestHeaders: v}) }), }, expectedOptions: []testOption{ { TestHeaders: map[string]string{}, }, }, }, { name: "with URL", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "https://example.com" } return "" }, }, configs: []ConfigFn{ WithURL("HELLO", func(v *url.URL) { options = append(options, testOption{TestURL: v}) }), }, expectedOptions: []testOption{ { TestURL: parsedURL, }, }, }, { name: "with invalid URL", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "i nvalid://url" } return "" }, }, configs: []ConfigFn{ WithURL("HELLO", func(v *url.URL) { options = append(options, testOption{TestURL: v}) }), }, expectedOptions: []testOption{}, }, } { t.Run(testcase.name, func(t *testing.T) { testcase.reader.Apply(testcase.configs...) assert.Equal(t, testcase.expectedOptions, options) options = []testOption{} }) } } func TestWithTLSConfig(t *testing.T) { pool, err := createCertPool([]byte(WeakCertificate)) assert.NoError(t, err) reader := EnvOptionsReader{ GetEnv: func(n string) string { if n == "CERTIFICATE" { return "/path/cert.pem" } return "" }, ReadFile: func(p string) ([]byte, error) { if p == "/path/cert.pem" { return []byte(WeakCertificate), nil } return []byte{}, nil }, } var option testOption reader.Apply( WithCertPool("CERTIFICATE", func(cp *x509.CertPool) { option = testOption{TestTLS: &tls.Config{RootCAs: cp}} }), ) // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. 
assert.Equal(t, pool.Subjects(), option.TestTLS.RootCAs.Subjects()) } func TestWithClientCert(t *testing.T) { cert, err := tls.X509KeyPair([]byte(WeakCertificate), []byte(WeakKey)) assert.NoError(t, err) reader := EnvOptionsReader{ GetEnv: func(n string) string { switch n { case "CLIENT_CERTIFICATE": return "/path/tls.crt" case "CLIENT_KEY": return "/path/tls.key" } return "" }, ReadFile: func(n string) ([]byte, error) { switch n { case "/path/tls.crt": return []byte(WeakCertificate), nil case "/path/tls.key": return []byte(WeakKey), nil } return []byte{}, nil }, } var option testOption reader.Apply( WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { option = testOption{TestTLS: &tls.Config{Certificates: []tls.Certificate{c}}} }), ) assert.Equal(t, cert, option.TestTLS.Certificates[0]) reader.ReadFile = func(s string) ([]byte, error) { return nil, errors.New("oops") } option.TestTLS = nil reader.Apply( WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { option = testOption{TestTLS: &tls.Config{Certificates: []tls.Certificate{c}}} }), ) assert.Nil(t, option.TestTLS) reader.GetEnv = func(s string) string { return "" } option.TestTLS = nil reader.Apply( WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { option = testOption{TestTLS: &tls.Config{Certificates: []tls.Certificate{c}}} }), ) assert.Nil(t, option.TestTLS) } func TestStringToHeader(t *testing.T) { tests := []struct { name string value string want map[string]string }{ { name: "simple test", value: "userId=alice", want: map[string]string{"userId": "alice"}, }, { name: "simple test with spaces", value: " userId = alice ", want: map[string]string{"userId": "alice"}, }, { name: "simple header conforms to RFC 3986 spec", value: " userId = alice+test ", want: map[string]string{"userId": "alice+test"}, }, { name: "multiple headers encoded", value: "userId=alice,serverNode=DF%3A28,isProduction=false", want: map[string]string{ "userId": "alice", "serverNode": "DF:28", "isProduction": "false", }, }, { name: "multiple headers encoded per RFC 3986 spec", value: "userId=alice+test,serverNode=DF%3A28,isProduction=false,namespace=localhost/test", want: map[string]string{ "userId": "alice+test", "serverNode": "DF:28", "isProduction": "false", "namespace": "localhost/test", }, }, { name: "invalid headers format", value: "userId:alice", want: map[string]string{}, }, { name: "invalid key", value: "%XX=missing,userId=alice", want: map[string]string{ "userId": "alice", }, }, { name: "invalid value", value: "missing=%XX,userId=alice", want: map[string]string{ "userId": "alice", }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { assert.Equal(t, tt.want, stringToHeader(tt.value)) }) } } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/gen.go000066400000000000000000000100421452547353200302460ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal" //go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig\"}" --out=oconf/envconfig.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig_test.go.tmpl "--data={}" --out=oconf/envconfig_test.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry\"}" --out=oconf/options.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig\"}" --out=oconf/options_test.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl "--data={}" --out=oconf/optiontypes.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl "--data={}" --out=oconf/tls.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client.go.tmpl "--data={}" --out=otest/client.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client_test.go.tmpl "--data={\"internalImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal\"}" --out=otest/client_test.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/collector.go.tmpl "--data={\"oconfImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf\"}" --out=otest/collector.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl "--data={}" --out=transform/attribute.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute_test.go.tmpl "--data={}" --out=transform/attribute_test.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error.go.tmpl "--data={}" --out=transform/error.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error_test.go.tmpl "--data={}" --out=transform/error_test.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl "--data={}" --out=transform/metricdata.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl "--data={}" --out=transform/metricdata_test.go 
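// Editor's note (not part of the upstream file): the directives above are not
// run at build time; they are executed explicitly with the standard Go tool,
// assuming the gotmpl binary is available on PATH, e.g.:
//
//	go generate ./exporters/otlp/otlpmetric/otlpmetricgrpc/...
//
// Each invocation renders a shared template from internal/shared/otlp into this
// package, which is why the generated files are marked "DO NOT MODIFY".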
opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/000077500000000000000000000000001452547353200302555ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig.go000066400000000000000000000172121452547353200325650ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf" import ( "crypto/tls" "crypto/x509" "net/url" "os" "path" "strings" "time" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) // DefaultEnvOptionsReader is the default environments reader. var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{ GetEnv: os.Getenv, ReadFile: os.ReadFile, Namespace: "OTEL_EXPORTER_OTLP", } // ApplyGRPCEnvConfigs applies the env configurations for gRPC. func ApplyGRPCEnvConfigs(cfg Config) Config { opts := getOptionsFromEnv() for _, opt := range opts { cfg = opt.ApplyGRPCOption(cfg) } return cfg } // ApplyHTTPEnvConfigs applies the env configurations for HTTP. func ApplyHTTPEnvConfigs(cfg Config) Config { opts := getOptionsFromEnv() for _, opt := range opts { cfg = opt.ApplyHTTPOption(cfg) } return cfg } func getOptionsFromEnv() []GenericOption { opts := []GenericOption{} tlsConf := &tls.Config{} DefaultEnvOptionsReader.Apply( envconfig.WithURL("ENDPOINT", func(u *url.URL) { opts = append(opts, withEndpointScheme(u)) opts = append(opts, newSplitOption(func(cfg Config) Config { cfg.Metrics.Endpoint = u.Host // For OTLP/HTTP endpoint URLs without a per-signal // configuration, the passed endpoint is used as a base URL // and the signals are sent to these paths relative to that. cfg.Metrics.URLPath = path.Join(u.Path, DefaultMetricsPath) return cfg }, withEndpointForGRPC(u))) }), envconfig.WithURL("METRICS_ENDPOINT", func(u *url.URL) { opts = append(opts, withEndpointScheme(u)) opts = append(opts, newSplitOption(func(cfg Config) Config { cfg.Metrics.Endpoint = u.Host // For endpoint URLs for OTLP/HTTP per-signal variables, the // URL MUST be used as-is without any modification. The only // exception is that if an URL contains no path part, the root // path / MUST be used. 
path := u.Path if path == "" { path = "/" } cfg.Metrics.URLPath = path return cfg }, withEndpointForGRPC(u))) }), envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), envconfig.WithHeaders("METRICS_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }), withEnvAggPreference("METRICS_DEFAULT_HISTOGRAM_AGGREGATION", func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }), ) return opts } func withEndpointForGRPC(u *url.URL) func(cfg Config) Config { return func(cfg Config) Config { // For OTLP/gRPC endpoints, this is the target to which the // exporter is going to send telemetry. cfg.Metrics.Endpoint = path.Join(u.Host, u.Path) return cfg } } // WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression. 
func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) { return func(e *envconfig.EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { cp := NoCompression if v == "gzip" { cp = GzipCompression } fn(cp) } } } func withEndpointScheme(u *url.URL) GenericOption { switch strings.ToLower(u.Scheme) { case "http", "unix": return WithInsecure() default: return WithSecure() } } // revive:disable-next-line:flag-parameter func withInsecure(b bool) GenericOption { if b { return WithInsecure() } return WithSecure() } func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) { return func(e *envconfig.EnvOptionsReader) { if c.RootCAs != nil || len(c.Certificates) > 0 { fn(c) } } } func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) func(e *envconfig.EnvOptionsReader) { return func(e *envconfig.EnvOptionsReader) { if s, ok := e.GetEnvValue(n); ok { switch strings.ToLower(s) { case "cumulative": fn(cumulativeTemporality) case "delta": fn(deltaTemporality) case "lowmemory": fn(lowMemory) default: global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s) } } } } func cumulativeTemporality(metric.InstrumentKind) metricdata.Temporality { return metricdata.CumulativeTemporality } func deltaTemporality(ik metric.InstrumentKind) metricdata.Temporality { switch ik { case metric.InstrumentKindCounter, metric.InstrumentKindHistogram, metric.InstrumentKindObservableCounter: return metricdata.DeltaTemporality default: return metricdata.CumulativeTemporality } } func lowMemory(ik metric.InstrumentKind) metricdata.Temporality { switch ik { case metric.InstrumentKindCounter, metric.InstrumentKindHistogram: return metricdata.DeltaTemporality default: return metricdata.CumulativeTemporality } } func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e *envconfig.EnvOptionsReader) { return func(e *envconfig.EnvOptionsReader) { if s, ok := e.GetEnvValue(n); ok { switch strings.ToLower(s) { case "explicit_bucket_histogram": fn(metric.DefaultAggregationSelector) case "base2_exponential_bucket_histogram": fn(func(kind metric.InstrumentKind) metric.Aggregation { if kind == metric.InstrumentKindHistogram { return metric.AggregationBase2ExponentialHistogram{ MaxSize: 160, MaxScale: 20, NoMinMax: false, } } return metric.DefaultAggregationSelector(kind) }) default: global.Warn("OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", "value", s) } } } } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/envconfig_test.go000066400000000000000000000150701452547353200336240ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/oconf/envconfig_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
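// Editor's note (summary of the selectors above, not part of the generated
// source): OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE maps onto the
// selectors as follows:
//
//	cumulative -> every instrument kind reports CumulativeTemporality
//	delta      -> Counter, Histogram and ObservableCounter report DeltaTemporality
//	lowmemory  -> only Counter and Histogram report DeltaTemporality
//
// Any other value is ignored with a warning and the SDK default selector is
// kept. For example, running a process with
//
//	OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE=delta
//
// selects the delta mapping shown above, while UpDownCounter kinds stay cumulative.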
package oconf import ( "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) func TestWithEnvTemporalityPreference(t *testing.T) { origReader := DefaultEnvOptionsReader.GetEnv tests := []struct { name string envValue string want map[metric.InstrumentKind]metricdata.Temporality }{ { name: "default do not set the selector", envValue: "", }, { name: "non-normative do not set the selector", envValue: "non-normative", }, { name: "cumulative", envValue: "cumulative", want: map[metric.InstrumentKind]metricdata.Temporality{ metric.InstrumentKindCounter: metricdata.CumulativeTemporality, metric.InstrumentKindHistogram: metricdata.CumulativeTemporality, metric.InstrumentKindUpDownCounter: metricdata.CumulativeTemporality, metric.InstrumentKindObservableCounter: metricdata.CumulativeTemporality, metric.InstrumentKindObservableUpDownCounter: metricdata.CumulativeTemporality, metric.InstrumentKindObservableGauge: metricdata.CumulativeTemporality, }, }, { name: "delta", envValue: "delta", want: map[metric.InstrumentKind]metricdata.Temporality{ metric.InstrumentKindCounter: metricdata.DeltaTemporality, metric.InstrumentKindHistogram: metricdata.DeltaTemporality, metric.InstrumentKindUpDownCounter: metricdata.CumulativeTemporality, metric.InstrumentKindObservableCounter: metricdata.DeltaTemporality, metric.InstrumentKindObservableUpDownCounter: metricdata.CumulativeTemporality, metric.InstrumentKindObservableGauge: metricdata.CumulativeTemporality, }, }, { name: "lowmemory", envValue: "lowmemory", want: map[metric.InstrumentKind]metricdata.Temporality{ metric.InstrumentKindCounter: metricdata.DeltaTemporality, metric.InstrumentKindHistogram: metricdata.DeltaTemporality, metric.InstrumentKindUpDownCounter: metricdata.CumulativeTemporality, metric.InstrumentKindObservableCounter: metricdata.CumulativeTemporality, metric.InstrumentKindObservableUpDownCounter: metricdata.CumulativeTemporality, metric.InstrumentKindObservableGauge: metricdata.CumulativeTemporality, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { DefaultEnvOptionsReader.GetEnv = func(key string) string { if key == "OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE" { return tt.envValue } return origReader(key) } cfg := Config{} cfg = ApplyGRPCEnvConfigs(cfg) if tt.want == nil { // There is no function set, the SDK's default is used. 
assert.Nil(t, cfg.Metrics.TemporalitySelector) return } require.NotNil(t, cfg.Metrics.TemporalitySelector) for ik, want := range tt.want { assert.Equal(t, want, cfg.Metrics.TemporalitySelector(ik)) } }) } DefaultEnvOptionsReader.GetEnv = origReader } func TestWithEnvAggPreference(t *testing.T) { origReader := DefaultEnvOptionsReader.GetEnv tests := []struct { name string envValue string want map[metric.InstrumentKind]metric.Aggregation }{ { name: "default do not set the selector", envValue: "", }, { name: "non-normative do not set the selector", envValue: "non-normative", }, { name: "explicit_bucket_histogram", envValue: "explicit_bucket_histogram", want: map[metric.InstrumentKind]metric.Aggregation{ metric.InstrumentKindCounter: metric.DefaultAggregationSelector(metric.InstrumentKindCounter), metric.InstrumentKindHistogram: metric.DefaultAggregationSelector(metric.InstrumentKindHistogram), metric.InstrumentKindUpDownCounter: metric.DefaultAggregationSelector(metric.InstrumentKindUpDownCounter), metric.InstrumentKindObservableCounter: metric.DefaultAggregationSelector(metric.InstrumentKindObservableCounter), metric.InstrumentKindObservableUpDownCounter: metric.DefaultAggregationSelector(metric.InstrumentKindObservableUpDownCounter), metric.InstrumentKindObservableGauge: metric.DefaultAggregationSelector(metric.InstrumentKindObservableGauge), }, }, { name: "base2_exponential_bucket_histogram", envValue: "base2_exponential_bucket_histogram", want: map[metric.InstrumentKind]metric.Aggregation{ metric.InstrumentKindCounter: metric.DefaultAggregationSelector(metric.InstrumentKindCounter), metric.InstrumentKindHistogram: metric.AggregationBase2ExponentialHistogram{ MaxSize: 160, MaxScale: 20, NoMinMax: false, }, metric.InstrumentKindUpDownCounter: metric.DefaultAggregationSelector(metric.InstrumentKindUpDownCounter), metric.InstrumentKindObservableCounter: metric.DefaultAggregationSelector(metric.InstrumentKindObservableCounter), metric.InstrumentKindObservableUpDownCounter: metric.DefaultAggregationSelector(metric.InstrumentKindObservableUpDownCounter), metric.InstrumentKindObservableGauge: metric.DefaultAggregationSelector(metric.InstrumentKindObservableGauge), }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { DefaultEnvOptionsReader.GetEnv = func(key string) string { if key == "OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION" { return tt.envValue } return origReader(key) } cfg := Config{} cfg = ApplyGRPCEnvConfigs(cfg) if tt.want == nil { // There is no function set, the SDK's default is used. assert.Nil(t, cfg.Metrics.AggregationSelector) return } require.NotNil(t, cfg.Metrics.AggregationSelector) for ik, want := range tt.want { assert.Equal(t, want, cfg.Metrics.AggregationSelector(ik)) } }) } DefaultEnvOptionsReader.GetEnv = origReader } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go000066400000000000000000000230211452547353200322750ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/oconf/options.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf" import ( "crypto/tls" "fmt" "path" "strings" "time" "google.golang.org/grpc" "google.golang.org/grpc/backoff" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/encoding/gzip" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry" "go.opentelemetry.io/otel/sdk/metric" ) const ( // DefaultMaxAttempts describes how many times the driver // should retry the sending of the payload in case of a // retryable error. DefaultMaxAttempts int = 5 // DefaultMetricsPath is a default URL path for endpoint that // receives metrics. DefaultMetricsPath string = "/v1/metrics" // DefaultBackoff is a default base backoff time used in the // exponential backoff strategy. DefaultBackoff time.Duration = 300 * time.Millisecond // DefaultTimeout is a default max waiting time for the backend to process // each span or metrics batch. DefaultTimeout time.Duration = 10 * time.Second ) type ( SignalConfig struct { Endpoint string Insecure bool TLSCfg *tls.Config Headers map[string]string Compression Compression Timeout time.Duration URLPath string // gRPC configurations GRPCCredentials credentials.TransportCredentials TemporalitySelector metric.TemporalitySelector AggregationSelector metric.AggregationSelector } Config struct { // Signal specific configurations Metrics SignalConfig RetryConfig retry.Config // gRPC configurations ReconnectionPeriod time.Duration ServiceConfig string DialOptions []grpc.DialOption GRPCConn *grpc.ClientConn } ) // NewHTTPConfig returns a new Config with all settings applied from opts and // any unset setting using the default HTTP config values. func NewHTTPConfig(opts ...HTTPOption) Config { cfg := Config{ Metrics: SignalConfig{ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort), URLPath: DefaultMetricsPath, Compression: NoCompression, Timeout: DefaultTimeout, TemporalitySelector: metric.DefaultTemporalitySelector, AggregationSelector: metric.DefaultAggregationSelector, }, RetryConfig: retry.DefaultConfig, } cfg = ApplyHTTPEnvConfigs(cfg) for _, opt := range opts { cfg = opt.ApplyHTTPOption(cfg) } cfg.Metrics.URLPath = cleanPath(cfg.Metrics.URLPath, DefaultMetricsPath) return cfg } // cleanPath returns a path with all spaces trimmed and all redundancies // removed. If urlPath is empty or cleaning it results in an empty string, // defaultPath is returned instead. func cleanPath(urlPath string, defaultPath string) string { tmp := path.Clean(strings.TrimSpace(urlPath)) if tmp == "." { return defaultPath } if !path.IsAbs(tmp) { tmp = fmt.Sprintf("/%s", tmp) } return tmp } // NewGRPCConfig returns a new Config with all settings applied from opts and // any unset setting using the default gRPC config values. 
func NewGRPCConfig(opts ...GRPCOption) Config { cfg := Config{ Metrics: SignalConfig{ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort), URLPath: DefaultMetricsPath, Compression: NoCompression, Timeout: DefaultTimeout, TemporalitySelector: metric.DefaultTemporalitySelector, AggregationSelector: metric.DefaultAggregationSelector, }, RetryConfig: retry.DefaultConfig, } cfg = ApplyGRPCEnvConfigs(cfg) for _, opt := range opts { cfg = opt.ApplyGRPCOption(cfg) } if cfg.ServiceConfig != "" { cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig)) } // Prioritize GRPCCredentials over Insecure (passing both is an error). if cfg.Metrics.GRPCCredentials != nil { cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials)) } else if cfg.Metrics.Insecure { cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials())) } else { // Default to using the host's root CA. creds := credentials.NewTLS(nil) cfg.Metrics.GRPCCredentials = creds cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds)) } if cfg.Metrics.Compression == GzipCompression { cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name))) } if cfg.ReconnectionPeriod != 0 { p := grpc.ConnectParams{ Backoff: backoff.DefaultConfig, MinConnectTimeout: cfg.ReconnectionPeriod, } cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p)) } return cfg } type ( // GenericOption applies an option to the HTTP or gRPC driver. GenericOption interface { ApplyHTTPOption(Config) Config ApplyGRPCOption(Config) Config // A private method to prevent users implementing the // interface and so future additions to it will not // violate compatibility. private() } // HTTPOption applies an option to the HTTP driver. HTTPOption interface { ApplyHTTPOption(Config) Config // A private method to prevent users implementing the // interface and so future additions to it will not // violate compatibility. private() } // GRPCOption applies an option to the gRPC driver. GRPCOption interface { ApplyGRPCOption(Config) Config // A private method to prevent users implementing the // interface and so future additions to it will not // violate compatibility. private() } ) // genericOption is an option that applies the same logic // for both gRPC and HTTP. type genericOption struct { fn func(Config) Config } func (g *genericOption) ApplyGRPCOption(cfg Config) Config { return g.fn(cfg) } func (g *genericOption) ApplyHTTPOption(cfg Config) Config { return g.fn(cfg) } func (genericOption) private() {} func newGenericOption(fn func(cfg Config) Config) GenericOption { return &genericOption{fn: fn} } // splitOption is an option that applies different logic // for gRPC and HTTP. type splitOption struct { httpFn func(Config) Config grpcFn func(Config) Config } func (g *splitOption) ApplyGRPCOption(cfg Config) Config { return g.grpcFn(cfg) } func (g *splitOption) ApplyHTTPOption(cfg Config) Config { return g.httpFn(cfg) } func (splitOption) private() {} func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption { return &splitOption{httpFn: httpFn, grpcFn: grpcFn} } // httpOption is an option that is only applied to the HTTP driver.
type httpOption struct { fn func(Config) Config } func (h *httpOption) ApplyHTTPOption(cfg Config) Config { return h.fn(cfg) } func (httpOption) private() {} func NewHTTPOption(fn func(cfg Config) Config) HTTPOption { return &httpOption{fn: fn} } // grpcOption is an option that is only applied to the gRPC driver. type grpcOption struct { fn func(Config) Config } func (h *grpcOption) ApplyGRPCOption(cfg Config) Config { return h.fn(cfg) } func (grpcOption) private() {} func NewGRPCOption(fn func(cfg Config) Config) GRPCOption { return &grpcOption{fn: fn} } // Generic Options func WithEndpoint(endpoint string) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Metrics.Endpoint = endpoint return cfg }) } func WithCompression(compression Compression) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Metrics.Compression = compression return cfg }) } func WithURLPath(urlPath string) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Metrics.URLPath = urlPath return cfg }) } func WithRetry(rc retry.Config) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.RetryConfig = rc return cfg }) } func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption { return newSplitOption(func(cfg Config) Config { cfg.Metrics.TLSCfg = tlsCfg.Clone() return cfg }, func(cfg Config) Config { cfg.Metrics.GRPCCredentials = credentials.NewTLS(tlsCfg) return cfg }) } func WithInsecure() GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Metrics.Insecure = true return cfg }) } func WithSecure() GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Metrics.Insecure = false return cfg }) } func WithHeaders(headers map[string]string) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Metrics.Headers = headers return cfg }) } func WithTimeout(duration time.Duration) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Metrics.Timeout = duration return cfg }) } func WithTemporalitySelector(selector metric.TemporalitySelector) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Metrics.TemporalitySelector = selector return cfg }) } func WithAggregationSelector(selector metric.AggregationSelector) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Metrics.AggregationSelector = selector return cfg }) } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options_test.go000066400000000000000000000365751452547353200333560ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/oconf/options_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
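// Editor's note (illustrative sketch, not part of the generated source): because
// every With* constructor above returns a GenericOption, the same option value
// satisfies both the HTTPOption and GRPCOption interfaces. From inside this
// internal package, configuration might look roughly like (the endpoint value is
// a placeholder):
//
//	cfg := NewGRPCConfig(
//		WithEndpoint("collector.example:4317"),
//		WithCompression(GzipCompression),
//		WithTimeout(30*time.Second),
//	)
//
// Environment variables are applied first (ApplyGRPCEnvConfigs), so explicitly
// passed options take precedence over OTEL_EXPORTER_OTLP_* settings.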
package oconf import ( "errors" "testing" "time" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) const ( WeakCertificate = ` -----BEGIN CERTIFICATE----- MIIBhzCCASygAwIBAgIRANHpHgAWeTnLZpTSxCKs0ggwCgYIKoZIzj0EAwIwEjEQ MA4GA1UEChMHb3RlbC1nbzAeFw0yMTA0MDExMzU5MDNaFw0yMTA0MDExNDU5MDNa MBIxEDAOBgNVBAoTB290ZWwtZ28wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAS9 nWSkmPCxShxnp43F+PrOtbGV7sNfkbQ/kxzi9Ego0ZJdiXxkmv/C05QFddCW7Y0Z sJCLHGogQsYnWJBXUZOVo2MwYTAOBgNVHQ8BAf8EBAMCB4AwEwYDVR0lBAwwCgYI KwYBBQUHAwEwDAYDVR0TAQH/BAIwADAsBgNVHREEJTAjgglsb2NhbGhvc3SHEAAA AAAAAAAAAAAAAAAAAAGHBH8AAAEwCgYIKoZIzj0EAwIDSQAwRgIhANwZVVKvfvQ/ 1HXsTvgH+xTQswOwSSKYJ1cVHQhqK7ZbAiEAus8NxpTRnp5DiTMuyVmhVNPB+bVH Lhnm4N/QDk5rek0= -----END CERTIFICATE----- ` WeakPrivateKey = ` -----BEGIN PRIVATE KEY----- MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgN8HEXiXhvByrJ1zK SFT6Y2l2KqDWwWzKf+t4CyWrNKehRANCAAS9nWSkmPCxShxnp43F+PrOtbGV7sNf kbQ/kxzi9Ego0ZJdiXxkmv/C05QFddCW7Y0ZsJCLHGogQsYnWJBXUZOV -----END PRIVATE KEY----- ` ) type env map[string]string func (e *env) getEnv(env string) string { return (*e)[env] } type fileReader map[string][]byte func (f *fileReader) readFile(filename string) ([]byte, error) { if b, ok := (*f)[filename]; ok { return b, nil } return nil, errors.New("file not found") } func TestConfigs(t *testing.T) { tlsCert, err := CreateTLSConfig([]byte(WeakCertificate)) assert.NoError(t, err) tests := []struct { name string opts []GenericOption env env fileReader fileReader asserts func(t *testing.T, c *Config, grpcOption bool) }{ { name: "Test default configs", asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.Equal(t, "localhost:4317", c.Metrics.Endpoint) } else { assert.Equal(t, "localhost:4318", c.Metrics.Endpoint) } assert.Equal(t, NoCompression, c.Metrics.Compression) assert.Equal(t, map[string]string(nil), c.Metrics.Headers) assert.Equal(t, 10*time.Second, c.Metrics.Timeout) }, }, // Endpoint Tests { name: "Test With Endpoint", opts: []GenericOption{ WithEndpoint("someendpoint"), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "someendpoint", c.Metrics.Endpoint) }, }, { name: "Test Environment Endpoint", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "https://env.endpoint/prefix", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.False(t, c.Metrics.Insecure) if grpcOption { assert.Equal(t, "env.endpoint/prefix", c.Metrics.Endpoint) } else { assert.Equal(t, "env.endpoint", c.Metrics.Endpoint) assert.Equal(t, "/prefix/v1/metrics", c.Metrics.URLPath) } }, }, { name: "Test Environment Signal Specific Endpoint", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "https://overrode.by.signal.specific/env/var", "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "http://env.metrics.endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.True(t, c.Metrics.Insecure) assert.Equal(t, "env.metrics.endpoint", c.Metrics.Endpoint) if !grpcOption { assert.Equal(t, "/", c.Metrics.URLPath) } }, }, { name: "Test Mixed Environment and With Endpoint", opts: []GenericOption{ WithEndpoint("metrics_endpoint"), }, env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "env_endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "metrics_endpoint", c.Metrics.Endpoint) }, }, { name: "Test Environment Endpoint with HTTP scheme", env: map[string]string{ 
"OTEL_EXPORTER_OTLP_ENDPOINT": "http://env_endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "env_endpoint", c.Metrics.Endpoint) assert.Equal(t, true, c.Metrics.Insecure) }, }, { name: "Test Environment Endpoint with HTTP scheme and leading & trailingspaces", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": " http://env_endpoint ", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "env_endpoint", c.Metrics.Endpoint) assert.Equal(t, true, c.Metrics.Insecure) }, }, { name: "Test Environment Endpoint with HTTPS scheme", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "https://env_endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "env_endpoint", c.Metrics.Endpoint) assert.Equal(t, false, c.Metrics.Insecure) }, }, { name: "Test Environment Signal Specific Endpoint with uppercase scheme", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "HTTPS://overrode_by_signal_specific", "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "HtTp://env_metrics_endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "env_metrics_endpoint", c.Metrics.Endpoint) assert.Equal(t, true, c.Metrics.Insecure) }, }, // Certificate tests { name: "Test Default Certificate", asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.NotNil(t, c.Metrics.GRPCCredentials) } else { assert.Nil(t, c.Metrics.TLSCfg) } }, }, { name: "Test With Certificate", opts: []GenericOption{ WithTLSClientConfig(tlsCert), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { // TODO: make sure gRPC's credentials actually works assert.NotNil(t, c.Metrics.GRPCCredentials) } else { // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Metrics.TLSCfg.RootCAs.Subjects()) } }, }, { name: "Test Environment Certificate", env: map[string]string{ "OTEL_EXPORTER_OTLP_CERTIFICATE": "cert_path", }, fileReader: fileReader{ "cert_path": []byte(WeakCertificate), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.NotNil(t, c.Metrics.GRPCCredentials) } else { // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Metrics.TLSCfg.RootCAs.Subjects()) } }, }, { name: "Test Environment Signal Specific Certificate", env: map[string]string{ "OTEL_EXPORTER_OTLP_CERTIFICATE": "overrode_by_signal_specific", "OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE": "cert_path", }, fileReader: fileReader{ "cert_path": []byte(WeakCertificate), "invalid_cert": []byte("invalid certificate file."), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.NotNil(t, c.Metrics.GRPCCredentials) } else { // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. 
assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Metrics.TLSCfg.RootCAs.Subjects()) } }, }, { name: "Test Mixed Environment and With Certificate", opts: []GenericOption{}, env: map[string]string{ "OTEL_EXPORTER_OTLP_CERTIFICATE": "cert_path", }, fileReader: fileReader{ "cert_path": []byte(WeakCertificate), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.NotNil(t, c.Metrics.GRPCCredentials) } else { // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. assert.Equal(t, 1, len(c.Metrics.TLSCfg.RootCAs.Subjects())) } }, }, // Headers tests { name: "Test With Headers", opts: []GenericOption{ WithHeaders(map[string]string{"h1": "v1"}), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, map[string]string{"h1": "v1"}, c.Metrics.Headers) }, }, { name: "Test Environment Headers", env: map[string]string{"OTEL_EXPORTER_OTLP_HEADERS": "h1=v1,h2=v2"}, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Metrics.Headers) }, }, { name: "Test Environment Signal Specific Headers", env: map[string]string{ "OTEL_EXPORTER_OTLP_HEADERS": "overrode_by_signal_specific", "OTEL_EXPORTER_OTLP_METRICS_HEADERS": "h1=v1,h2=v2", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Metrics.Headers) }, }, { name: "Test Mixed Environment and With Headers", env: map[string]string{"OTEL_EXPORTER_OTLP_HEADERS": "h1=v1,h2=v2"}, opts: []GenericOption{ WithHeaders(map[string]string{"m1": "mv1"}), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, map[string]string{"m1": "mv1"}, c.Metrics.Headers) }, }, // Compression Tests { name: "Test With Compression", opts: []GenericOption{ WithCompression(GzipCompression), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, GzipCompression, c.Metrics.Compression) }, }, { name: "Test Environment Compression", env: map[string]string{ "OTEL_EXPORTER_OTLP_COMPRESSION": "gzip", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, GzipCompression, c.Metrics.Compression) }, }, { name: "Test Environment Signal Specific Compression", env: map[string]string{ "OTEL_EXPORTER_OTLP_METRICS_COMPRESSION": "gzip", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, GzipCompression, c.Metrics.Compression) }, }, { name: "Test Mixed Environment and With Compression", opts: []GenericOption{ WithCompression(NoCompression), }, env: map[string]string{ "OTEL_EXPORTER_OTLP_METRICS_COMPRESSION": "gzip", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, NoCompression, c.Metrics.Compression) }, }, // Timeout Tests { name: "Test With Timeout", opts: []GenericOption{ WithTimeout(time.Duration(5 * time.Second)), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, 5*time.Second, c.Metrics.Timeout) }, }, { name: "Test Environment Timeout", env: map[string]string{ "OTEL_EXPORTER_OTLP_TIMEOUT": "15000", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, c.Metrics.Timeout, 15*time.Second) }, }, { name: "Test Environment Signal Specific Timeout", env: map[string]string{ "OTEL_EXPORTER_OTLP_TIMEOUT": "15000", "OTEL_EXPORTER_OTLP_METRICS_TIMEOUT": "28000", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, c.Metrics.Timeout, 28*time.Second) }, }, { name: "Test Mixed 
Environment and With Timeout", env: map[string]string{ "OTEL_EXPORTER_OTLP_TIMEOUT": "15000", "OTEL_EXPORTER_OTLP_METRICS_TIMEOUT": "28000", }, opts: []GenericOption{ WithTimeout(5 * time.Second), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, c.Metrics.Timeout, 5*time.Second) }, }, // Temporality Selector Tests { name: "WithTemporalitySelector", opts: []GenericOption{ WithTemporalitySelector(deltaSelector), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { // Function value comparisons are disallowed, test non-default // behavior of a TemporalitySelector here to ensure our "catch // all" was set. var undefinedKind metric.InstrumentKind got := c.Metrics.TemporalitySelector assert.Equal(t, metricdata.DeltaTemporality, got(undefinedKind)) }, }, // Aggregation Selector Tests { name: "WithAggregationSelector", opts: []GenericOption{ WithAggregationSelector(dropSelector), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { // Function value comparisons are disallowed, test non-default // behavior of a AggregationSelector here to ensure our "catch // all" was set. var undefinedKind metric.InstrumentKind got := c.Metrics.AggregationSelector assert.Equal(t, metric.AggregationDrop{}, got(undefinedKind)) }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { origEOR := DefaultEnvOptionsReader DefaultEnvOptionsReader = envconfig.EnvOptionsReader{ GetEnv: tt.env.getEnv, ReadFile: tt.fileReader.readFile, Namespace: "OTEL_EXPORTER_OTLP", } t.Cleanup(func() { DefaultEnvOptionsReader = origEOR }) // Tests Generic options as HTTP Options cfg := NewHTTPConfig(asHTTPOptions(tt.opts)...) tt.asserts(t, &cfg, false) // Tests Generic options as gRPC Options cfg = NewGRPCConfig(asGRPCOptions(tt.opts)...) tt.asserts(t, &cfg, true) }) } } func dropSelector(metric.InstrumentKind) metric.Aggregation { return metric.AggregationDrop{} } func deltaSelector(metric.InstrumentKind) metricdata.Temporality { return metricdata.DeltaTemporality } func asHTTPOptions(opts []GenericOption) []HTTPOption { converted := make([]HTTPOption, len(opts)) for i, o := range opts { converted[i] = NewHTTPOption(o.ApplyHTTPOption) } return converted } func asGRPCOptions(opts []GenericOption) []GRPCOption { converted := make([]GRPCOption, len(opts)) for i, o := range opts { converted[i] = NewGRPCOption(o.ApplyGRPCOption) } return converted } func TestCleanPath(t *testing.T) { type args struct { urlPath string defaultPath string } tests := []struct { name string args args want string }{ { name: "clean empty path", args: args{ urlPath: "", defaultPath: "DefaultPath", }, want: "DefaultPath", }, { name: "clean metrics path", args: args{ urlPath: "/prefix/v1/metrics", defaultPath: "DefaultMetricsPath", }, want: "/prefix/v1/metrics", }, { name: "clean traces path", args: args{ urlPath: "https://env_endpoint", defaultPath: "DefaultTracesPath", }, want: "/https:/env_endpoint", }, { name: "spaces trimmed", args: args{ urlPath: " /dir", }, want: "/dir", }, { name: "clean path empty", args: args{ urlPath: "dir/..", defaultPath: "DefaultTracesPath", }, want: "DefaultTracesPath", }, { name: "make absolute", args: args{ urlPath: "dir/a", }, want: "/dir/a", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := cleanPath(tt.args.urlPath, tt.args.defaultPath); got != tt.want { t.Errorf("CleanPath() = %v, want %v", got, tt.want) } }) } } 
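// Editor's note (summary of the cases above, not part of the generated source):
// taken together, these tests pin down the precedence used by this package, from
// lowest to highest:
//
//	1. built-in defaults (localhost:4317 for gRPC, localhost:4318 for HTTP, 10s timeout)
//	2. generic OTEL_EXPORTER_OTLP_* environment variables
//	3. signal-specific OTEL_EXPORTER_OTLP_METRICS_* environment variables
//	4. explicit With* options passed to NewHTTPConfig / NewGRPCConfig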
opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/optiontypes.go000066400000000000000000000044001452547353200331770ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf" import "time" const ( // DefaultCollectorGRPCPort is the default gRPC port of the collector. DefaultCollectorGRPCPort uint16 = 4317 // DefaultCollectorHTTPPort is the default HTTP port of the collector. DefaultCollectorHTTPPort uint16 = 4318 // DefaultCollectorHost is the host address the Exporter will attempt // connect to if no collector address is provided. DefaultCollectorHost string = "localhost" ) // Compression describes the compression used for payloads sent to the // collector. type Compression int const ( // NoCompression tells the driver to send payloads without // compression. NoCompression Compression = iota // GzipCompression tells the driver to send payloads after // compressing them with gzip. GzipCompression ) // RetrySettings defines configuration for retrying batches in case of export failure // using an exponential backoff. type RetrySettings struct { // Enabled indicates whether to not retry sending batches in case of export failure. Enabled bool // InitialInterval the time to wait after the first failure before retrying. InitialInterval time.Duration // MaxInterval is the upper bound on backoff interval. Once this value is reached the delay between // consecutive retries will always be `MaxInterval`. MaxInterval time.Duration // MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request/batch. // Once this value is reached, the data is discarded. MaxElapsedTime time.Duration } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/tls.go000066400000000000000000000030011452547353200314000ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
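// Illustrative sketch (not part of the upstream archive): the RetrySettings
// type defined in optiontypes.go above mirrors the retry.Config used by this
// exporter. Under that assumption, a configuration that retries with
// exponential backoff starting at 5s, capped at 30s, and giving up after one
// minute would be expressed as:
//
//	rs := oconf.RetrySettings{
//		Enabled:         true,
//		InitialInterval: 5 * time.Second,
//		MaxInterval:     30 * time.Second,
//		MaxElapsedTime:  time.Minute,
//	}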
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf" import ( "crypto/tls" "crypto/x509" "errors" "os" ) // ReadTLSConfigFromFile reads a PEM certificate file and creates // a tls.Config that will use this certifate to verify a server certificate. func ReadTLSConfigFromFile(path string) (*tls.Config, error) { b, err := os.ReadFile(path) if err != nil { return nil, err } return CreateTLSConfig(b) } // CreateTLSConfig creates a tls.Config from a raw certificate bytes // to verify a server certificate. func CreateTLSConfig(certBytes []byte) (*tls.Config, error) { cp := x509.NewCertPool() if ok := cp.AppendCertsFromPEM(certBytes); !ok { return nil, errors.New("failed to append certificate to the cert pool") } return &tls.Config{ RootCAs: cp, }, nil } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/otest/000077500000000000000000000000001452547353200303075ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/otest/client.go000066400000000000000000000224011452547353200321130ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/otest/client.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otest // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/otest" import ( "context" "fmt" "testing" "time" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" "go.opentelemetry.io/otel" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" collpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" cpb "go.opentelemetry.io/proto/otlp/common/v1" mpb "go.opentelemetry.io/proto/otlp/metrics/v1" rpb "go.opentelemetry.io/proto/otlp/resource/v1" ) var ( // Sat Jan 01 2000 00:00:00 GMT+0000. 
start = time.Date(2000, time.January, 0o1, 0, 0, 0, 0, time.FixedZone("GMT", 0)) end = start.Add(30 * time.Second) kvAlice = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{StringValue: "alice"}, }} kvBob = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{StringValue: "bob"}, }} kvSrvName = &cpb.KeyValue{Key: "service.name", Value: &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{StringValue: "test server"}, }} kvSrvVer = &cpb.KeyValue{Key: "service.version", Value: &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{StringValue: "v0.1.0"}, }} min, max, sum = 2.0, 4.0, 90.0 hdp = []*mpb.HistogramDataPoint{ { Attributes: []*cpb.KeyValue{kvAlice}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Count: 30, Sum: &sum, ExplicitBounds: []float64{1, 5}, BucketCounts: []uint64{0, 30, 0}, Min: &min, Max: &max, }, } hist = &mpb.Histogram{ AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, DataPoints: hdp, } dPtsInt64 = []*mpb.NumberDataPoint{ { Attributes: []*cpb.KeyValue{kvAlice}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Value: &mpb.NumberDataPoint_AsInt{AsInt: 1}, }, { Attributes: []*cpb.KeyValue{kvBob}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Value: &mpb.NumberDataPoint_AsInt{AsInt: 2}, }, } dPtsFloat64 = []*mpb.NumberDataPoint{ { Attributes: []*cpb.KeyValue{kvAlice}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Value: &mpb.NumberDataPoint_AsDouble{AsDouble: 1.0}, }, { Attributes: []*cpb.KeyValue{kvBob}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Value: &mpb.NumberDataPoint_AsDouble{AsDouble: 2.0}, }, } sumInt64 = &mpb.Sum{ AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, IsMonotonic: true, DataPoints: dPtsInt64, } sumFloat64 = &mpb.Sum{ AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, IsMonotonic: false, DataPoints: dPtsFloat64, } gaugeInt64 = &mpb.Gauge{DataPoints: dPtsInt64} gaugeFloat64 = &mpb.Gauge{DataPoints: dPtsFloat64} metrics = []*mpb.Metric{ { Name: "int64-gauge", Description: "Gauge with int64 values", Unit: "1", Data: &mpb.Metric_Gauge{Gauge: gaugeInt64}, }, { Name: "float64-gauge", Description: "Gauge with float64 values", Unit: "1", Data: &mpb.Metric_Gauge{Gauge: gaugeFloat64}, }, { Name: "int64-sum", Description: "Sum with int64 values", Unit: "1", Data: &mpb.Metric_Sum{Sum: sumInt64}, }, { Name: "float64-sum", Description: "Sum with float64 values", Unit: "1", Data: &mpb.Metric_Sum{Sum: sumFloat64}, }, { Name: "histogram", Description: "Histogram", Unit: "1", Data: &mpb.Metric_Histogram{Histogram: hist}, }, } scope = &cpb.InstrumentationScope{ Name: "test/code/path", Version: "v0.1.0", } scopeMetrics = []*mpb.ScopeMetrics{ { Scope: scope, Metrics: metrics, SchemaUrl: semconv.SchemaURL, }, } res = &rpb.Resource{ Attributes: []*cpb.KeyValue{kvSrvName, kvSrvVer}, } resourceMetrics = &mpb.ResourceMetrics{ Resource: res, ScopeMetrics: scopeMetrics, SchemaUrl: semconv.SchemaURL, } ) type Client interface { UploadMetrics(context.Context, *mpb.ResourceMetrics) error ForceFlush(context.Context) error Shutdown(context.Context) error } // ClientFactory is a function that when called returns a // Client implementation that is connected to also returned // Collector implementation. 
The Client is ready to upload metric data to the // Collector which is ready to store that data. // // If resultCh is not nil, the returned Collector needs to use the responses // from that channel to send back to the client for every export request. type ClientFactory func(resultCh <-chan ExportResult) (Client, Collector) // RunClientTests runs a suite of Client integration tests. For example: // // t.Run("Integration", RunClientTests(factory)) func RunClientTests(f ClientFactory) func(*testing.T) { return func(t *testing.T) { t.Run("ClientHonorsContextErrors", func(t *testing.T) { t.Run("Shutdown", testCtxErrs(func() func(context.Context) error { c, _ := f(nil) return c.Shutdown })) t.Run("ForceFlush", testCtxErrs(func() func(context.Context) error { c, _ := f(nil) return c.ForceFlush })) t.Run("UploadMetrics", testCtxErrs(func() func(context.Context) error { c, _ := f(nil) return func(ctx context.Context) error { return c.UploadMetrics(ctx, nil) } })) }) t.Run("ForceFlushFlushes", func(t *testing.T) { ctx := context.Background() client, collector := f(nil) require.NoError(t, client.UploadMetrics(ctx, resourceMetrics)) require.NoError(t, client.ForceFlush(ctx)) rm := collector.Collect().Dump() // Data correctness is not important, just it was received. require.Greater(t, len(rm), 0, "no data uploaded") require.NoError(t, client.Shutdown(ctx)) rm = collector.Collect().Dump() assert.Len(t, rm, 0, "client did not flush all data") }) t.Run("UploadMetrics", func(t *testing.T) { ctx := context.Background() client, coll := f(nil) require.NoError(t, client.UploadMetrics(ctx, resourceMetrics)) require.NoError(t, client.Shutdown(ctx)) got := coll.Collect().Dump() require.Len(t, got, 1, "upload of one ResourceMetrics") diff := cmp.Diff(got[0], resourceMetrics, cmp.Comparer(proto.Equal)) if diff != "" { t.Fatalf("unexpected ResourceMetrics:\n%s", diff) } }) t.Run("PartialSuccess", func(t *testing.T) { const n, msg = 2, "bad data" rCh := make(chan ExportResult, 3) rCh <- ExportResult{ Response: &collpb.ExportMetricsServiceResponse{ PartialSuccess: &collpb.ExportMetricsPartialSuccess{ RejectedDataPoints: n, ErrorMessage: msg, }, }, } rCh <- ExportResult{ Response: &collpb.ExportMetricsServiceResponse{ PartialSuccess: &collpb.ExportMetricsPartialSuccess{ // Should not be logged. 
RejectedDataPoints: 0, ErrorMessage: "", }, }, } rCh <- ExportResult{ Response: &collpb.ExportMetricsServiceResponse{}, } ctx := context.Background() client, _ := f(rCh) defer func(orig otel.ErrorHandler) { otel.SetErrorHandler(orig) }(otel.GetErrorHandler()) errs := []error{} eh := otel.ErrorHandlerFunc(func(e error) { errs = append(errs, e) }) otel.SetErrorHandler(eh) require.NoError(t, client.UploadMetrics(ctx, resourceMetrics)) require.NoError(t, client.UploadMetrics(ctx, resourceMetrics)) require.NoError(t, client.UploadMetrics(ctx, resourceMetrics)) require.NoError(t, client.Shutdown(ctx)) require.Equal(t, 1, len(errs)) want := fmt.Sprintf("%s (%d metric data points rejected)", msg, n) assert.ErrorContains(t, errs[0], want) }) } } func testCtxErrs(factory func() func(context.Context) error) func(t *testing.T) { return func(t *testing.T) { t.Helper() ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) t.Run("DeadlineExceeded", func(t *testing.T) { innerCtx, innerCancel := context.WithTimeout(ctx, time.Nanosecond) t.Cleanup(innerCancel) <-innerCtx.Done() f := factory() assert.ErrorIs(t, f(innerCtx), context.DeadlineExceeded) }) t.Run("Canceled", func(t *testing.T) { innerCtx, innerCancel := context.WithCancel(ctx) innerCancel() f := factory() assert.ErrorIs(t, f(innerCtx), context.Canceled) }) } } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/otest/client_test.go000066400000000000000000000044621452547353200331610ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/otest/client_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
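// Illustrative sketch (not part of the upstream archive): the PartialSuccess
// test above relies on the exporter reporting rejected data points through
// the global OTel error handler rather than returning an error from
// UploadMetrics. An application that wants to observe those partial-success
// reports could install its own handler, for example:
//
//	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
//		log.Printf("otlp export problem: %v", err)
//	}))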
package otest import ( "context" "testing" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" cpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" mpb "go.opentelemetry.io/proto/otlp/metrics/v1" ) type client struct { rCh <-chan ExportResult storage *Storage } func (c *client) Temporality(k metric.InstrumentKind) metricdata.Temporality { return metric.DefaultTemporalitySelector(k) } func (c *client) Aggregation(k metric.InstrumentKind) metric.Aggregation { return metric.DefaultAggregationSelector(k) } func (c *client) Collect() *Storage { return c.storage } func (c *client) UploadMetrics(ctx context.Context, rm *mpb.ResourceMetrics) error { c.storage.Add(&cpb.ExportMetricsServiceRequest{ ResourceMetrics: []*mpb.ResourceMetrics{rm}, }) if c.rCh != nil { r := <-c.rCh if r.Response != nil && r.Response.GetPartialSuccess() != nil { msg := r.Response.GetPartialSuccess().GetErrorMessage() n := r.Response.GetPartialSuccess().GetRejectedDataPoints() if msg != "" || n > 0 { otel.Handle(internal.MetricPartialSuccessError(n, msg)) } } return r.Err } return ctx.Err() } func (c *client) ForceFlush(ctx context.Context) error { return ctx.Err() } func (c *client) Shutdown(ctx context.Context) error { return ctx.Err() } func TestClientTests(t *testing.T) { factory := func(rCh <-chan ExportResult) (Client, Collector) { c := &client{rCh: rCh, storage: NewStorage()} return c, c } t.Run("Integration", RunClientTests(factory)) } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/otest/collector.go000066400000000000000000000277251452547353200326410ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/otest/collector.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otest // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/otest" import ( "bytes" "compress/gzip" "context" "crypto/ecdsa" "crypto/elliptic" "crypto/rand" "crypto/tls" "crypto/x509" "crypto/x509/pkix" // nolint:depguard // This is for testing. "encoding/pem" "errors" "fmt" "io" "math/big" "net" "net/http" "net/url" "sync" "time" "google.golang.org/grpc" "google.golang.org/grpc/metadata" "google.golang.org/protobuf/proto" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf" collpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" mpb "go.opentelemetry.io/proto/otlp/metrics/v1" ) // Collector is the collection target a Client sends metric uploads to. type Collector interface { Collect() *Storage } type ExportResult struct { Response *collpb.ExportMetricsServiceResponse Err error } // Storage stores uploaded OTLP metric data in their proto form. type Storage struct { dataMu sync.Mutex data []*mpb.ResourceMetrics } // NewStorage returns a configure storage ready to store received requests. 
func NewStorage() *Storage { return &Storage{} } // Add adds the request to the Storage. func (s *Storage) Add(request *collpb.ExportMetricsServiceRequest) { s.dataMu.Lock() defer s.dataMu.Unlock() s.data = append(s.data, request.ResourceMetrics...) } // Dump returns all added ResourceMetrics and clears the storage. func (s *Storage) Dump() []*mpb.ResourceMetrics { s.dataMu.Lock() defer s.dataMu.Unlock() var data []*mpb.ResourceMetrics data, s.data = s.data, []*mpb.ResourceMetrics{} return data } // GRPCCollector is an OTLP gRPC server that collects all requests it receives. type GRPCCollector struct { collpb.UnimplementedMetricsServiceServer headersMu sync.Mutex headers metadata.MD storage *Storage resultCh <-chan ExportResult listener net.Listener srv *grpc.Server } // NewGRPCCollector returns a *GRPCCollector that is listening at the provided // endpoint. // // If endpoint is an empty string, the returned collector will be listening on // the localhost interface at an OS chosen port. // // If errCh is not nil, the collector will respond to Export calls with errors // sent on that channel. This means that if errCh is not nil Export calls will // block until an error is received. func NewGRPCCollector(endpoint string, resultCh <-chan ExportResult) (*GRPCCollector, error) { if endpoint == "" { endpoint = "localhost:0" } c := &GRPCCollector{ storage: NewStorage(), resultCh: resultCh, } var err error c.listener, err = net.Listen("tcp", endpoint) if err != nil { return nil, err } c.srv = grpc.NewServer() collpb.RegisterMetricsServiceServer(c.srv, c) go func() { _ = c.srv.Serve(c.listener) }() return c, nil } // Shutdown shuts down the gRPC server closing all open connections and // listeners immediately. func (c *GRPCCollector) Shutdown() { c.srv.Stop() } // Addr returns the net.Addr c is listening at. func (c *GRPCCollector) Addr() net.Addr { return c.listener.Addr() } // Collect returns the Storage holding all collected requests. func (c *GRPCCollector) Collect() *Storage { return c.storage } // Headers returns the headers received for all requests. func (c *GRPCCollector) Headers() map[string][]string { // Makes a copy. c.headersMu.Lock() defer c.headersMu.Unlock() return metadata.Join(c.headers) } // Export handles the export req. func (c *GRPCCollector) Export(ctx context.Context, req *collpb.ExportMetricsServiceRequest) (*collpb.ExportMetricsServiceResponse, error) { c.storage.Add(req) if h, ok := metadata.FromIncomingContext(ctx); ok { c.headersMu.Lock() c.headers = metadata.Join(c.headers, h) c.headersMu.Unlock() } if c.resultCh != nil { r := <-c.resultCh if r.Response == nil { return &collpb.ExportMetricsServiceResponse{}, r.Err } return r.Response, r.Err } return &collpb.ExportMetricsServiceResponse{}, nil } var emptyExportMetricsServiceResponse = func() []byte { body := collpb.ExportMetricsServiceResponse{} r, err := proto.Marshal(&body) if err != nil { panic(err) } return r }() type HTTPResponseError struct { Err error Status int Header http.Header } func (e *HTTPResponseError) Error() string { return fmt.Sprintf("%d: %s", e.Status, e.Err) } func (e *HTTPResponseError) Unwrap() error { return e.Err } // HTTPCollector is an OTLP HTTP server that collects all requests it receives. type HTTPCollector struct { plainTextResponse bool headersMu sync.Mutex headers http.Header storage *Storage resultCh <-chan ExportResult listener net.Listener srv *http.Server } // NewHTTPCollector returns a *HTTPCollector that is listening at the provided // endpoint. 
// // If endpoint is an empty string, the returned collector will be listening on // the localhost interface at an OS chosen port, not use TLS, and listen at the // default OTLP metric endpoint path ("/v1/metrics"). If the endpoint contains // a prefix of "https" the server will generate weak self-signed TLS // certificates and use them to server data. If the endpoint contains a path, // that path will be used instead of the default OTLP metric endpoint path. // // If errCh is not nil, the collector will respond to HTTP requests with errors // sent on that channel. This means that if errCh is not nil Export calls will // block until an error is received. func NewHTTPCollector(endpoint string, resultCh <-chan ExportResult, opts ...func(*HTTPCollector)) (*HTTPCollector, error) { u, err := url.Parse(endpoint) if err != nil { return nil, err } if u.Host == "" { u.Host = "localhost:0" } if u.Path == "" { u.Path = oconf.DefaultMetricsPath } c := &HTTPCollector{ headers: http.Header{}, storage: NewStorage(), resultCh: resultCh, } for _, opt := range opts { opt(c) } c.listener, err = net.Listen("tcp", u.Host) if err != nil { return nil, err } mux := http.NewServeMux() mux.Handle(u.Path, http.HandlerFunc(c.handler)) c.srv = &http.Server{ Handler: mux, ReadTimeout: 10 * time.Second, WriteTimeout: 10 * time.Second, } if u.Scheme == "https" { cert, err := weakCertificate() if err != nil { return nil, err } c.srv.TLSConfig = &tls.Config{ Certificates: []tls.Certificate{cert}, } go func() { _ = c.srv.ServeTLS(c.listener, "", "") }() } else { go func() { _ = c.srv.Serve(c.listener) }() } return c, nil } // WithHTTPCollectorRespondingPlainText makes the HTTPCollector return // a plaintext, instead of protobuf, response. func WithHTTPCollectorRespondingPlainText() func(*HTTPCollector) { return func(s *HTTPCollector) { s.plainTextResponse = true } } // Shutdown shuts down the HTTP server closing all open connections and // listeners. func (c *HTTPCollector) Shutdown(ctx context.Context) error { return c.srv.Shutdown(ctx) } // Addr returns the net.Addr c is listening at. func (c *HTTPCollector) Addr() net.Addr { return c.listener.Addr() } // Collect returns the Storage holding all collected requests. func (c *HTTPCollector) Collect() *Storage { return c.storage } // Headers returns the headers received for all requests. func (c *HTTPCollector) Headers() map[string][]string { // Makes a copy. c.headersMu.Lock() defer c.headersMu.Unlock() return c.headers.Clone() } func (c *HTTPCollector) handler(w http.ResponseWriter, r *http.Request) { c.respond(w, c.record(r)) } func (c *HTTPCollector) record(r *http.Request) ExportResult { // Currently only supports protobuf. 
if v := r.Header.Get("Content-Type"); v != "application/x-protobuf" { err := fmt.Errorf("content-type not supported: %s", v) return ExportResult{Err: err} } body, err := c.readBody(r) if err != nil { return ExportResult{Err: err} } pbRequest := &collpb.ExportMetricsServiceRequest{} err = proto.Unmarshal(body, pbRequest) if err != nil { return ExportResult{ Err: &HTTPResponseError{ Err: err, Status: http.StatusInternalServerError, }, } } c.storage.Add(pbRequest) c.headersMu.Lock() for k, vals := range r.Header { for _, v := range vals { c.headers.Add(k, v) } } c.headersMu.Unlock() if c.resultCh != nil { return <-c.resultCh } return ExportResult{Err: err} } func (c *HTTPCollector) readBody(r *http.Request) (body []byte, err error) { var reader io.ReadCloser switch r.Header.Get("Content-Encoding") { case "gzip": reader, err = gzip.NewReader(r.Body) if err != nil { _ = reader.Close() return nil, &HTTPResponseError{ Err: err, Status: http.StatusInternalServerError, } } default: reader = r.Body } defer func() { cErr := reader.Close() if err == nil && cErr != nil { err = &HTTPResponseError{ Err: cErr, Status: http.StatusInternalServerError, } } }() body, err = io.ReadAll(reader) if err != nil { err = &HTTPResponseError{ Err: err, Status: http.StatusInternalServerError, } } return body, err } func (c *HTTPCollector) respond(w http.ResponseWriter, resp ExportResult) { if resp.Err != nil { w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.Header().Set("X-Content-Type-Options", "nosniff") var e *HTTPResponseError if errors.As(resp.Err, &e) { for k, vals := range e.Header { for _, v := range vals { w.Header().Add(k, v) } } w.WriteHeader(e.Status) fmt.Fprintln(w, e.Error()) } else { w.WriteHeader(http.StatusBadRequest) fmt.Fprintln(w, resp.Err.Error()) } return } if c.plainTextResponse { w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.WriteHeader(http.StatusOK) _, _ = w.Write([]byte("OK")) return } w.Header().Set("Content-Type", "application/x-protobuf") w.WriteHeader(http.StatusOK) if resp.Response == nil { _, _ = w.Write(emptyExportMetricsServiceResponse) } else { r, err := proto.Marshal(resp.Response) if err != nil { panic(err) } _, _ = w.Write(r) } } // Based on https://golang.org/src/crypto/tls/generate_cert.go, // simplified and weakened. 
func weakCertificate() (tls.Certificate, error) { priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) if err != nil { return tls.Certificate{}, err } notBefore := time.Now() notAfter := notBefore.Add(time.Hour) max := new(big.Int).Lsh(big.NewInt(1), 128) sn, err := rand.Int(rand.Reader, max) if err != nil { return tls.Certificate{}, err } tmpl := x509.Certificate{ SerialNumber: sn, Subject: pkix.Name{Organization: []string{"otel-go"}}, NotBefore: notBefore, NotAfter: notAfter, KeyUsage: x509.KeyUsageDigitalSignature, ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, BasicConstraintsValid: true, DNSNames: []string{"localhost"}, IPAddresses: []net.IP{net.IPv6loopback, net.IPv4(127, 0, 0, 1)}, } derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv) if err != nil { return tls.Certificate{}, err } var certBuf bytes.Buffer err = pem.Encode(&certBuf, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) if err != nil { return tls.Certificate{}, err } privBytes, err := x509.MarshalPKCS8PrivateKey(priv) if err != nil { return tls.Certificate{}, err } var privBuf bytes.Buffer err = pem.Encode(&privBuf, &pem.Block{Type: "PRIVATE KEY", Bytes: privBytes}) if err != nil { return tls.Certificate{}, err } return tls.X509KeyPair(certBuf.Bytes(), privBuf.Bytes()) } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess.go000066400000000000000000000042161452547353200325300ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/partialsuccess.go // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal" import "fmt" // PartialSuccess represents the underlying error for all handling // OTLP partial success messages. Use `errors.Is(err, // PartialSuccess{})` to test whether an error passed to the OTel // error handler belongs to this category. type PartialSuccess struct { ErrorMessage string RejectedItems int64 RejectedKind string } var _ error = PartialSuccess{} // Error implements the error interface. func (ps PartialSuccess) Error() string { msg := ps.ErrorMessage if msg == "" { msg = "empty message" } return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind) } // Is supports the errors.Is() interface. func (ps PartialSuccess) Is(err error) bool { _, ok := err.(PartialSuccess) return ok } // TracePartialSuccessError returns an error describing a partial success // response for the trace signal. func TracePartialSuccessError(itemsRejected int64, errorMessage string) error { return PartialSuccess{ ErrorMessage: errorMessage, RejectedItems: itemsRejected, RejectedKind: "spans", } } // MetricPartialSuccessError returns an error describing a partial success // response for the metric signal. 
func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error { return PartialSuccess{ ErrorMessage: errorMessage, RejectedItems: itemsRejected, RejectedKind: "metric data points", } } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/partialsuccess_test.go000066400000000000000000000031221452547353200335620ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/partialsuccess_test.go // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal import ( "errors" "strings" "testing" "github.com/stretchr/testify/require" ) func requireErrorString(t *testing.T, expect string, err error) { t.Helper() require.NotNil(t, err) require.Error(t, err) require.True(t, errors.Is(err, PartialSuccess{})) const pfx = "OTLP partial success: " msg := err.Error() require.True(t, strings.HasPrefix(msg, pfx)) require.Equal(t, expect, msg[len(pfx):]) } func TestPartialSuccessFormat(t *testing.T) { requireErrorString(t, "empty message (0 metric data points rejected)", MetricPartialSuccessError(0, "")) requireErrorString(t, "help help (0 metric data points rejected)", MetricPartialSuccessError(0, "help help")) requireErrorString(t, "what happened (10 metric data points rejected)", MetricPartialSuccessError(10, "what happened")) requireErrorString(t, "what happened (15 spans rejected)", TracePartialSuccessError(15, "what happened")) } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/000077500000000000000000000000001452547353200303165ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry.go000066400000000000000000000116021452547353200320120ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/retry/retry.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package retry provides request retry functionality that can perform // configurable exponential backoff for transient errors and honor any // explicit throttle responses received. package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry" import ( "context" "fmt" "time" "github.com/cenkalti/backoff/v4" ) // DefaultConfig are the recommended defaults to use. 
var DefaultConfig = Config{ Enabled: true, InitialInterval: 5 * time.Second, MaxInterval: 30 * time.Second, MaxElapsedTime: time.Minute, } // Config defines configuration for retrying batches in case of export failure // using an exponential backoff. type Config struct { // Enabled indicates whether to not retry sending batches in case of // export failure. Enabled bool // InitialInterval the time to wait after the first failure before // retrying. InitialInterval time.Duration // MaxInterval is the upper bound on backoff interval. Once this value is // reached the delay between consecutive retries will always be // `MaxInterval`. MaxInterval time.Duration // MaxElapsedTime is the maximum amount of time (including retries) spent // trying to send a request/batch. Once this value is reached, the data // is discarded. MaxElapsedTime time.Duration } // RequestFunc wraps a request with retry logic. type RequestFunc func(context.Context, func(context.Context) error) error // EvaluateFunc returns if an error is retry-able and if an explicit throttle // duration should be honored that was included in the error. // // The function must return true if the error argument is retry-able, // otherwise it must return false for the first return parameter. // // The function must return a non-zero time.Duration if the error contains // explicit throttle duration that should be honored, otherwise it must return // a zero valued time.Duration. type EvaluateFunc func(error) (bool, time.Duration) // RequestFunc returns a RequestFunc using the evaluate function to determine // if requests can be retried and based on the exponential backoff // configuration of c. func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { if !c.Enabled { return func(ctx context.Context, fn func(context.Context) error) error { return fn(ctx) } } return func(ctx context.Context, fn func(context.Context) error) error { // Do not use NewExponentialBackOff since it calls Reset and the code here // must call Reset after changing the InitialInterval (this saves an // unnecessary call to Now). b := &backoff.ExponentialBackOff{ InitialInterval: c.InitialInterval, RandomizationFactor: backoff.DefaultRandomizationFactor, Multiplier: backoff.DefaultMultiplier, MaxInterval: c.MaxInterval, MaxElapsedTime: c.MaxElapsedTime, Stop: backoff.Stop, Clock: backoff.SystemClock, } b.Reset() for { err := fn(ctx) if err == nil { return nil } retryable, throttle := evaluate(err) if !retryable { return err } bOff := b.NextBackOff() if bOff == backoff.Stop { return fmt.Errorf("max retry time elapsed: %w", err) } // Wait for the greater of the backoff or throttle delay. var delay time.Duration if bOff > throttle { delay = bOff } else { elapsed := b.GetElapsedTime() if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime { return fmt.Errorf("max retry time would elapse: %w", err) } delay = throttle } if ctxErr := waitFunc(ctx, delay); ctxErr != nil { return fmt.Errorf("%w: %s", ctxErr, err) } } } } // Allow override for testing. var waitFunc = wait // wait takes the caller's context, and the amount of time to wait. It will // return nil if the timer fires before or at the same time as the context's // deadline. This indicates that the call can be retried. func wait(ctx context.Context, delay time.Duration) error { timer := time.NewTimer(delay) defer timer.Stop() select { case <-ctx.Done(): // Handle the case where the timer and context deadline end // simultaneously by prioritizing the timer expiration nil value // response. 
select { case <-timer.C: default: return ctx.Err() } case <-timer.C: } return nil } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry/retry_test.go000066400000000000000000000145671452547353200330660ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/retry/retry_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package retry import ( "context" "errors" "math" "sync" "testing" "time" "github.com/cenkalti/backoff/v4" "github.com/stretchr/testify/assert" ) func TestWait(t *testing.T) { tests := []struct { ctx context.Context delay time.Duration expected error }{ { ctx: context.Background(), delay: time.Duration(0), }, { ctx: context.Background(), delay: time.Duration(1), }, { ctx: context.Background(), delay: time.Duration(-1), }, { ctx: func() context.Context { ctx, cancel := context.WithCancel(context.Background()) cancel() return ctx }(), // Ensure the timer and context do not end simultaneously. delay: 1 * time.Hour, expected: context.Canceled, }, } for _, test := range tests { err := wait(test.ctx, test.delay) if test.expected == nil { assert.NoError(t, err) } else { assert.ErrorIs(t, err, test.expected) } } } func TestNonRetryableError(t *testing.T) { ev := func(error) (bool, time.Duration) { return false, 0 } reqFunc := Config{ Enabled: true, InitialInterval: 1 * time.Nanosecond, MaxInterval: 1 * time.Nanosecond, // Never stop retrying. MaxElapsedTime: 0, }.RequestFunc(ev) ctx := context.Background() assert.NoError(t, reqFunc(ctx, func(context.Context) error { return nil })) assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error { return assert.AnError }), assert.AnError) } func TestThrottledRetry(t *testing.T) { // Ensure the throttle delay is used by making longer than backoff delay. throttleDelay, backoffDelay := time.Second, time.Nanosecond ev := func(error) (bool, time.Duration) { // Retry everything with a throttle delay. return true, throttleDelay } reqFunc := Config{ Enabled: true, InitialInterval: backoffDelay, MaxInterval: backoffDelay, // Never stop retrying. MaxElapsedTime: 0, }.RequestFunc(ev) origWait := waitFunc var done bool waitFunc = func(_ context.Context, delay time.Duration) error { assert.Equal(t, throttleDelay, delay, "retry not throttled") // Try twice to ensure call is attempted again after delay. if done { return assert.AnError } done = true return nil } defer func() { waitFunc = origWait }() ctx := context.Background() assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error { return errors.New("not this error") }), assert.AnError) } func TestBackoffRetry(t *testing.T) { ev := func(error) (bool, time.Duration) { return true, 0 } delay := time.Nanosecond reqFunc := Config{ Enabled: true, InitialInterval: delay, MaxInterval: delay, // Never stop retrying. 
MaxElapsedTime: 0, }.RequestFunc(ev) origWait := waitFunc var done bool waitFunc = func(_ context.Context, d time.Duration) error { delta := math.Ceil(float64(delay) * backoff.DefaultRandomizationFactor) assert.InDelta(t, delay, d, delta, "retry not backoffed") // Try twice to ensure call is attempted again after delay. if done { return assert.AnError } done = true return nil } t.Cleanup(func() { waitFunc = origWait }) ctx := context.Background() assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error { return errors.New("not this error") }), assert.AnError) } func TestBackoffRetryCanceledContext(t *testing.T) { ev := func(error) (bool, time.Duration) { return true, 0 } delay := time.Millisecond reqFunc := Config{ Enabled: true, InitialInterval: delay, MaxInterval: delay, // Never stop retrying. MaxElapsedTime: 10 * time.Millisecond, }.RequestFunc(ev) ctx, cancel := context.WithCancel(context.Background()) count := 0 cancel() err := reqFunc(ctx, func(context.Context) error { count++ return assert.AnError }) assert.ErrorIs(t, err, context.Canceled) assert.Contains(t, err.Error(), assert.AnError.Error()) assert.Equal(t, 1, count) } func TestThrottledRetryGreaterThanMaxElapsedTime(t *testing.T) { // Ensure the throttle delay is used by making longer than backoff delay. tDelay, bDelay := time.Hour, time.Nanosecond ev := func(error) (bool, time.Duration) { return true, tDelay } reqFunc := Config{ Enabled: true, InitialInterval: bDelay, MaxInterval: bDelay, MaxElapsedTime: tDelay - (time.Nanosecond), }.RequestFunc(ev) ctx := context.Background() assert.Contains(t, reqFunc(ctx, func(context.Context) error { return assert.AnError }).Error(), "max retry time would elapse: ") } func TestMaxElapsedTime(t *testing.T) { ev := func(error) (bool, time.Duration) { return true, 0 } delay := time.Nanosecond reqFunc := Config{ Enabled: true, // InitialInterval > MaxElapsedTime means immediate return. InitialInterval: 2 * delay, MaxElapsedTime: delay, }.RequestFunc(ev) ctx := context.Background() assert.Contains(t, reqFunc(ctx, func(context.Context) error { return assert.AnError }).Error(), "max retry time elapsed: ") } func TestRetryNotEnabled(t *testing.T) { ev := func(error) (bool, time.Duration) { t.Error("evaluated retry when not enabled") return false, 0 } reqFunc := Config{}.RequestFunc(ev) ctx := context.Background() assert.NoError(t, reqFunc(ctx, func(context.Context) error { return nil })) assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error { return assert.AnError }), assert.AnError) } func TestRetryConcurrentSafe(t *testing.T) { ev := func(error) (bool, time.Duration) { return true, 0 } reqFunc := Config{ Enabled: true, }.RequestFunc(ev) var wg sync.WaitGroup ctx := context.Background() for i := 1; i < 5; i++ { wg.Add(1) go func() { defer wg.Done() var done bool assert.NoError(t, reqFunc(ctx, func(context.Context) error { if !done { done = true return assert.AnError } return nil })) }() } wg.Wait() } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/000077500000000000000000000000001452547353200311645ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/attribute.go000066400000000000000000000075771452547353200335360ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. 
// source: internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform" import ( "go.opentelemetry.io/otel/attribute" cpb "go.opentelemetry.io/proto/otlp/common/v1" ) // AttrIter transforms an attribute iterator into OTLP key-values. func AttrIter(iter attribute.Iterator) []*cpb.KeyValue { l := iter.Len() if l == 0 { return nil } out := make([]*cpb.KeyValue, 0, l) for iter.Next() { out = append(out, KeyValue(iter.Attribute())) } return out } // KeyValues transforms a slice of attribute KeyValues into OTLP key-values. func KeyValues(attrs []attribute.KeyValue) []*cpb.KeyValue { if len(attrs) == 0 { return nil } out := make([]*cpb.KeyValue, 0, len(attrs)) for _, kv := range attrs { out = append(out, KeyValue(kv)) } return out } // KeyValue transforms an attribute KeyValue into an OTLP key-value. func KeyValue(kv attribute.KeyValue) *cpb.KeyValue { return &cpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)} } // Value transforms an attribute Value into an OTLP AnyValue. func Value(v attribute.Value) *cpb.AnyValue { av := new(cpb.AnyValue) switch v.Type() { case attribute.BOOL: av.Value = &cpb.AnyValue_BoolValue{ BoolValue: v.AsBool(), } case attribute.BOOLSLICE: av.Value = &cpb.AnyValue_ArrayValue{ ArrayValue: &cpb.ArrayValue{ Values: boolSliceValues(v.AsBoolSlice()), }, } case attribute.INT64: av.Value = &cpb.AnyValue_IntValue{ IntValue: v.AsInt64(), } case attribute.INT64SLICE: av.Value = &cpb.AnyValue_ArrayValue{ ArrayValue: &cpb.ArrayValue{ Values: int64SliceValues(v.AsInt64Slice()), }, } case attribute.FLOAT64: av.Value = &cpb.AnyValue_DoubleValue{ DoubleValue: v.AsFloat64(), } case attribute.FLOAT64SLICE: av.Value = &cpb.AnyValue_ArrayValue{ ArrayValue: &cpb.ArrayValue{ Values: float64SliceValues(v.AsFloat64Slice()), }, } case attribute.STRING: av.Value = &cpb.AnyValue_StringValue{ StringValue: v.AsString(), } case attribute.STRINGSLICE: av.Value = &cpb.AnyValue_ArrayValue{ ArrayValue: &cpb.ArrayValue{ Values: stringSliceValues(v.AsStringSlice()), }, } default: av.Value = &cpb.AnyValue_StringValue{ StringValue: "INVALID", } } return av } func boolSliceValues(vals []bool) []*cpb.AnyValue { converted := make([]*cpb.AnyValue, len(vals)) for i, v := range vals { converted[i] = &cpb.AnyValue{ Value: &cpb.AnyValue_BoolValue{ BoolValue: v, }, } } return converted } func int64SliceValues(vals []int64) []*cpb.AnyValue { converted := make([]*cpb.AnyValue, len(vals)) for i, v := range vals { converted[i] = &cpb.AnyValue{ Value: &cpb.AnyValue_IntValue{ IntValue: v, }, } } return converted } func float64SliceValues(vals []float64) []*cpb.AnyValue { converted := make([]*cpb.AnyValue, len(vals)) for i, v := range vals { converted[i] = &cpb.AnyValue{ Value: &cpb.AnyValue_DoubleValue{ DoubleValue: v, }, } } return converted } func stringSliceValues(vals []string) []*cpb.AnyValue { converted := 
make([]*cpb.AnyValue, len(vals)) for i, v := range vals { converted[i] = &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{ StringValue: v, }, } } return converted } attribute_test.go000066400000000000000000000131021452547353200344730ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/transform/attribute_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package transform import ( "testing" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/attribute" cpb "go.opentelemetry.io/proto/otlp/common/v1" ) var ( attrBool = attribute.Bool("bool", true) attrBoolSlice = attribute.BoolSlice("bool slice", []bool{true, false}) attrInt = attribute.Int("int", 1) attrIntSlice = attribute.IntSlice("int slice", []int{-1, 1}) attrInt64 = attribute.Int64("int64", 1) attrInt64Slice = attribute.Int64Slice("int64 slice", []int64{-1, 1}) attrFloat64 = attribute.Float64("float64", 1) attrFloat64Slice = attribute.Float64Slice("float64 slice", []float64{-1, 1}) attrString = attribute.String("string", "o") attrStringSlice = attribute.StringSlice("string slice", []string{"o", "n"}) attrInvalid = attribute.KeyValue{ Key: attribute.Key("invalid"), Value: attribute.Value{}, } valBoolTrue = &cpb.AnyValue{Value: &cpb.AnyValue_BoolValue{BoolValue: true}} valBoolFalse = &cpb.AnyValue{Value: &cpb.AnyValue_BoolValue{BoolValue: false}} valBoolSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{ ArrayValue: &cpb.ArrayValue{ Values: []*cpb.AnyValue{valBoolTrue, valBoolFalse}, }, }} valIntOne = &cpb.AnyValue{Value: &cpb.AnyValue_IntValue{IntValue: 1}} valIntNOne = &cpb.AnyValue{Value: &cpb.AnyValue_IntValue{IntValue: -1}} valIntSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{ ArrayValue: &cpb.ArrayValue{ Values: []*cpb.AnyValue{valIntNOne, valIntOne}, }, }} valDblOne = &cpb.AnyValue{Value: &cpb.AnyValue_DoubleValue{DoubleValue: 1}} valDblNOne = &cpb.AnyValue{Value: &cpb.AnyValue_DoubleValue{DoubleValue: -1}} valDblSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{ ArrayValue: &cpb.ArrayValue{ Values: []*cpb.AnyValue{valDblNOne, valDblOne}, }, }} valStrO = &cpb.AnyValue{Value: &cpb.AnyValue_StringValue{StringValue: "o"}} valStrN = &cpb.AnyValue{Value: &cpb.AnyValue_StringValue{StringValue: "n"}} valStrSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{ ArrayValue: &cpb.ArrayValue{ Values: []*cpb.AnyValue{valStrO, valStrN}, }, }} kvBool = &cpb.KeyValue{Key: "bool", Value: valBoolTrue} kvBoolSlice = &cpb.KeyValue{Key: "bool slice", Value: valBoolSlice} kvInt = &cpb.KeyValue{Key: "int", Value: valIntOne} kvIntSlice = &cpb.KeyValue{Key: "int slice", Value: valIntSlice} kvInt64 = &cpb.KeyValue{Key: "int64", Value: valIntOne} kvInt64Slice = &cpb.KeyValue{Key: "int64 slice", Value: valIntSlice} kvFloat64 = &cpb.KeyValue{Key: "float64", Value: valDblOne} kvFloat64Slice = &cpb.KeyValue{Key: "float64 slice", Value: valDblSlice} 
kvString = &cpb.KeyValue{Key: "string", Value: valStrO} kvStringSlice = &cpb.KeyValue{Key: "string slice", Value: valStrSlice} kvInvalid = &cpb.KeyValue{ Key: "invalid", Value: &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{StringValue: "INVALID"}, }, } ) type attributeTest struct { name string in []attribute.KeyValue want []*cpb.KeyValue } func TestAttributeTransforms(t *testing.T) { for _, test := range []attributeTest{ {"nil", nil, nil}, {"empty", []attribute.KeyValue{}, nil}, { "invalid", []attribute.KeyValue{attrInvalid}, []*cpb.KeyValue{kvInvalid}, }, { "bool", []attribute.KeyValue{attrBool}, []*cpb.KeyValue{kvBool}, }, { "bool slice", []attribute.KeyValue{attrBoolSlice}, []*cpb.KeyValue{kvBoolSlice}, }, { "int", []attribute.KeyValue{attrInt}, []*cpb.KeyValue{kvInt}, }, { "int slice", []attribute.KeyValue{attrIntSlice}, []*cpb.KeyValue{kvIntSlice}, }, { "int64", []attribute.KeyValue{attrInt64}, []*cpb.KeyValue{kvInt64}, }, { "int64 slice", []attribute.KeyValue{attrInt64Slice}, []*cpb.KeyValue{kvInt64Slice}, }, { "float64", []attribute.KeyValue{attrFloat64}, []*cpb.KeyValue{kvFloat64}, }, { "float64 slice", []attribute.KeyValue{attrFloat64Slice}, []*cpb.KeyValue{kvFloat64Slice}, }, { "string", []attribute.KeyValue{attrString}, []*cpb.KeyValue{kvString}, }, { "string slice", []attribute.KeyValue{attrStringSlice}, []*cpb.KeyValue{kvStringSlice}, }, { "all", []attribute.KeyValue{ attrBool, attrBoolSlice, attrInt, attrIntSlice, attrInt64, attrInt64Slice, attrFloat64, attrFloat64Slice, attrString, attrStringSlice, attrInvalid, }, []*cpb.KeyValue{ kvBool, kvBoolSlice, kvInt, kvIntSlice, kvInt64, kvInt64Slice, kvFloat64, kvFloat64Slice, kvString, kvStringSlice, kvInvalid, }, }, } { t.Run(test.name, func(t *testing.T) { t.Run("KeyValues", func(t *testing.T) { assert.ElementsMatch(t, test.want, KeyValues(test.in)) }) t.Run("AttrIter", func(t *testing.T) { s := attribute.NewSet(test.in...) assert.ElementsMatch(t, test.want, AttrIter(s.Iter())) }) }) } } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error.go000066400000000000000000000060001452547353200326400ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/transform/error.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
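// Illustrative sketch (not part of the upstream archive): the attribute
// transform exercised by the tests above maps SDK attributes onto OTLP
// key-values one-to-one. Under that assumption, a single call such as:
//
//	kvs := transform.KeyValues([]attribute.KeyValue{
//		attribute.String("user", "alice"),
//	})
//
// yields a slice containing &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{
// Value: &cpb.AnyValue_StringValue{StringValue: "alice"}}}, matching the
// fixtures used in the otest client above.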
package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform" import ( "errors" "fmt" "strings" mpb "go.opentelemetry.io/proto/otlp/metrics/v1" ) var ( errUnknownAggregation = errors.New("unknown aggregation") errUnknownTemporality = errors.New("unknown temporality") ) type errMetric struct { m *mpb.Metric err error } func (e errMetric) Unwrap() error { return e.err } func (e errMetric) Error() string { format := "invalid metric (name: %q, description: %q, unit: %q): %s" return fmt.Sprintf(format, e.m.Name, e.m.Description, e.m.Unit, e.err) } func (e errMetric) Is(target error) bool { return errors.Is(e.err, target) } // multiErr is used by the data-type transform functions to wrap multiple // errors into a single return value. The error message will show all errors // as a list and scope them by the datatype name that is returning them. type multiErr struct { datatype string errs []error } // errOrNil returns nil if e contains no errors, otherwise it returns e. func (e *multiErr) errOrNil() error { if len(e.errs) == 0 { return nil } return e } // append adds err to e. If err is a multiErr, its errs are flattened into e. func (e *multiErr) append(err error) { // Do not use errors.As here, this should only be flattened one layer. If // there is a *multiErr several steps down the chain, all the errors above // it will be discarded if errors.As is used instead. switch other := err.(type) { case *multiErr: // Flatten err errors into e. e.errs = append(e.errs, other.errs...) default: e.errs = append(e.errs, err) } } func (e *multiErr) Error() string { es := make([]string, len(e.errs)) for i, err := range e.errs { es[i] = fmt.Sprintf("* %s", err) } format := "%d errors occurred transforming %s:\n\t%s" return fmt.Sprintf(format, len(es), e.datatype, strings.Join(es, "\n\t")) } func (e *multiErr) Unwrap() error { switch len(e.errs) { case 0: return nil case 1: return e.errs[0] } // Return a multiErr without the leading error. cp := &multiErr{ datatype: e.datatype, errs: make([]error, len(e.errs)-1), } copy(cp.errs, e.errs[1:]) return cp } func (e *multiErr) Is(target error) bool { if len(e.errs) == 0 { return false } // Check if the first error is target. return errors.Is(e.errs[0], target) } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/error_test.go000066400000000000000000000050751452547353200337120ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/transform/error_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package transform import ( "strings" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) var ( e0 = errMetric{m: pbMetrics[0], err: errUnknownAggregation} e1 = errMetric{m: pbMetrics[1], err: errUnknownTemporality} ) type testingErr struct{} func (testingErr) Error() string { return "testing error" } // errFunc is a non-comparable error type. 
type errFunc func() string func (e errFunc) Error() string { return e() } func TestMultiErr(t *testing.T) { const name = "TestMultiErr" me := &multiErr{datatype: name} t.Run("ErrOrNil", func(t *testing.T) { require.Nil(t, me.errOrNil()) me.errs = []error{e0} assert.Error(t, me.errOrNil()) }) var testErr testingErr t.Run("AppendError", func(t *testing.T) { me.append(testErr) assert.Equal(t, testErr, me.errs[len(me.errs)-1]) }) t.Run("AppendFlattens", func(t *testing.T) { other := &multiErr{datatype: "OtherTestMultiErr", errs: []error{e1}} me.append(other) assert.Equal(t, e1, me.errs[len(me.errs)-1]) }) t.Run("ErrorMessage", func(t *testing.T) { // Test the overall structure of the message, but not the exact // language so this doesn't become a change-indicator. msg := me.Error() lines := strings.Split(msg, "\n") assert.Equalf(t, 4, len(lines), "expected a 4 line error message, got:\n\n%s", msg) assert.Contains(t, msg, name) assert.Contains(t, msg, e0.Error()) assert.Contains(t, msg, testErr.Error()) assert.Contains(t, msg, e1.Error()) }) t.Run("ErrorIs", func(t *testing.T) { assert.ErrorIs(t, me, errUnknownAggregation) assert.ErrorIs(t, me, e0) assert.ErrorIs(t, me, testErr) assert.ErrorIs(t, me, errUnknownTemporality) assert.ErrorIs(t, me, e1) errUnknown := errFunc(func() string { return "unknown error" }) assert.NotErrorIs(t, me, errUnknown) var empty multiErr assert.NotErrorIs(t, &empty, errUnknownTemporality) }) } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata.go000066400000000000000000000226431452547353200336370ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package transform provides transformation functionality from the // sdk/metric/metricdata data-types into OTLP data-types. package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform" import ( "fmt" "time" "go.opentelemetry.io/otel/sdk/metric/metricdata" cpb "go.opentelemetry.io/proto/otlp/common/v1" mpb "go.opentelemetry.io/proto/otlp/metrics/v1" rpb "go.opentelemetry.io/proto/otlp/resource/v1" ) // ResourceMetrics returns an OTLP ResourceMetrics generated from rm. If rm // contains invalid ScopeMetrics, an error will be returned along with an OTLP // ResourceMetrics that contains partial OTLP ScopeMetrics. func ResourceMetrics(rm *metricdata.ResourceMetrics) (*mpb.ResourceMetrics, error) { sms, err := ScopeMetrics(rm.ScopeMetrics) return &mpb.ResourceMetrics{ Resource: &rpb.Resource{ Attributes: AttrIter(rm.Resource.Iter()), }, ScopeMetrics: sms, SchemaUrl: rm.Resource.SchemaURL(), }, err } // ScopeMetrics returns a slice of OTLP ScopeMetrics generated from sms. If // sms contains invalid metric values, an error will be returned along with a // slice that contains partial OTLP ScopeMetrics. 
func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) { errs := &multiErr{datatype: "ScopeMetrics"} out := make([]*mpb.ScopeMetrics, 0, len(sms)) for _, sm := range sms { ms, err := Metrics(sm.Metrics) if err != nil { errs.append(err) } out = append(out, &mpb.ScopeMetrics{ Scope: &cpb.InstrumentationScope{ Name: sm.Scope.Name, Version: sm.Scope.Version, }, Metrics: ms, SchemaUrl: sm.Scope.SchemaURL, }) } return out, errs.errOrNil() } // Metrics returns a slice of OTLP Metric generated from ms. If ms contains // invalid metric values, an error will be returned along with a slice that // contains partial OTLP Metrics. func Metrics(ms []metricdata.Metrics) ([]*mpb.Metric, error) { errs := &multiErr{datatype: "Metrics"} out := make([]*mpb.Metric, 0, len(ms)) for _, m := range ms { o, err := metric(m) if err != nil { // Do not include invalid data. Drop the metric, report the error. errs.append(errMetric{m: o, err: err}) continue } out = append(out, o) } return out, errs.errOrNil() } func metric(m metricdata.Metrics) (*mpb.Metric, error) { var err error out := &mpb.Metric{ Name: m.Name, Description: m.Description, Unit: string(m.Unit), } switch a := m.Data.(type) { case metricdata.Gauge[int64]: out.Data = Gauge[int64](a) case metricdata.Gauge[float64]: out.Data = Gauge[float64](a) case metricdata.Sum[int64]: out.Data, err = Sum[int64](a) case metricdata.Sum[float64]: out.Data, err = Sum[float64](a) case metricdata.Histogram[int64]: out.Data, err = Histogram(a) case metricdata.Histogram[float64]: out.Data, err = Histogram(a) case metricdata.ExponentialHistogram[int64]: out.Data, err = ExponentialHistogram(a) case metricdata.ExponentialHistogram[float64]: out.Data, err = ExponentialHistogram(a) default: return out, fmt.Errorf("%w: %T", errUnknownAggregation, a) } return out, err } // Gauge returns an OTLP Metric_Gauge generated from g. func Gauge[N int64 | float64](g metricdata.Gauge[N]) *mpb.Metric_Gauge { return &mpb.Metric_Gauge{ Gauge: &mpb.Gauge{ DataPoints: DataPoints(g.DataPoints), }, } } // Sum returns an OTLP Metric_Sum generated from s. An error is returned // if the temporality of s is unknown. func Sum[N int64 | float64](s metricdata.Sum[N]) (*mpb.Metric_Sum, error) { t, err := Temporality(s.Temporality) if err != nil { return nil, err } return &mpb.Metric_Sum{ Sum: &mpb.Sum{ AggregationTemporality: t, IsMonotonic: s.IsMonotonic, DataPoints: DataPoints(s.DataPoints), }, }, nil } // DataPoints returns a slice of OTLP NumberDataPoint generated from dPts. func DataPoints[N int64 | float64](dPts []metricdata.DataPoint[N]) []*mpb.NumberDataPoint { out := make([]*mpb.NumberDataPoint, 0, len(dPts)) for _, dPt := range dPts { ndp := &mpb.NumberDataPoint{ Attributes: AttrIter(dPt.Attributes.Iter()), StartTimeUnixNano: timeUnixNano(dPt.StartTime), TimeUnixNano: timeUnixNano(dPt.Time), } switch v := any(dPt.Value).(type) { case int64: ndp.Value = &mpb.NumberDataPoint_AsInt{ AsInt: v, } case float64: ndp.Value = &mpb.NumberDataPoint_AsDouble{ AsDouble: v, } } out = append(out, ndp) } return out } // Histogram returns an OTLP Metric_Histogram generated from h. An error is // returned if the temporality of h is unknown. 
func Histogram[N int64 | float64](h metricdata.Histogram[N]) (*mpb.Metric_Histogram, error) { t, err := Temporality(h.Temporality) if err != nil { return nil, err } return &mpb.Metric_Histogram{ Histogram: &mpb.Histogram{ AggregationTemporality: t, DataPoints: HistogramDataPoints(h.DataPoints), }, }, nil } // HistogramDataPoints returns a slice of OTLP HistogramDataPoint generated // from dPts. func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint[N]) []*mpb.HistogramDataPoint { out := make([]*mpb.HistogramDataPoint, 0, len(dPts)) for _, dPt := range dPts { sum := float64(dPt.Sum) hdp := &mpb.HistogramDataPoint{ Attributes: AttrIter(dPt.Attributes.Iter()), StartTimeUnixNano: timeUnixNano(dPt.StartTime), TimeUnixNano: timeUnixNano(dPt.Time), Count: dPt.Count, Sum: &sum, BucketCounts: dPt.BucketCounts, ExplicitBounds: dPt.Bounds, } if v, ok := dPt.Min.Value(); ok { vF64 := float64(v) hdp.Min = &vF64 } if v, ok := dPt.Max.Value(); ok { vF64 := float64(v) hdp.Max = &vF64 } out = append(out, hdp) } return out } // ExponentialHistogram returns an OTLP Metric_ExponentialHistogram generated from h. An error is // returned if the temporality of h is unknown. func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N]) (*mpb.Metric_ExponentialHistogram, error) { t, err := Temporality(h.Temporality) if err != nil { return nil, err } return &mpb.Metric_ExponentialHistogram{ ExponentialHistogram: &mpb.ExponentialHistogram{ AggregationTemporality: t, DataPoints: ExponentialHistogramDataPoints(h.DataPoints), }, }, nil } // ExponentialHistogramDataPoints returns a slice of OTLP ExponentialHistogramDataPoint generated // from dPts. func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.ExponentialHistogramDataPoint[N]) []*mpb.ExponentialHistogramDataPoint { out := make([]*mpb.ExponentialHistogramDataPoint, 0, len(dPts)) for _, dPt := range dPts { sum := float64(dPt.Sum) ehdp := &mpb.ExponentialHistogramDataPoint{ Attributes: AttrIter(dPt.Attributes.Iter()), StartTimeUnixNano: timeUnixNano(dPt.StartTime), TimeUnixNano: timeUnixNano(dPt.Time), Count: dPt.Count, Sum: &sum, Scale: dPt.Scale, ZeroCount: dPt.ZeroCount, Positive: ExponentialHistogramDataPointBuckets(dPt.PositiveBucket), Negative: ExponentialHistogramDataPointBuckets(dPt.NegativeBucket), } if v, ok := dPt.Min.Value(); ok { vF64 := float64(v) ehdp.Min = &vF64 } if v, ok := dPt.Max.Value(); ok { vF64 := float64(v) ehdp.Max = &vF64 } out = append(out, ehdp) } return out } // ExponentialHistogramDataPointBuckets returns an OTLP ExponentialHistogramDataPoint_Buckets generated // from bucket. func ExponentialHistogramDataPointBuckets(bucket metricdata.ExponentialBucket) *mpb.ExponentialHistogramDataPoint_Buckets { return &mpb.ExponentialHistogramDataPoint_Buckets{ Offset: bucket.Offset, BucketCounts: bucket.Counts, } } // Temporality returns an OTLP AggregationTemporality generated from t. If t // is unknown, an error is returned along with the invalid // AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED. 
func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) { switch t { case metricdata.DeltaTemporality: return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, nil case metricdata.CumulativeTemporality: return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, nil default: err := fmt.Errorf("%w: %s", errUnknownTemporality, t) return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED, err } } // timeUnixNano returns t as a Unix time, the number of nanoseconds elapsed // since January 1, 1970 UTC as uint64. // The result is undefined if the Unix time // in nanoseconds cannot be represented by an int64 // (a date before the year 1678 or after 2262). // timeUnixNano on the zero Time returns 0. // The result does not depend on the location associated with t. func timeUnixNano(t time.Time) uint64 { if t.IsZero() { return 0 } return uint64(t.UnixNano()) } metricdata_test.go000066400000000000000000000437171452547353200346240ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package transform import ( "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/resource" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" cpb "go.opentelemetry.io/proto/otlp/common/v1" mpb "go.opentelemetry.io/proto/otlp/metrics/v1" rpb "go.opentelemetry.io/proto/otlp/resource/v1" ) type unknownAggT struct { metricdata.Aggregation } var ( // Sat Jan 01 2000 00:00:00 GMT+0000. 
start = time.Date(2000, time.January, 0o1, 0, 0, 0, 0, time.FixedZone("GMT", 0)) end = start.Add(30 * time.Second) alice = attribute.NewSet(attribute.String("user", "alice")) bob = attribute.NewSet(attribute.String("user", "bob")) pbAlice = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{StringValue: "alice"}, }} pbBob = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{StringValue: "bob"}, }} minA, maxA, sumA = 2.0, 4.0, 90.0 minB, maxB, sumB = 4.0, 150.0, 234.0 otelHDPInt64 = []metricdata.HistogramDataPoint[int64]{ { Attributes: alice, StartTime: start, Time: end, Count: 30, Bounds: []float64{1, 5}, BucketCounts: []uint64{0, 30, 0}, Min: metricdata.NewExtrema(int64(minA)), Max: metricdata.NewExtrema(int64(maxA)), Sum: int64(sumA), }, { Attributes: bob, StartTime: start, Time: end, Count: 3, Bounds: []float64{1, 5}, BucketCounts: []uint64{0, 1, 2}, Min: metricdata.NewExtrema(int64(minB)), Max: metricdata.NewExtrema(int64(maxB)), Sum: int64(sumB), }, } otelHDPFloat64 = []metricdata.HistogramDataPoint[float64]{ { Attributes: alice, StartTime: start, Time: end, Count: 30, Bounds: []float64{1, 5}, BucketCounts: []uint64{0, 30, 0}, Min: metricdata.NewExtrema(minA), Max: metricdata.NewExtrema(maxA), Sum: sumA, }, { Attributes: bob, StartTime: start, Time: end, Count: 3, Bounds: []float64{1, 5}, BucketCounts: []uint64{0, 1, 2}, Min: metricdata.NewExtrema(minB), Max: metricdata.NewExtrema(maxB), Sum: sumB, }, } otelEBucketA = metricdata.ExponentialBucket{ Offset: 5, Counts: []uint64{0, 5, 0, 5}, } otelEBucketB = metricdata.ExponentialBucket{ Offset: 3, Counts: []uint64{0, 5, 0, 5}, } otelEBucketsC = metricdata.ExponentialBucket{ Offset: 5, Counts: []uint64{0, 1}, } otelEBucketsD = metricdata.ExponentialBucket{ Offset: 3, Counts: []uint64{0, 1}, } otelEHDPInt64 = []metricdata.ExponentialHistogramDataPoint[int64]{ { Attributes: alice, StartTime: start, Time: end, Count: 30, Scale: 2, ZeroCount: 10, PositiveBucket: otelEBucketA, NegativeBucket: otelEBucketB, ZeroThreshold: .01, Min: metricdata.NewExtrema(int64(minA)), Max: metricdata.NewExtrema(int64(maxA)), Sum: int64(sumA), }, { Attributes: bob, StartTime: start, Time: end, Count: 3, Scale: 4, ZeroCount: 1, PositiveBucket: otelEBucketsC, NegativeBucket: otelEBucketsD, ZeroThreshold: .02, Min: metricdata.NewExtrema(int64(minB)), Max: metricdata.NewExtrema(int64(maxB)), Sum: int64(sumB), }, } otelEHDPFloat64 = []metricdata.ExponentialHistogramDataPoint[float64]{ { Attributes: alice, StartTime: start, Time: end, Count: 30, Scale: 2, ZeroCount: 10, PositiveBucket: otelEBucketA, NegativeBucket: otelEBucketB, ZeroThreshold: .01, Min: metricdata.NewExtrema(minA), Max: metricdata.NewExtrema(maxA), Sum: sumA, }, { Attributes: bob, StartTime: start, Time: end, Count: 3, Scale: 4, ZeroCount: 1, PositiveBucket: otelEBucketsC, NegativeBucket: otelEBucketsD, ZeroThreshold: .02, Min: metricdata.NewExtrema(minB), Max: metricdata.NewExtrema(maxB), Sum: sumB, }, } pbHDP = []*mpb.HistogramDataPoint{ { Attributes: []*cpb.KeyValue{pbAlice}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Count: 30, Sum: &sumA, ExplicitBounds: []float64{1, 5}, BucketCounts: []uint64{0, 30, 0}, Min: &minA, Max: &maxA, }, { Attributes: []*cpb.KeyValue{pbBob}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Count: 3, Sum: &sumB, ExplicitBounds: []float64{1, 5}, BucketCounts: []uint64{0, 1, 2}, Min: &minB, Max: &maxB, }, } pbEHDPBA = 
&mpb.ExponentialHistogramDataPoint_Buckets{ Offset: 5, BucketCounts: []uint64{0, 5, 0, 5}, } pbEHDPBB = &mpb.ExponentialHistogramDataPoint_Buckets{ Offset: 3, BucketCounts: []uint64{0, 5, 0, 5}, } pbEHDPBC = &mpb.ExponentialHistogramDataPoint_Buckets{ Offset: 5, BucketCounts: []uint64{0, 1}, } pbEHDPBD = &mpb.ExponentialHistogramDataPoint_Buckets{ Offset: 3, BucketCounts: []uint64{0, 1}, } pbEHDP = []*mpb.ExponentialHistogramDataPoint{ { Attributes: []*cpb.KeyValue{pbAlice}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Count: 30, Sum: &sumA, Scale: 2, ZeroCount: 10, Positive: pbEHDPBA, Negative: pbEHDPBB, Min: &minA, Max: &maxA, }, { Attributes: []*cpb.KeyValue{pbBob}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Count: 3, Sum: &sumB, Scale: 4, ZeroCount: 1, Positive: pbEHDPBC, Negative: pbEHDPBD, Min: &minB, Max: &maxB, }, } otelHistInt64 = metricdata.Histogram[int64]{ Temporality: metricdata.DeltaTemporality, DataPoints: otelHDPInt64, } otelHistFloat64 = metricdata.Histogram[float64]{ Temporality: metricdata.DeltaTemporality, DataPoints: otelHDPFloat64, } invalidTemporality metricdata.Temporality otelHistInvalid = metricdata.Histogram[int64]{ Temporality: invalidTemporality, DataPoints: otelHDPInt64, } otelExpoHistInt64 = metricdata.ExponentialHistogram[int64]{ Temporality: metricdata.DeltaTemporality, DataPoints: otelEHDPInt64, } otelExpoHistFloat64 = metricdata.ExponentialHistogram[float64]{ Temporality: metricdata.DeltaTemporality, DataPoints: otelEHDPFloat64, } otelExpoHistInvalid = metricdata.ExponentialHistogram[int64]{ Temporality: invalidTemporality, DataPoints: otelEHDPInt64, } pbHist = &mpb.Histogram{ AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, DataPoints: pbHDP, } pbExpoHist = &mpb.ExponentialHistogram{ AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, DataPoints: pbEHDP, } otelDPtsInt64 = []metricdata.DataPoint[int64]{ {Attributes: alice, StartTime: start, Time: end, Value: 1}, {Attributes: bob, StartTime: start, Time: end, Value: 2}, } otelDPtsFloat64 = []metricdata.DataPoint[float64]{ {Attributes: alice, StartTime: start, Time: end, Value: 1.0}, {Attributes: bob, StartTime: start, Time: end, Value: 2.0}, } pbDPtsInt64 = []*mpb.NumberDataPoint{ { Attributes: []*cpb.KeyValue{pbAlice}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Value: &mpb.NumberDataPoint_AsInt{AsInt: 1}, }, { Attributes: []*cpb.KeyValue{pbBob}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Value: &mpb.NumberDataPoint_AsInt{AsInt: 2}, }, } pbDPtsFloat64 = []*mpb.NumberDataPoint{ { Attributes: []*cpb.KeyValue{pbAlice}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Value: &mpb.NumberDataPoint_AsDouble{AsDouble: 1.0}, }, { Attributes: []*cpb.KeyValue{pbBob}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Value: &mpb.NumberDataPoint_AsDouble{AsDouble: 2.0}, }, } otelSumInt64 = metricdata.Sum[int64]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: true, DataPoints: otelDPtsInt64, } otelSumFloat64 = metricdata.Sum[float64]{ Temporality: metricdata.DeltaTemporality, IsMonotonic: false, DataPoints: otelDPtsFloat64, } otelSumInvalid = metricdata.Sum[float64]{ Temporality: invalidTemporality, IsMonotonic: false, DataPoints: otelDPtsFloat64, } pbSumInt64 = &mpb.Sum{ AggregationTemporality: 
mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, IsMonotonic: true, DataPoints: pbDPtsInt64, } pbSumFloat64 = &mpb.Sum{ AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, IsMonotonic: false, DataPoints: pbDPtsFloat64, } otelGaugeInt64 = metricdata.Gauge[int64]{DataPoints: otelDPtsInt64} otelGaugeFloat64 = metricdata.Gauge[float64]{DataPoints: otelDPtsFloat64} otelGaugeZeroStartTime = metricdata.Gauge[int64]{ DataPoints: []metricdata.DataPoint[int64]{ {Attributes: alice, StartTime: time.Time{}, Time: end, Value: 1}, }, } pbGaugeInt64 = &mpb.Gauge{DataPoints: pbDPtsInt64} pbGaugeFloat64 = &mpb.Gauge{DataPoints: pbDPtsFloat64} pbGaugeZeroStartTime = &mpb.Gauge{DataPoints: []*mpb.NumberDataPoint{ { Attributes: []*cpb.KeyValue{pbAlice}, StartTimeUnixNano: 0, TimeUnixNano: uint64(end.UnixNano()), Value: &mpb.NumberDataPoint_AsInt{AsInt: 1}, }, }} unknownAgg unknownAggT otelMetrics = []metricdata.Metrics{ { Name: "int64-gauge", Description: "Gauge with int64 values", Unit: "1", Data: otelGaugeInt64, }, { Name: "float64-gauge", Description: "Gauge with float64 values", Unit: "1", Data: otelGaugeFloat64, }, { Name: "int64-sum", Description: "Sum with int64 values", Unit: "1", Data: otelSumInt64, }, { Name: "float64-sum", Description: "Sum with float64 values", Unit: "1", Data: otelSumFloat64, }, { Name: "invalid-sum", Description: "Sum with invalid temporality", Unit: "1", Data: otelSumInvalid, }, { Name: "int64-histogram", Description: "Histogram", Unit: "1", Data: otelHistInt64, }, { Name: "float64-histogram", Description: "Histogram", Unit: "1", Data: otelHistFloat64, }, { Name: "invalid-histogram", Description: "Invalid histogram", Unit: "1", Data: otelHistInvalid, }, { Name: "unknown", Description: "Unknown aggregation", Unit: "1", Data: unknownAgg, }, { Name: "int64-ExponentialHistogram", Description: "Exponential Histogram", Unit: "1", Data: otelExpoHistInt64, }, { Name: "float64-ExponentialHistogram", Description: "Exponential Histogram", Unit: "1", Data: otelExpoHistFloat64, }, { Name: "invalid-ExponentialHistogram", Description: "Invalid Exponential Histogram", Unit: "1", Data: otelExpoHistInvalid, }, { Name: "zero-time", Description: "Gauge with 0 StartTime", Unit: "1", Data: otelGaugeZeroStartTime, }, } pbMetrics = []*mpb.Metric{ { Name: "int64-gauge", Description: "Gauge with int64 values", Unit: "1", Data: &mpb.Metric_Gauge{Gauge: pbGaugeInt64}, }, { Name: "float64-gauge", Description: "Gauge with float64 values", Unit: "1", Data: &mpb.Metric_Gauge{Gauge: pbGaugeFloat64}, }, { Name: "int64-sum", Description: "Sum with int64 values", Unit: "1", Data: &mpb.Metric_Sum{Sum: pbSumInt64}, }, { Name: "float64-sum", Description: "Sum with float64 values", Unit: "1", Data: &mpb.Metric_Sum{Sum: pbSumFloat64}, }, { Name: "int64-histogram", Description: "Histogram", Unit: "1", Data: &mpb.Metric_Histogram{Histogram: pbHist}, }, { Name: "float64-histogram", Description: "Histogram", Unit: "1", Data: &mpb.Metric_Histogram{Histogram: pbHist}, }, { Name: "int64-ExponentialHistogram", Description: "Exponential Histogram", Unit: "1", Data: &mpb.Metric_ExponentialHistogram{ExponentialHistogram: pbExpoHist}, }, { Name: "float64-ExponentialHistogram", Description: "Exponential Histogram", Unit: "1", Data: &mpb.Metric_ExponentialHistogram{ExponentialHistogram: pbExpoHist}, }, { Name: "zero-time", Description: "Gauge with 0 StartTime", Unit: "1", Data: &mpb.Metric_Gauge{Gauge: pbGaugeZeroStartTime}, }, } otelScopeMetrics = []metricdata.ScopeMetrics{ { Scope: 
instrumentation.Scope{ Name: "test/code/path", Version: "v0.1.0", SchemaURL: semconv.SchemaURL, }, Metrics: otelMetrics, }, } pbScopeMetrics = []*mpb.ScopeMetrics{ { Scope: &cpb.InstrumentationScope{ Name: "test/code/path", Version: "v0.1.0", }, Metrics: pbMetrics, SchemaUrl: semconv.SchemaURL, }, } otelRes = resource.NewWithAttributes( semconv.SchemaURL, semconv.ServiceName("test server"), semconv.ServiceVersion("v0.1.0"), ) pbRes = &rpb.Resource{ Attributes: []*cpb.KeyValue{ { Key: "service.name", Value: &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{StringValue: "test server"}, }, }, { Key: "service.version", Value: &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{StringValue: "v0.1.0"}, }, }, }, } otelResourceMetrics = &metricdata.ResourceMetrics{ Resource: otelRes, ScopeMetrics: otelScopeMetrics, } pbResourceMetrics = &mpb.ResourceMetrics{ Resource: pbRes, ScopeMetrics: pbScopeMetrics, SchemaUrl: semconv.SchemaURL, } ) func TestTransformations(t *testing.T) { // Run tests from the "bottom-up" of the metricdata data-types and halt // when a failure occurs to ensure the clearest failure message (as // opposed to the opposite of testing from the top-down which will obscure // errors deep inside the structs). // DataPoint types. assert.Equal(t, pbHDP, HistogramDataPoints(otelHDPInt64)) assert.Equal(t, pbHDP, HistogramDataPoints(otelHDPFloat64)) assert.Equal(t, pbDPtsInt64, DataPoints[int64](otelDPtsInt64)) require.Equal(t, pbDPtsFloat64, DataPoints[float64](otelDPtsFloat64)) assert.Equal(t, pbEHDP, ExponentialHistogramDataPoints(otelEHDPInt64)) assert.Equal(t, pbEHDP, ExponentialHistogramDataPoints(otelEHDPFloat64)) assert.Equal(t, pbEHDPBA, ExponentialHistogramDataPointBuckets(otelEBucketA)) // Aggregations. h, err := Histogram(otelHistInt64) assert.NoError(t, err) assert.Equal(t, &mpb.Metric_Histogram{Histogram: pbHist}, h) h, err = Histogram(otelHistFloat64) assert.NoError(t, err) assert.Equal(t, &mpb.Metric_Histogram{Histogram: pbHist}, h) h, err = Histogram(otelHistInvalid) assert.ErrorIs(t, err, errUnknownTemporality) assert.Nil(t, h) s, err := Sum[int64](otelSumInt64) assert.NoError(t, err) assert.Equal(t, &mpb.Metric_Sum{Sum: pbSumInt64}, s) s, err = Sum[float64](otelSumFloat64) assert.NoError(t, err) assert.Equal(t, &mpb.Metric_Sum{Sum: pbSumFloat64}, s) s, err = Sum[float64](otelSumInvalid) assert.ErrorIs(t, err, errUnknownTemporality) assert.Nil(t, s) assert.Equal(t, &mpb.Metric_Gauge{Gauge: pbGaugeInt64}, Gauge[int64](otelGaugeInt64)) require.Equal(t, &mpb.Metric_Gauge{Gauge: pbGaugeFloat64}, Gauge[float64](otelGaugeFloat64)) e, err := ExponentialHistogram(otelExpoHistInt64) assert.NoError(t, err) assert.Equal(t, &mpb.Metric_ExponentialHistogram{ExponentialHistogram: pbExpoHist}, e) e, err = ExponentialHistogram(otelExpoHistFloat64) assert.NoError(t, err) assert.Equal(t, &mpb.Metric_ExponentialHistogram{ExponentialHistogram: pbExpoHist}, e) e, err = ExponentialHistogram(otelExpoHistInvalid) assert.ErrorIs(t, err, errUnknownTemporality) assert.Nil(t, e) // Metrics. m, err := Metrics(otelMetrics) assert.ErrorIs(t, err, errUnknownTemporality) assert.ErrorIs(t, err, errUnknownAggregation) require.Equal(t, pbMetrics, m) // Scope Metrics. sm, err := ScopeMetrics(otelScopeMetrics) assert.ErrorIs(t, err, errUnknownTemporality) assert.ErrorIs(t, err, errUnknownAggregation) require.Equal(t, pbScopeMetrics, sm) // Resource Metrics. 
rm, err := ResourceMetrics(otelResourceMetrics) assert.ErrorIs(t, err, errUnknownTemporality) assert.ErrorIs(t, err, errUnknownAggregation) require.Equal(t, pbResourceMetrics, rm) } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go000066400000000000000000000015161452547353200273540ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" // Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use. func Version() string { return "0.44.0" } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetricgrpc/version_test.go000066400000000000000000000020631452547353200304110ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlpmetricgrpc import ( "regexp" "testing" "github.com/stretchr/testify/assert" ) // regex taken from https://github.com/Masterminds/semver/tree/v3.1.1 var versionRegex = regexp.MustCompile(`^v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?$`) func TestVersionSemver(t *testing.T) { v := Version() assert.NotNil(t, versionRegex.FindStringSubmatch(v), "version is not semver: %s", v) } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/000077500000000000000000000000001452547353200253615ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/client.go000066400000000000000000000202771452547353200271760ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" import ( "bytes" "compress/gzip" "context" "errors" "fmt" "io" "net" "net/http" "net/url" "strconv" "sync" "time" "google.golang.org/protobuf/proto" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry" colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" ) type client struct { // req is cloned for every upload the client makes. req *http.Request compression Compression requestFunc retry.RequestFunc httpClient *http.Client } // Keep it in sync with golang's DefaultTransport from net/http! We // have our own copy to avoid handling a situation where the // DefaultTransport is overwritten with some different implementation // of http.RoundTripper or it's modified by another package. var ourTransport = &http.Transport{ Proxy: http.ProxyFromEnvironment, DialContext: (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, }).DialContext, ForceAttemptHTTP2: true, MaxIdleConns: 100, IdleConnTimeout: 90 * time.Second, TLSHandshakeTimeout: 10 * time.Second, ExpectContinueTimeout: 1 * time.Second, } // newClient creates a new HTTP metric client. func newClient(cfg oconf.Config) (*client, error) { httpClient := &http.Client{ Transport: ourTransport, Timeout: cfg.Metrics.Timeout, } if cfg.Metrics.TLSCfg != nil { transport := ourTransport.Clone() transport.TLSClientConfig = cfg.Metrics.TLSCfg httpClient.Transport = transport } u := &url.URL{ Scheme: "https", Host: cfg.Metrics.Endpoint, Path: cfg.Metrics.URLPath, } if cfg.Metrics.Insecure { u.Scheme = "http" } // Body is set when this is cloned during upload. req, err := http.NewRequest(http.MethodPost, u.String(), http.NoBody) if err != nil { return nil, err } userAgent := "OTel Go OTLP over HTTP/protobuf metrics exporter/" + Version() req.Header.Set("User-Agent", userAgent) if n := len(cfg.Metrics.Headers); n > 0 { for k, v := range cfg.Metrics.Headers { req.Header.Set(k, v) } } req.Header.Set("Content-Type", "application/x-protobuf") return &client{ compression: Compression(cfg.Metrics.Compression), req: req, requestFunc: cfg.RetryConfig.RequestFunc(evaluate), httpClient: httpClient, }, nil } // Shutdown shuts down the client, freeing all resources. func (c *client) Shutdown(ctx context.Context) error { // The otlpmetric.Exporter synchronizes access to client methods and // ensures this is called only once. The only thing that needs to be done // here is to release any computational resources the client holds. c.requestFunc = nil c.httpClient = nil return ctx.Err() } // UploadMetrics sends protoMetrics to the connected endpoint. // // Retryable errors from the server will be handled according to any // RetryConfig the client was created with. func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error { // The otlpmetric.Exporter synchronizes access to client methods, and // ensures this is not called after the Exporter is shutdown. Only thing // to do here is send data. 
pbRequest := &colmetricpb.ExportMetricsServiceRequest{ ResourceMetrics: []*metricpb.ResourceMetrics{protoMetrics}, } body, err := proto.Marshal(pbRequest) if err != nil { return err } request, err := c.newRequest(ctx, body) if err != nil { return err } return c.requestFunc(ctx, func(iCtx context.Context) error { select { case <-iCtx.Done(): return iCtx.Err() default: } request.reset(iCtx) resp, err := c.httpClient.Do(request.Request) var urlErr *url.Error if errors.As(err, &urlErr) && urlErr.Temporary() { return newResponseError(http.Header{}) } if err != nil { return err } var rErr error switch sc := resp.StatusCode; { case sc >= 200 && sc <= 299: // Success, do not retry. // Read the partial success message, if any. var respData bytes.Buffer if _, err := io.Copy(&respData, resp.Body); err != nil { return err } if respData.Len() == 0 { return nil } if resp.Header.Get("Content-Type") == "application/x-protobuf" { var respProto colmetricpb.ExportMetricsServiceResponse if err := proto.Unmarshal(respData.Bytes(), &respProto); err != nil { return err } if respProto.PartialSuccess != nil { msg := respProto.PartialSuccess.GetErrorMessage() n := respProto.PartialSuccess.GetRejectedDataPoints() if n != 0 || msg != "" { err := internal.MetricPartialSuccessError(n, msg) otel.Handle(err) } } } return nil case sc == http.StatusTooManyRequests, sc == http.StatusBadGateway, sc == http.StatusServiceUnavailable, sc == http.StatusGatewayTimeout: // Retry-able failure. rErr = newResponseError(resp.Header) // Going to retry, drain the body to reuse the connection. if _, err := io.Copy(io.Discard, resp.Body); err != nil { _ = resp.Body.Close() return err } default: rErr = fmt.Errorf("failed to send metrics to %s: %s", request.URL, resp.Status) } if err := resp.Body.Close(); err != nil { return err } return rErr }) } var gzPool = sync.Pool{ New: func() interface{} { w := gzip.NewWriter(io.Discard) return w }, } func (c *client) newRequest(ctx context.Context, body []byte) (request, error) { r := c.req.Clone(ctx) req := request{Request: r} switch c.compression { case NoCompression: r.ContentLength = (int64)(len(body)) req.bodyReader = bodyReader(body) case GzipCompression: // Ensure the content length is not used. r.ContentLength = -1 r.Header.Set("Content-Encoding", "gzip") gz := gzPool.Get().(*gzip.Writer) defer gzPool.Put(gz) var b bytes.Buffer gz.Reset(&b) if _, err := gz.Write(body); err != nil { return req, err } // Close needs to be called to ensure body if fully written. if err := gz.Close(); err != nil { return req, err } req.bodyReader = bodyReader(b.Bytes()) } return req, nil } // bodyReader returns a closure returning a new reader for buf. func bodyReader(buf []byte) func() io.ReadCloser { return func() io.ReadCloser { return io.NopCloser(bytes.NewReader(buf)) } } // request wraps an http.Request with a resettable body reader. type request struct { *http.Request // bodyReader allows the same body to be used for multiple requests. bodyReader func() io.ReadCloser } // reset reinitializes the request Body and uses ctx for the request. func (r *request) reset(ctx context.Context) { r.Body = r.bodyReader() r.Request = r.Request.WithContext(ctx) } // retryableError represents a request failure that can be retried. type retryableError struct { throttle int64 } // newResponseError returns a retryableError and will extract any explicit // throttle delay contained in headers. 
func newResponseError(header http.Header) error { var rErr retryableError if v := header.Get("Retry-After"); v != "" { if t, err := strconv.ParseInt(v, 10, 64); err == nil { rErr.throttle = t } } return rErr } func (e retryableError) Error() string { return "retry-able request failure" } // evaluate returns if err is retry-able. If it is and it includes an explicit // throttling delay, that delay is also returned. func evaluate(err error) (bool, time.Duration) { if err == nil { return false, 0 } rErr, ok := err.(retryableError) if !ok { return false, 0 } return true, time.Duration(rErr.throttle) } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/client_test.go000066400000000000000000000170651452547353200302360ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlpmetrichttp import ( "context" "crypto/tls" "errors" "fmt" "net/http" "strings" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/otest" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" mpb "go.opentelemetry.io/proto/otlp/metrics/v1" ) type clientShim struct { *client } func (clientShim) Temporality(metric.InstrumentKind) metricdata.Temporality { return metricdata.CumulativeTemporality } func (clientShim) Aggregation(metric.InstrumentKind) metric.Aggregation { return nil } func (clientShim) ForceFlush(ctx context.Context) error { return ctx.Err() } func TestClient(t *testing.T) { factory := func(rCh <-chan otest.ExportResult) (otest.Client, otest.Collector) { coll, err := otest.NewHTTPCollector("", rCh) require.NoError(t, err) addr := coll.Addr().String() opts := []Option{WithEndpoint(addr), WithInsecure()} cfg := oconf.NewHTTPConfig(asHTTPOptions(opts)...) client, err := newClient(cfg) require.NoError(t, err) return clientShim{client}, coll } t.Run("Integration", otest.RunClientTests(factory)) } func TestClientWithHTTPCollectorRespondingPlainText(t *testing.T) { ctx := context.Background() coll, err := otest.NewHTTPCollector("", nil, otest.WithHTTPCollectorRespondingPlainText()) require.NoError(t, err) addr := coll.Addr().String() opts := []Option{WithEndpoint(addr), WithInsecure()} cfg := oconf.NewHTTPConfig(asHTTPOptions(opts)...) 
client, err := newClient(cfg) require.NoError(t, err) require.NoError(t, client.UploadMetrics(ctx, &mpb.ResourceMetrics{})) require.NoError(t, client.Shutdown(ctx)) got := coll.Collect().Dump() require.Len(t, got, 1, "upload of one ResourceMetrics") } func TestNewWithInvalidEndpoint(t *testing.T) { ctx := context.Background() exp, err := New(ctx, WithEndpoint("host:invalid-port")) assert.Error(t, err) assert.Nil(t, exp) } func TestConfig(t *testing.T) { factoryFunc := func(ePt string, rCh <-chan otest.ExportResult, o ...Option) (metric.Exporter, *otest.HTTPCollector) { coll, err := otest.NewHTTPCollector(ePt, rCh) require.NoError(t, err) opts := []Option{WithEndpoint(coll.Addr().String())} if !strings.HasPrefix(strings.ToLower(ePt), "https") { opts = append(opts, WithInsecure()) } opts = append(opts, o...) ctx := context.Background() exp, err := New(ctx, opts...) require.NoError(t, err) return exp, coll } t.Run("WithHeaders", func(t *testing.T) { key := http.CanonicalHeaderKey("my-custom-header") headers := map[string]string{key: "custom-value"} exp, coll := factoryFunc("", nil, WithHeaders(headers)) ctx := context.Background() t.Cleanup(func() { require.NoError(t, coll.Shutdown(ctx)) }) require.NoError(t, exp.Export(ctx, &metricdata.ResourceMetrics{})) // Ensure everything is flushed. require.NoError(t, exp.Shutdown(ctx)) got := coll.Headers() require.Regexp(t, "OTel Go OTLP over HTTP/protobuf metrics exporter/[01]\\..*", got) require.Contains(t, got, key) assert.Equal(t, got[key], []string{headers[key]}) }) t.Run("WithTimeout", func(t *testing.T) { // Do not send on rCh so the Collector never responds to the client. rCh := make(chan otest.ExportResult) exp, coll := factoryFunc( "", rCh, WithTimeout(time.Millisecond), WithRetry(RetryConfig{Enabled: false}), ) ctx := context.Background() t.Cleanup(func() { require.NoError(t, coll.Shutdown(ctx)) }) // Push this after Shutdown so the HTTP server doesn't hang. t.Cleanup(func() { close(rCh) }) t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) }) err := exp.Export(ctx, &metricdata.ResourceMetrics{}) assert.ErrorAs(t, err, new(retryableError)) }) t.Run("WithCompressionGZip", func(t *testing.T) { exp, coll := factoryFunc("", nil, WithCompression(GzipCompression)) ctx := context.Background() t.Cleanup(func() { require.NoError(t, coll.Shutdown(ctx)) }) t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) }) assert.NoError(t, exp.Export(ctx, &metricdata.ResourceMetrics{})) assert.Len(t, coll.Collect().Dump(), 1) }) t.Run("WithRetry", func(t *testing.T) { emptyErr := errors.New("") rCh := make(chan otest.ExportResult, 5) header := http.Header{http.CanonicalHeaderKey("Retry-After"): {"10"}} // All retryable errors. rCh <- otest.ExportResult{Err: &otest.HTTPResponseError{ Status: http.StatusServiceUnavailable, Err: emptyErr, Header: header, }} rCh <- otest.ExportResult{Err: &otest.HTTPResponseError{ Status: http.StatusTooManyRequests, Err: emptyErr, }} rCh <- otest.ExportResult{Err: &otest.HTTPResponseError{ Status: http.StatusGatewayTimeout, Err: emptyErr, }} rCh <- otest.ExportResult{Err: &otest.HTTPResponseError{ Status: http.StatusBadGateway, Err: emptyErr, }} rCh <- otest.ExportResult{} exp, coll := factoryFunc("", rCh, WithRetry(RetryConfig{ Enabled: true, InitialInterval: time.Nanosecond, MaxInterval: time.Millisecond, MaxElapsedTime: time.Minute, })) ctx := context.Background() t.Cleanup(func() { require.NoError(t, coll.Shutdown(ctx)) }) // Push this after Shutdown so the HTTP server doesn't hang. 
t.Cleanup(func() { close(rCh) }) t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) }) assert.NoError(t, exp.Export(ctx, &metricdata.ResourceMetrics{}), "failed retry") assert.Len(t, rCh, 0, "failed HTTP responses did not occur") }) t.Run("WithURLPath", func(t *testing.T) { path := "/prefix/v2/metrics" ePt := fmt.Sprintf("http://localhost:0%s", path) exp, coll := factoryFunc(ePt, nil, WithURLPath(path)) ctx := context.Background() t.Cleanup(func() { require.NoError(t, coll.Shutdown(ctx)) }) t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) }) assert.NoError(t, exp.Export(ctx, &metricdata.ResourceMetrics{})) assert.Len(t, coll.Collect().Dump(), 1) }) t.Run("WithTLSClientConfig", func(t *testing.T) { ePt := "https://localhost:0" tlsCfg := &tls.Config{InsecureSkipVerify: true} exp, coll := factoryFunc(ePt, nil, WithTLSClientConfig(tlsCfg)) ctx := context.Background() t.Cleanup(func() { require.NoError(t, coll.Shutdown(ctx)) }) t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) }) assert.NoError(t, exp.Export(ctx, &metricdata.ResourceMetrics{})) assert.Len(t, coll.Collect().Dump(), 1) }) t.Run("WithCustomUserAgent", func(t *testing.T) { key := http.CanonicalHeaderKey("user-agent") headers := map[string]string{key: "custom-user-agent"} exp, coll := factoryFunc("", nil, WithHeaders(headers)) ctx := context.Background() t.Cleanup(func() { require.NoError(t, coll.Shutdown(ctx)) }) require.NoError(t, exp.Export(ctx, &metricdata.ResourceMetrics{})) // Ensure everything is flushed. require.NoError(t, exp.Shutdown(ctx)) got := coll.Headers() require.Contains(t, got, key) assert.Equal(t, got[key], []string{headers[key]}) }) } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/config.go000066400000000000000000000202301452547353200271520ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" import ( "crypto/tls" "time" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry" "go.opentelemetry.io/otel/sdk/metric" ) // Compression describes the compression used for payloads sent to the // collector. type Compression oconf.Compression const ( // NoCompression tells the driver to send payloads without // compression. NoCompression = Compression(oconf.NoCompression) // GzipCompression tells the driver to send payloads after // compressing them with gzip. GzipCompression = Compression(oconf.GzipCompression) ) // Option applies an option to the Exporter. type Option interface { applyHTTPOption(oconf.Config) oconf.Config } func asHTTPOptions(opts []Option) []oconf.HTTPOption { converted := make([]oconf.HTTPOption, len(opts)) for i, o := range opts { converted[i] = oconf.NewHTTPOption(o.applyHTTPOption) } return converted } // RetryConfig defines configuration for retrying the export of metric data // that failed. 
type RetryConfig retry.Config type wrappedOption struct { oconf.HTTPOption } func (w wrappedOption) applyHTTPOption(cfg oconf.Config) oconf.Config { return w.ApplyHTTPOption(cfg) } // WithEndpoint sets the target endpoint the Exporter will connect to. This // endpoint is specified as a host and optional port; no path or scheme should // be included (see WithInsecure and WithURLPath). // // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT // environment variable is set, and this option is not passed, that variable // value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT // will take precedence. // // By default, if an environment variable is not set, and this option is not // passed, "localhost:4318" will be used. func WithEndpoint(endpoint string) Option { return wrappedOption{oconf.WithEndpoint(endpoint)} } // WithCompression sets the compression strategy the Exporter will use to // compress the HTTP body. // // If the OTEL_EXPORTER_OTLP_COMPRESSION or // OTEL_EXPORTER_OTLP_METRICS_COMPRESSION environment variable is set, and // this option is not passed, that variable value will be used. That value can // be either "none" or "gzip". If both are set, // OTEL_EXPORTER_OTLP_METRICS_COMPRESSION will take precedence. // // By default, if an environment variable is not set, and this option is not // passed, no compression strategy will be used. func WithCompression(compression Compression) Option { return wrappedOption{oconf.WithCompression(oconf.Compression(compression))} } // WithURLPath sets the URL path the Exporter will send requests to. // // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT // environment variable is set, and this option is not passed, the path // contained in that variable value will be used. If both are set, // OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence. // // By default, if an environment variable is not set, and this option is not // passed, "/v1/metrics" will be used. func WithURLPath(urlPath string) Option { return wrappedOption{oconf.WithURLPath(urlPath)} } // WithTLSClientConfig sets the TLS configuration the Exporter will use for // HTTP requests. // // If the OTEL_EXPORTER_OTLP_CERTIFICATE or // OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE environment variable is set, and // this option is not passed, that variable value will be used. The value will // be parsed as the filepath of the TLS certificate chain to use. If both are // set, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE will take precedence. // // By default, if an environment variable is not set, and this option is not // passed, the system default configuration is used. func WithTLSClientConfig(tlsCfg *tls.Config) Option { return wrappedOption{oconf.WithTLSClientConfig(tlsCfg)} } // WithInsecure disables client transport security for the Exporter's HTTP // connection. // // If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT // environment variable is set, and this option is not passed, that variable // value will be used to determine client security. If the endpoint has a // scheme of "http" or "unix", client security will be disabled. If both are // set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence. // // By default, if an environment variable is not set, and this option is not // passed, client security will be used. func WithInsecure() Option { return wrappedOption{oconf.WithInsecure()} } // WithHeaders will send the provided headers with each HTTP request. 
// // If the OTEL_EXPORTER_OTLP_HEADERS or OTEL_EXPORTER_OTLP_METRICS_HEADERS // environment variable is set, and this option is not passed, that variable // value will be used. The value will be parsed as a list of key value pairs. // These pairs are expected to be in the W3C Correlation-Context format // without additional semi-colon delimited metadata (i.e. "k1=v1,k2=v2"). If // both are set, OTEL_EXPORTER_OTLP_METRICS_HEADERS will take precedence. // // By default, if an environment variable is not set, and this option is not // passed, no user headers will be set. func WithHeaders(headers map[string]string) Option { return wrappedOption{oconf.WithHeaders(headers)} } // WithTimeout sets the max amount of time an Exporter will attempt an export. // // This takes precedence over any retry settings defined by WithRetry. Once // this time limit has been reached the export is abandoned and the metric // data is dropped. // // If the OTEL_EXPORTER_OTLP_TIMEOUT or OTEL_EXPORTER_OTLP_METRICS_TIMEOUT // environment variable is set, and this option is not passed, that variable // value will be used. The value will be parsed as an integer representing the // timeout in milliseconds. If both are set, // OTEL_EXPORTER_OTLP_METRICS_TIMEOUT will take precedence. // // By default, if an environment variable is not set, and this option is not // passed, a timeout of 10 seconds will be used. func WithTimeout(duration time.Duration) Option { return wrappedOption{oconf.WithTimeout(duration)} } // WithRetry sets the retry policy for transient retryable errors that are // returned by the target endpoint. // // If the target endpoint responds with not only a retryable error, but // explicitly returns a backoff time in the response, that time will take // precedence over these settings. // // If unset, the default retry policy will be used. It will retry the export // 5 seconds after receiving a retryable error and increase exponentially // after each error for no more than a total time of 1 minute. func WithRetry(rc RetryConfig) Option { return wrappedOption{oconf.WithRetry(retry.Config(rc))} } // WithTemporalitySelector sets the TemporalitySelector the client will use to // determine the Temporality of an instrument based on its kind. If this option // is not used, the client will use the DefaultTemporalitySelector from the // go.opentelemetry.io/otel/sdk/metric package. func WithTemporalitySelector(selector metric.TemporalitySelector) Option { return wrappedOption{oconf.WithTemporalitySelector(selector)} } // WithAggregationSelector sets the AggregationSelector the client will use to // determine the aggregation to use for an instrument based on its kind. If // this option is not used, the reader will use the DefaultAggregationSelector // from the go.opentelemetry.io/otel/sdk/metric package, or the aggregation // explicitly passed for a view matching an instrument. func WithAggregationSelector(selector metric.AggregationSelector) Option { return wrappedOption{oconf.WithAggregationSelector(selector)} } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go000066400000000000000000000132021452547353200264530ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* Package otlpmetrichttp provides an OTLP metrics exporter using HTTP with protobuf payloads. By default the telemetry is sent to https://localhost:4318/v1/metrics. The Exporter should be created using [New] and used with a [metric.PeriodicReader]. The environment variables described below can be used for configuration. OTEL_EXPORTER_OTLP_ENDPOINT (default: "https://localhost:4318") - target base URL ("/v1/metrics" is appended) to which the exporter sends telemetry. The value must contain a scheme ("http" or "https") and host. The value may additionally contain a port and a path. The value should not contain a query string or fragment. The configuration can be overridden by OTEL_EXPORTER_OTLP_METRICS_ENDPOINT environment variable and by [WithEndpoint], [WithInsecure] options. OTEL_EXPORTER_OTLP_METRICS_ENDPOINT (default: "https://localhost:4318/v1/metrics") - target URL to which the exporter sends telemetry. The value must contain a scheme ("http" or "https") and host. The value may additionally contain a port and a path. The value should not contain a query string or fragment. The configuration can be overridden by [WithEndpoint], [WithInsecure], [WithURLPath] options. OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_METRICS_HEADERS (default: none) - key-value pairs used as headers associated with HTTP requests. The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format], except that additional semi-colon delimited metadata is not supported. Example value: "key1=value1,key2=value2". OTEL_EXPORTER_OTLP_METRICS_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS. The configuration can be overridden by [WithHeaders] option. OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_METRICS_TIMEOUT (default: "10000") - maximum time in milliseconds the OTLP exporter waits for each batch export. OTEL_EXPORTER_OTLP_METRICS_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT. The configuration can be overridden by [WithTimeout] option. OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_METRICS_COMPRESSION (default: none) - compression strategy the exporter uses to compress the HTTP body. Supported values: "gzip". OTEL_EXPORTER_OTLP_METRICS_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION. The configuration can be overridden by [WithCompression] option. OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE (default: none) - filepath to the trusted certificate to use when verifying a server's TLS credentials. OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE. The configuration can be overridden by [WithTLSClientConfig] option. OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE (default: none) - filepath to the client certificate/chain trust for the client's private key to use in mTLS communication in PEM format. OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE. The configuration can be overridden by [WithTLSClientConfig] option. 
OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY (default: none) - filepath to the clients private key to use in mTLS communication in PEM format. OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY. The configuration can be overridden by [WithTLSClientConfig] option. OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE (default: "cumulative") - aggregation temporality to use on the basis of instrument kind. Supported values: - "cumulative" - Cumulative aggregation temporality for all instrument kinds, - "delta" - Delta aggregation temporality for Counter, Asynchronous Counter and Histogram instrument kinds; Cumulative aggregation for UpDownCounter and Asynchronous UpDownCounter instrument kinds, - "lowmemory" - Delta aggregation temporality for Synchronous Counter and Histogram instrument kinds; Cumulative aggregation temporality for Synchronous UpDownCounter, Asynchronous Counter, and Asynchronous UpDownCounter instrument kinds. The configuration can be overridden by [WithTemporalitySelector] option. OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION (default: "explicit_bucket_histogram") - default aggregation to use for histogram instruments. Supported values: - "explicit_bucket_histogram" - [Explicit Bucket Histogram Aggregation], - "base2_exponential_bucket_histogram" - [Base2 Exponential Bucket Histogram Aggregation]. The configuration can be overridden by [WithAggregationSelector] option. [W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content [Explicit Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#explicit-bucket-histogram-aggregation [Base2 Exponential Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#base2-exponential-bucket-histogram-aggregation */ package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/example_test.go000066400000000000000000000023041452547353200304010ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlpmetrichttp_test import ( "context" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" "go.opentelemetry.io/otel/sdk/metric" ) func Example() { ctx := context.Background() exp, err := otlpmetrichttp.New(ctx) if err != nil { panic(err) } meterProvider := metric.NewMeterProvider(metric.WithReader(metric.NewPeriodicReader(exp))) defer func() { if err := meterProvider.Shutdown(ctx); err != nil { panic(err) } }() otel.SetMeterProvider(meterProvider) // From here, the meterProvider can be used by instrumentation to collect // telemetry. 
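	// Illustrative sketch: the meter name and instrument name below are
	// arbitrary placeholders, not values defined by this package. Once the
	// global MeterProvider is set, instrumentation typically obtains a Meter,
	// creates an instrument, and records measurements that the periodic
	// reader exports over OTLP/HTTP.
	meter := otel.Meter("example/otlpmetrichttp-sketch")
	counter, err := meter.Int64Counter("example.request.count")
	if err != nil {
		panic(err)
	}
	counter.Add(ctx, 1)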
} opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go000066400000000000000000000116301452547353200275610ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" import ( "context" "fmt" "sync" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" ) // Exporter is a OpenTelemetry metric Exporter using protobufs over HTTP. type Exporter struct { // Ensure synchronous access to the client across all functionality. clientMu sync.Mutex client interface { UploadMetrics(context.Context, *metricpb.ResourceMetrics) error Shutdown(context.Context) error } temporalitySelector metric.TemporalitySelector aggregationSelector metric.AggregationSelector shutdownOnce sync.Once } func newExporter(c *client, cfg oconf.Config) (*Exporter, error) { ts := cfg.Metrics.TemporalitySelector if ts == nil { ts = func(metric.InstrumentKind) metricdata.Temporality { return metricdata.CumulativeTemporality } } as := cfg.Metrics.AggregationSelector if as == nil { as = metric.DefaultAggregationSelector } return &Exporter{ client: c, temporalitySelector: ts, aggregationSelector: as, }, nil } // Temporality returns the Temporality to use for an instrument kind. func (e *Exporter) Temporality(k metric.InstrumentKind) metricdata.Temporality { return e.temporalitySelector(k) } // Aggregation returns the Aggregation to use for an instrument kind. func (e *Exporter) Aggregation(k metric.InstrumentKind) metric.Aggregation { return e.aggregationSelector(k) } // Export transforms and transmits metric data to an OTLP receiver. // // This method returns an error if called after Shutdown. // This method returns an error if the method is canceled by the passed context. func (e *Exporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error { defer global.Debug("OTLP/HTTP exporter export", "Data", rm) otlpRm, err := transform.ResourceMetrics(rm) // Best effort upload of transformable metrics. e.clientMu.Lock() upErr := e.client.UploadMetrics(ctx, otlpRm) e.clientMu.Unlock() if upErr != nil { if err == nil { return fmt.Errorf("failed to upload metrics: %w", upErr) } // Merge the two errors. return fmt.Errorf("failed to upload incomplete metrics (%s): %w", err, upErr) } return err } // ForceFlush flushes any metric data held by an exporter. // // This method returns an error if called after Shutdown. // This method returns an error if the method is canceled by the passed context. // // This method is safe to call concurrently. func (e *Exporter) ForceFlush(ctx context.Context) error { // The exporter and client hold no state, nothing to flush. 
return ctx.Err() } // Shutdown flushes all metric data held by an exporter and releases any held // computational resources. // // This method returns an error if called after Shutdown. // This method returns an error if the method is canceled by the passed context. // // This method is safe to call concurrently. func (e *Exporter) Shutdown(ctx context.Context) error { err := errShutdown e.shutdownOnce.Do(func() { e.clientMu.Lock() client := e.client e.client = shutdownClient{} e.clientMu.Unlock() err = client.Shutdown(ctx) }) return err } var errShutdown = fmt.Errorf("HTTP exporter is shutdown") type shutdownClient struct{} func (c shutdownClient) err(ctx context.Context) error { if err := ctx.Err(); err != nil { return err } return errShutdown } func (c shutdownClient) UploadMetrics(ctx context.Context, _ *metricpb.ResourceMetrics) error { return c.err(ctx) } func (c shutdownClient) Shutdown(ctx context.Context) error { return c.err(ctx) } // MarshalLog returns logging data about the Exporter. func (e *Exporter) MarshalLog() interface{} { return struct{ Type string }{Type: "OTLP/HTTP"} } // New returns an OpenTelemetry metric Exporter. The Exporter can be used with // a PeriodicReader to export OpenTelemetry metric data to an OTLP receiving // endpoint using protobufs over HTTP. func New(_ context.Context, opts ...Option) (*Exporter, error) { cfg := oconf.NewHTTPConfig(asHTTPOptions(opts)...) c, err := newClient(cfg) if err != nil { return nil, err } return newExporter(c, cfg) } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/exporter_test.go000066400000000000000000000062161452547353200306240ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" import ( "context" "sync" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/otest" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) func TestExporterClientConcurrentSafe(t *testing.T) { const goroutines = 5 coll, err := otest.NewHTTPCollector("", nil) require.NoError(t, err) ctx := context.Background() addr := coll.Addr().String() opts := []Option{WithEndpoint(addr), WithInsecure()} cfg := oconf.NewHTTPConfig(asHTTPOptions(opts)...) client, err := newClient(cfg) require.NoError(t, err) exp, err := newExporter(client, oconf.Config{}) require.NoError(t, err) rm := new(metricdata.ResourceMetrics) done := make(chan struct{}) var wg, someWork sync.WaitGroup for i := 0; i < goroutines; i++ { wg.Add(1) someWork.Add(1) go func() { defer wg.Done() assert.NoError(t, exp.Export(ctx, rm)) assert.NoError(t, exp.ForceFlush(ctx)) // Ensure some work is done before shutting down. 
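			// Done signals the main test goroutine, which waits for every
			// worker to have exported and flushed once before calling Shutdown.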
someWork.Done() for { _ = exp.Export(ctx, rm) _ = exp.ForceFlush(ctx) select { case <-done: return default: } } }() } someWork.Wait() assert.NoError(t, exp.Shutdown(ctx)) assert.ErrorIs(t, exp.Shutdown(ctx), errShutdown) close(done) wg.Wait() } func TestExporterDoesNotBlockTemporalityAndAggregation(t *testing.T) { rCh := make(chan otest.ExportResult, 1) coll, err := otest.NewHTTPCollector("", rCh) require.NoError(t, err) ctx := context.Background() addr := coll.Addr().String() opts := []Option{WithEndpoint(addr), WithInsecure()} cfg := oconf.NewHTTPConfig(asHTTPOptions(opts)...) client, err := newClient(cfg) require.NoError(t, err) exp, err := newExporter(client, oconf.Config{}) require.NoError(t, err) var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() rm := new(metricdata.ResourceMetrics) t.Log("starting export") require.NoError(t, exp.Export(ctx, rm)) t.Log("export complete") }() assert.Eventually(t, func() bool { const inst = metric.InstrumentKindCounter // These should not be blocked. t.Log("getting temporality") _ = exp.Temporality(inst) t.Log("getting aggregation") _ = exp.Aggregation(inst) return true }, time.Second, 10*time.Millisecond) // Clear the export. rCh <- otest.ExportResult{} close(rCh) wg.Wait() } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod000066400000000000000000000031251452547353200264700ustar00rootroot00000000000000module go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp go 1.20 retract v0.32.2 // Contains unresolvable dependencies. require ( github.com/cenkalti/backoff/v4 v4.2.1 github.com/google/go-cmp v0.6.0 github.com/stretchr/testify v1.8.4 go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/sdk v1.21.0 go.opentelemetry.io/otel/sdk/metric v1.21.0 go.opentelemetry.io/proto/otlp v1.0.0 google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.31.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/kr/text v0.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.11.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/otel/trace v1.21.0 // indirect golang.org/x/net v0.17.0 // indirect golang.org/x/sys v0.14.0 // indirect golang.org/x/text v0.13.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) replace go.opentelemetry.io/otel => ../../../.. 
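
// The replace directives in this file point the go.opentelemetry.io
// dependencies at the sibling modules in this repository rather than at
// published releases.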
replace go.opentelemetry.io/otel/sdk => ../../../../sdk replace go.opentelemetry.io/otel/sdk/metric => ../../../../sdk/metric replace go.opentelemetry.io/otel/metric => ../../../../metric replace go.opentelemetry.io/otel/trace => ../../../../trace opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/go.sum000066400000000000000000000110731452547353200265160ustar00rootroot00000000000000github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/internal/000077500000000000000000000000001452547353200271755ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/000077500000000000000000000000001452547353200311535ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go000066400000000000000000000131721452547353200334640ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/envconfig/envconfig.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig" import ( "crypto/tls" "crypto/x509" "errors" "fmt" "net/url" "strconv" "strings" "time" "go.opentelemetry.io/otel/internal/global" ) // ConfigFn is the generic function used to set a config. type ConfigFn func(*EnvOptionsReader) // EnvOptionsReader reads the required environment variables. type EnvOptionsReader struct { GetEnv func(string) string ReadFile func(string) ([]byte, error) Namespace string } // Apply runs every ConfigFn. func (e *EnvOptionsReader) Apply(opts ...ConfigFn) { for _, o := range opts { o(e) } } // GetEnvValue gets an OTLP environment variable value of the specified key // using the GetEnv function. 
// This function prepends the OTLP specified namespace to all key lookups. func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) { v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key))) return v, v != "" } // WithString retrieves the specified config and passes it to ConfigFn as a string. func WithString(n string, fn func(string)) func(e *EnvOptionsReader) { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { fn(v) } } } // WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn. func WithBool(n string, fn func(bool)) ConfigFn { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { b := strings.ToLower(v) == "true" fn(b) } } } // WithDuration retrieves the specified config and passes it to ConfigFn as a duration. func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { d, err := strconv.Atoi(v) if err != nil { global.Error(err, "parse duration", "input", v) return } fn(time.Duration(d) * time.Millisecond) } } } // WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers. func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { fn(stringToHeader(v)) } } } // WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL. func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { u, err := url.Parse(v) if err != nil { global.Error(err, "parse url", "input", v) return } fn(u) } } } // WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn. func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { b, err := e.ReadFile(v) if err != nil { global.Error(err, "read tls ca cert file", "file", v) return } c, err := createCertPool(b) if err != nil { global.Error(err, "create tls cert pool") return } fn(c) } } } // WithClientCert returns a ConfigFn that reads the environment variable nc and nk as filepaths to a client certificate and key pair. If they exists, they are parsed as a crypto/tls.Certificate and it is passed to fn. 
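//
// For example (an illustrative usage, mirroring how the oconf package in this
// module applies it; tlsConf there is a *tls.Config being assembled):
//
//	reader.Apply(WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) {
//		tlsConf.Certificates = []tls.Certificate{c}
//	}))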
func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn { return func(e *EnvOptionsReader) { vc, okc := e.GetEnvValue(nc) vk, okk := e.GetEnvValue(nk) if !okc || !okk { return } cert, err := e.ReadFile(vc) if err != nil { global.Error(err, "read tls client cert", "file", vc) return } key, err := e.ReadFile(vk) if err != nil { global.Error(err, "read tls client key", "file", vk) return } crt, err := tls.X509KeyPair(cert, key) if err != nil { global.Error(err, "create tls client key pair") return } fn(crt) } } func keyWithNamespace(ns, key string) string { if ns == "" { return key } return fmt.Sprintf("%s_%s", ns, key) } func stringToHeader(value string) map[string]string { headersPairs := strings.Split(value, ",") headers := make(map[string]string) for _, header := range headersPairs { n, v, found := strings.Cut(header, "=") if !found { global.Error(errors.New("missing '="), "parse headers", "input", header) continue } name, err := url.PathUnescape(n) if err != nil { global.Error(err, "escape header key", "key", n) continue } trimmedName := strings.TrimSpace(name) value, err := url.PathUnescape(v) if err != nil { global.Error(err, "escape header value", "value", v) continue } trimmedValue := strings.TrimSpace(value) headers[trimmedName] = trimmedValue } return headers } func createCertPool(certBytes []byte) (*x509.CertPool, error) { cp := x509.NewCertPool() if ok := cp.AppendCertsFromPEM(certBytes); !ok { return nil, errors.New("failed to append certificate to the cert pool") } return cp, nil } envconfig_test.go000066400000000000000000000260371452547353200344500ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/envconfig/envconfig_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package envconfig import ( "crypto/tls" "crypto/x509" "errors" "net/url" "testing" "time" "github.com/stretchr/testify/assert" ) const WeakKey = ` -----BEGIN EC PRIVATE KEY----- MHcCAQEEIEbrSPmnlSOXvVzxCyv+VR3a0HDeUTvOcqrdssZ2k4gFoAoGCCqGSM49 AwEHoUQDQgAEDMTfv75J315C3K9faptS9iythKOMEeV/Eep73nWX531YAkmmwBSB 2dXRD/brsgLnfG57WEpxZuY7dPRbxu33BA== -----END EC PRIVATE KEY----- ` const WeakCertificate = ` -----BEGIN CERTIFICATE----- MIIBjjCCATWgAwIBAgIUKQSMC66MUw+kPp954ZYOcyKAQDswCgYIKoZIzj0EAwIw EjEQMA4GA1UECgwHb3RlbC1nbzAeFw0yMjEwMTkwMDA5MTlaFw0yMzEwMTkwMDA5 MTlaMBIxEDAOBgNVBAoMB290ZWwtZ28wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNC AAQMxN+/vknfXkLcr19qm1L2LK2Eo4wR5X8R6nvedZfnfVgCSabAFIHZ1dEP9uuy Aud8bntYSnFm5jt09FvG7fcEo2kwZzAdBgNVHQ4EFgQUicGuhnTTkYLZwofXMNLK SHFeCWgwHwYDVR0jBBgwFoAUicGuhnTTkYLZwofXMNLKSHFeCWgwDwYDVR0TAQH/ BAUwAwEB/zAUBgNVHREEDTALgglsb2NhbGhvc3QwCgYIKoZIzj0EAwIDRwAwRAIg Lfma8FnnxeSOi6223AsFfYwsNZ2RderNsQrS0PjEHb0CIBkrWacqARUAu7uT4cGu jVcIxYQqhId5L8p/mAv2PWZS -----END CERTIFICATE----- ` type testOption struct { TestString string TestBool bool TestDuration time.Duration TestHeaders map[string]string TestURL *url.URL TestTLS *tls.Config } func TestEnvConfig(t *testing.T) { parsedURL, err := url.Parse("https://example.com") assert.NoError(t, err) options := []testOption{} for _, testcase := range []struct { name string reader EnvOptionsReader configs []ConfigFn expectedOptions []testOption }{ { name: "with no namespace and a matching key", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithString("HELLO", func(v string) { options = append(options, testOption{TestString: v}) }), }, expectedOptions: []testOption{ { TestString: "world", }, }, }, { name: "with no namespace and a non-matching key", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithString("HOLA", func(v string) { options = append(options, testOption{TestString: v}) }), }, expectedOptions: []testOption{}, }, { name: "with a namespace and a matching key", reader: EnvOptionsReader{ Namespace: "MY_NAMESPACE", GetEnv: func(n string) string { if n == "MY_NAMESPACE_HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithString("HELLO", func(v string) { options = append(options, testOption{TestString: v}) }), }, expectedOptions: []testOption{ { TestString: "world", }, }, }, { name: "with no namespace and a non-matching key", reader: EnvOptionsReader{ Namespace: "MY_NAMESPACE", GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithString("HELLO", func(v string) { options = append(options, testOption{TestString: v}) }), }, expectedOptions: []testOption{}, }, { name: "with a bool config", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "true" } else if n == "WORLD" { return "false" } return "" }, }, configs: []ConfigFn{ WithBool("HELLO", func(b bool) { options = append(options, testOption{TestBool: b}) }), WithBool("WORLD", func(b bool) { options = append(options, testOption{TestBool: b}) }), }, expectedOptions: []testOption{ { TestBool: true, }, { TestBool: false, }, }, }, { name: "with an invalid bool config", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithBool("HELLO", func(b bool) { options = append(options, testOption{TestBool: b}) }), }, expectedOptions: []testOption{ { TestBool: false, 
}, }, }, { name: "with a duration config", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "60" } return "" }, }, configs: []ConfigFn{ WithDuration("HELLO", func(v time.Duration) { options = append(options, testOption{TestDuration: v}) }), }, expectedOptions: []testOption{ { TestDuration: 60_000_000, // 60 milliseconds }, }, }, { name: "with an invalid duration config", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithDuration("HELLO", func(v time.Duration) { options = append(options, testOption{TestDuration: v}) }), }, expectedOptions: []testOption{}, }, { name: "with headers", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "userId=42,userName=alice" } return "" }, }, configs: []ConfigFn{ WithHeaders("HELLO", func(v map[string]string) { options = append(options, testOption{TestHeaders: v}) }), }, expectedOptions: []testOption{ { TestHeaders: map[string]string{ "userId": "42", "userName": "alice", }, }, }, }, { name: "with invalid headers", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithHeaders("HELLO", func(v map[string]string) { options = append(options, testOption{TestHeaders: v}) }), }, expectedOptions: []testOption{ { TestHeaders: map[string]string{}, }, }, }, { name: "with URL", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "https://example.com" } return "" }, }, configs: []ConfigFn{ WithURL("HELLO", func(v *url.URL) { options = append(options, testOption{TestURL: v}) }), }, expectedOptions: []testOption{ { TestURL: parsedURL, }, }, }, { name: "with invalid URL", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "i nvalid://url" } return "" }, }, configs: []ConfigFn{ WithURL("HELLO", func(v *url.URL) { options = append(options, testOption{TestURL: v}) }), }, expectedOptions: []testOption{}, }, } { t.Run(testcase.name, func(t *testing.T) { testcase.reader.Apply(testcase.configs...) assert.Equal(t, testcase.expectedOptions, options) options = []testOption{} }) } } func TestWithTLSConfig(t *testing.T) { pool, err := createCertPool([]byte(WeakCertificate)) assert.NoError(t, err) reader := EnvOptionsReader{ GetEnv: func(n string) string { if n == "CERTIFICATE" { return "/path/cert.pem" } return "" }, ReadFile: func(p string) ([]byte, error) { if p == "/path/cert.pem" { return []byte(WeakCertificate), nil } return []byte{}, nil }, } var option testOption reader.Apply( WithCertPool("CERTIFICATE", func(cp *x509.CertPool) { option = testOption{TestTLS: &tls.Config{RootCAs: cp}} }), ) // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. 
assert.Equal(t, pool.Subjects(), option.TestTLS.RootCAs.Subjects()) } func TestWithClientCert(t *testing.T) { cert, err := tls.X509KeyPair([]byte(WeakCertificate), []byte(WeakKey)) assert.NoError(t, err) reader := EnvOptionsReader{ GetEnv: func(n string) string { switch n { case "CLIENT_CERTIFICATE": return "/path/tls.crt" case "CLIENT_KEY": return "/path/tls.key" } return "" }, ReadFile: func(n string) ([]byte, error) { switch n { case "/path/tls.crt": return []byte(WeakCertificate), nil case "/path/tls.key": return []byte(WeakKey), nil } return []byte{}, nil }, } var option testOption reader.Apply( WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { option = testOption{TestTLS: &tls.Config{Certificates: []tls.Certificate{c}}} }), ) assert.Equal(t, cert, option.TestTLS.Certificates[0]) reader.ReadFile = func(s string) ([]byte, error) { return nil, errors.New("oops") } option.TestTLS = nil reader.Apply( WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { option = testOption{TestTLS: &tls.Config{Certificates: []tls.Certificate{c}}} }), ) assert.Nil(t, option.TestTLS) reader.GetEnv = func(s string) string { return "" } option.TestTLS = nil reader.Apply( WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { option = testOption{TestTLS: &tls.Config{Certificates: []tls.Certificate{c}}} }), ) assert.Nil(t, option.TestTLS) } func TestStringToHeader(t *testing.T) { tests := []struct { name string value string want map[string]string }{ { name: "simple test", value: "userId=alice", want: map[string]string{"userId": "alice"}, }, { name: "simple test with spaces", value: " userId = alice ", want: map[string]string{"userId": "alice"}, }, { name: "simple header conforms to RFC 3986 spec", value: " userId = alice+test ", want: map[string]string{"userId": "alice+test"}, }, { name: "multiple headers encoded", value: "userId=alice,serverNode=DF%3A28,isProduction=false", want: map[string]string{ "userId": "alice", "serverNode": "DF:28", "isProduction": "false", }, }, { name: "multiple headers encoded per RFC 3986 spec", value: "userId=alice+test,serverNode=DF%3A28,isProduction=false,namespace=localhost/test", want: map[string]string{ "userId": "alice+test", "serverNode": "DF:28", "isProduction": "false", "namespace": "localhost/test", }, }, { name: "invalid headers format", value: "userId:alice", want: map[string]string{}, }, { name: "invalid key", value: "%XX=missing,userId=alice", want: map[string]string{ "userId": "alice", }, }, { name: "invalid value", value: "missing=%XX,userId=alice", want: map[string]string{ "userId": "alice", }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { assert.Equal(t, tt.want, stringToHeader(tt.value)) }) } } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/internal/gen.go000066400000000000000000000100421452547353200302720ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal" //go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig\"}" --out=oconf/envconfig.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/envconfig_test.go.tmpl "--data={}" --out=oconf/envconfig_test.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry\"}" --out=oconf/options.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig\"}" --out=oconf/options_test.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl "--data={}" --out=oconf/optiontypes.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl "--data={}" --out=oconf/tls.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client.go.tmpl "--data={}" --out=otest/client.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/client_test.go.tmpl "--data={\"internalImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal\"}" --out=otest/client_test.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/otest/collector.go.tmpl "--data={\"oconfImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf\"}" --out=otest/collector.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl "--data={}" --out=transform/attribute.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/attribute_test.go.tmpl "--data={}" --out=transform/attribute_test.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error.go.tmpl "--data={}" --out=transform/error.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/error_test.go.tmpl "--data={}" --out=transform/error_test.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl "--data={}" --out=transform/metricdata.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl "--data={}" --out=transform/metricdata_test.go 
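
// The files named by the --out flags above are regenerated by running
// "go generate ./..." in this module, assuming the gotmpl tool referenced by
// these directives is installed and on the PATH.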
opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/000077500000000000000000000000001452547353200303015ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig.go000066400000000000000000000172121452547353200326110ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf" import ( "crypto/tls" "crypto/x509" "net/url" "os" "path" "strings" "time" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) // DefaultEnvOptionsReader is the default environments reader. var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{ GetEnv: os.Getenv, ReadFile: os.ReadFile, Namespace: "OTEL_EXPORTER_OTLP", } // ApplyGRPCEnvConfigs applies the env configurations for gRPC. func ApplyGRPCEnvConfigs(cfg Config) Config { opts := getOptionsFromEnv() for _, opt := range opts { cfg = opt.ApplyGRPCOption(cfg) } return cfg } // ApplyHTTPEnvConfigs applies the env configurations for HTTP. func ApplyHTTPEnvConfigs(cfg Config) Config { opts := getOptionsFromEnv() for _, opt := range opts { cfg = opt.ApplyHTTPOption(cfg) } return cfg } func getOptionsFromEnv() []GenericOption { opts := []GenericOption{} tlsConf := &tls.Config{} DefaultEnvOptionsReader.Apply( envconfig.WithURL("ENDPOINT", func(u *url.URL) { opts = append(opts, withEndpointScheme(u)) opts = append(opts, newSplitOption(func(cfg Config) Config { cfg.Metrics.Endpoint = u.Host // For OTLP/HTTP endpoint URLs without a per-signal // configuration, the passed endpoint is used as a base URL // and the signals are sent to these paths relative to that. cfg.Metrics.URLPath = path.Join(u.Path, DefaultMetricsPath) return cfg }, withEndpointForGRPC(u))) }), envconfig.WithURL("METRICS_ENDPOINT", func(u *url.URL) { opts = append(opts, withEndpointScheme(u)) opts = append(opts, newSplitOption(func(cfg Config) Config { cfg.Metrics.Endpoint = u.Host // For endpoint URLs for OTLP/HTTP per-signal variables, the // URL MUST be used as-is without any modification. The only // exception is that if an URL contains no path part, the root // path / MUST be used. 
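				// For example, a per-signal endpoint of
				// "https://collector.example.com" (a hypothetical host)
				// results in the URL path "/", while
				// "https://collector.example.com/v1/metrics" is used unchanged.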
path := u.Path if path == "" { path = "/" } cfg.Metrics.URLPath = path return cfg }, withEndpointForGRPC(u))) }), envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), envconfig.WithHeaders("METRICS_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }), withEnvAggPreference("METRICS_DEFAULT_HISTOGRAM_AGGREGATION", func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }), ) return opts } func withEndpointForGRPC(u *url.URL) func(cfg Config) Config { return func(cfg Config) Config { // For OTLP/gRPC endpoints, this is the target to which the // exporter is going to send telemetry. cfg.Metrics.Endpoint = path.Join(u.Host, u.Path) return cfg } } // WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression. 
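//
// For example, getOptionsFromEnv in this package applies it as:
//
//	WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) })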
func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) { return func(e *envconfig.EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { cp := NoCompression if v == "gzip" { cp = GzipCompression } fn(cp) } } } func withEndpointScheme(u *url.URL) GenericOption { switch strings.ToLower(u.Scheme) { case "http", "unix": return WithInsecure() default: return WithSecure() } } // revive:disable-next-line:flag-parameter func withInsecure(b bool) GenericOption { if b { return WithInsecure() } return WithSecure() } func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) { return func(e *envconfig.EnvOptionsReader) { if c.RootCAs != nil || len(c.Certificates) > 0 { fn(c) } } } func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) func(e *envconfig.EnvOptionsReader) { return func(e *envconfig.EnvOptionsReader) { if s, ok := e.GetEnvValue(n); ok { switch strings.ToLower(s) { case "cumulative": fn(cumulativeTemporality) case "delta": fn(deltaTemporality) case "lowmemory": fn(lowMemory) default: global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s) } } } } func cumulativeTemporality(metric.InstrumentKind) metricdata.Temporality { return metricdata.CumulativeTemporality } func deltaTemporality(ik metric.InstrumentKind) metricdata.Temporality { switch ik { case metric.InstrumentKindCounter, metric.InstrumentKindHistogram, metric.InstrumentKindObservableCounter: return metricdata.DeltaTemporality default: return metricdata.CumulativeTemporality } } func lowMemory(ik metric.InstrumentKind) metricdata.Temporality { switch ik { case metric.InstrumentKindCounter, metric.InstrumentKindHistogram: return metricdata.DeltaTemporality default: return metricdata.CumulativeTemporality } } func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e *envconfig.EnvOptionsReader) { return func(e *envconfig.EnvOptionsReader) { if s, ok := e.GetEnvValue(n); ok { switch strings.ToLower(s) { case "explicit_bucket_histogram": fn(metric.DefaultAggregationSelector) case "base2_exponential_bucket_histogram": fn(func(kind metric.InstrumentKind) metric.Aggregation { if kind == metric.InstrumentKindHistogram { return metric.AggregationBase2ExponentialHistogram{ MaxSize: 160, MaxScale: 20, NoMinMax: false, } } return metric.DefaultAggregationSelector(kind) }) default: global.Warn("OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", "value", s) } } } } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/envconfig_test.go000066400000000000000000000150701452547353200336500ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/oconf/envconfig_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package oconf import ( "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) func TestWithEnvTemporalityPreference(t *testing.T) { origReader := DefaultEnvOptionsReader.GetEnv tests := []struct { name string envValue string want map[metric.InstrumentKind]metricdata.Temporality }{ { name: "default do not set the selector", envValue: "", }, { name: "non-normative do not set the selector", envValue: "non-normative", }, { name: "cumulative", envValue: "cumulative", want: map[metric.InstrumentKind]metricdata.Temporality{ metric.InstrumentKindCounter: metricdata.CumulativeTemporality, metric.InstrumentKindHistogram: metricdata.CumulativeTemporality, metric.InstrumentKindUpDownCounter: metricdata.CumulativeTemporality, metric.InstrumentKindObservableCounter: metricdata.CumulativeTemporality, metric.InstrumentKindObservableUpDownCounter: metricdata.CumulativeTemporality, metric.InstrumentKindObservableGauge: metricdata.CumulativeTemporality, }, }, { name: "delta", envValue: "delta", want: map[metric.InstrumentKind]metricdata.Temporality{ metric.InstrumentKindCounter: metricdata.DeltaTemporality, metric.InstrumentKindHistogram: metricdata.DeltaTemporality, metric.InstrumentKindUpDownCounter: metricdata.CumulativeTemporality, metric.InstrumentKindObservableCounter: metricdata.DeltaTemporality, metric.InstrumentKindObservableUpDownCounter: metricdata.CumulativeTemporality, metric.InstrumentKindObservableGauge: metricdata.CumulativeTemporality, }, }, { name: "lowmemory", envValue: "lowmemory", want: map[metric.InstrumentKind]metricdata.Temporality{ metric.InstrumentKindCounter: metricdata.DeltaTemporality, metric.InstrumentKindHistogram: metricdata.DeltaTemporality, metric.InstrumentKindUpDownCounter: metricdata.CumulativeTemporality, metric.InstrumentKindObservableCounter: metricdata.CumulativeTemporality, metric.InstrumentKindObservableUpDownCounter: metricdata.CumulativeTemporality, metric.InstrumentKindObservableGauge: metricdata.CumulativeTemporality, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { DefaultEnvOptionsReader.GetEnv = func(key string) string { if key == "OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE" { return tt.envValue } return origReader(key) } cfg := Config{} cfg = ApplyGRPCEnvConfigs(cfg) if tt.want == nil { // There is no function set, the SDK's default is used. 
assert.Nil(t, cfg.Metrics.TemporalitySelector) return } require.NotNil(t, cfg.Metrics.TemporalitySelector) for ik, want := range tt.want { assert.Equal(t, want, cfg.Metrics.TemporalitySelector(ik)) } }) } DefaultEnvOptionsReader.GetEnv = origReader } func TestWithEnvAggPreference(t *testing.T) { origReader := DefaultEnvOptionsReader.GetEnv tests := []struct { name string envValue string want map[metric.InstrumentKind]metric.Aggregation }{ { name: "default do not set the selector", envValue: "", }, { name: "non-normative do not set the selector", envValue: "non-normative", }, { name: "explicit_bucket_histogram", envValue: "explicit_bucket_histogram", want: map[metric.InstrumentKind]metric.Aggregation{ metric.InstrumentKindCounter: metric.DefaultAggregationSelector(metric.InstrumentKindCounter), metric.InstrumentKindHistogram: metric.DefaultAggregationSelector(metric.InstrumentKindHistogram), metric.InstrumentKindUpDownCounter: metric.DefaultAggregationSelector(metric.InstrumentKindUpDownCounter), metric.InstrumentKindObservableCounter: metric.DefaultAggregationSelector(metric.InstrumentKindObservableCounter), metric.InstrumentKindObservableUpDownCounter: metric.DefaultAggregationSelector(metric.InstrumentKindObservableUpDownCounter), metric.InstrumentKindObservableGauge: metric.DefaultAggregationSelector(metric.InstrumentKindObservableGauge), }, }, { name: "base2_exponential_bucket_histogram", envValue: "base2_exponential_bucket_histogram", want: map[metric.InstrumentKind]metric.Aggregation{ metric.InstrumentKindCounter: metric.DefaultAggregationSelector(metric.InstrumentKindCounter), metric.InstrumentKindHistogram: metric.AggregationBase2ExponentialHistogram{ MaxSize: 160, MaxScale: 20, NoMinMax: false, }, metric.InstrumentKindUpDownCounter: metric.DefaultAggregationSelector(metric.InstrumentKindUpDownCounter), metric.InstrumentKindObservableCounter: metric.DefaultAggregationSelector(metric.InstrumentKindObservableCounter), metric.InstrumentKindObservableUpDownCounter: metric.DefaultAggregationSelector(metric.InstrumentKindObservableUpDownCounter), metric.InstrumentKindObservableGauge: metric.DefaultAggregationSelector(metric.InstrumentKindObservableGauge), }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { DefaultEnvOptionsReader.GetEnv = func(key string) string { if key == "OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION" { return tt.envValue } return origReader(key) } cfg := Config{} cfg = ApplyGRPCEnvConfigs(cfg) if tt.want == nil { // There is no function set, the SDK's default is used. assert.Nil(t, cfg.Metrics.AggregationSelector) return } require.NotNil(t, cfg.Metrics.AggregationSelector) for ik, want := range tt.want { assert.Equal(t, want, cfg.Metrics.AggregationSelector(ik)) } }) } DefaultEnvOptionsReader.GetEnv = origReader } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go000066400000000000000000000230211452547353200323210ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/oconf/options.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf" import ( "crypto/tls" "fmt" "path" "strings" "time" "google.golang.org/grpc" "google.golang.org/grpc/backoff" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/encoding/gzip" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry" "go.opentelemetry.io/otel/sdk/metric" ) const ( // DefaultMaxAttempts describes how many times the driver // should retry the sending of the payload in case of a // retryable error. DefaultMaxAttempts int = 5 // DefaultMetricsPath is a default URL path for endpoint that // receives metrics. DefaultMetricsPath string = "/v1/metrics" // DefaultBackoff is a default base backoff time used in the // exponential backoff strategy. DefaultBackoff time.Duration = 300 * time.Millisecond // DefaultTimeout is a default max waiting time for the backend to process // each span or metrics batch. DefaultTimeout time.Duration = 10 * time.Second ) type ( SignalConfig struct { Endpoint string Insecure bool TLSCfg *tls.Config Headers map[string]string Compression Compression Timeout time.Duration URLPath string // gRPC configurations GRPCCredentials credentials.TransportCredentials TemporalitySelector metric.TemporalitySelector AggregationSelector metric.AggregationSelector } Config struct { // Signal specific configurations Metrics SignalConfig RetryConfig retry.Config // gRPC configurations ReconnectionPeriod time.Duration ServiceConfig string DialOptions []grpc.DialOption GRPCConn *grpc.ClientConn } ) // NewHTTPConfig returns a new Config with all settings applied from opts and // any unset setting using the default HTTP config values. func NewHTTPConfig(opts ...HTTPOption) Config { cfg := Config{ Metrics: SignalConfig{ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort), URLPath: DefaultMetricsPath, Compression: NoCompression, Timeout: DefaultTimeout, TemporalitySelector: metric.DefaultTemporalitySelector, AggregationSelector: metric.DefaultAggregationSelector, }, RetryConfig: retry.DefaultConfig, } cfg = ApplyHTTPEnvConfigs(cfg) for _, opt := range opts { cfg = opt.ApplyHTTPOption(cfg) } cfg.Metrics.URLPath = cleanPath(cfg.Metrics.URLPath, DefaultMetricsPath) return cfg } // cleanPath returns a path with all spaces trimmed and all redundancies // removed. If urlPath is empty or cleaning it results in an empty string, // defaultPath is returned instead. func cleanPath(urlPath string, defaultPath string) string { tmp := path.Clean(strings.TrimSpace(urlPath)) if tmp == "." { return defaultPath } if !path.IsAbs(tmp) { tmp = fmt.Sprintf("/%s", tmp) } return tmp } // NewGRPCConfig returns a new Config with all settings applied from opts and // any unset setting using the default gRPC config values. 
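//
// For example (an illustrative sketch, using the default collector gRPC
// endpoint seen in this module's tests), an insecure gRPC configuration with
// an explicit endpoint could be built as:
//
//	cfg := NewGRPCConfig(WithEndpoint("localhost:4317"), WithInsecure())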
func NewGRPCConfig(opts ...GRPCOption) Config { cfg := Config{ Metrics: SignalConfig{ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort), URLPath: DefaultMetricsPath, Compression: NoCompression, Timeout: DefaultTimeout, TemporalitySelector: metric.DefaultTemporalitySelector, AggregationSelector: metric.DefaultAggregationSelector, }, RetryConfig: retry.DefaultConfig, } cfg = ApplyGRPCEnvConfigs(cfg) for _, opt := range opts { cfg = opt.ApplyGRPCOption(cfg) } if cfg.ServiceConfig != "" { cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig)) } // Priroritize GRPCCredentials over Insecure (passing both is an error). if cfg.Metrics.GRPCCredentials != nil { cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials)) } else if cfg.Metrics.Insecure { cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials())) } else { // Default to using the host's root CA. creds := credentials.NewTLS(nil) cfg.Metrics.GRPCCredentials = creds cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds)) } if cfg.Metrics.Compression == GzipCompression { cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name))) } if cfg.ReconnectionPeriod != 0 { p := grpc.ConnectParams{ Backoff: backoff.DefaultConfig, MinConnectTimeout: cfg.ReconnectionPeriod, } cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p)) } return cfg } type ( // GenericOption applies an option to the HTTP or gRPC driver. GenericOption interface { ApplyHTTPOption(Config) Config ApplyGRPCOption(Config) Config // A private method to prevent users implementing the // interface and so future additions to it will not // violate compatibility. private() } // HTTPOption applies an option to the HTTP driver. HTTPOption interface { ApplyHTTPOption(Config) Config // A private method to prevent users implementing the // interface and so future additions to it will not // violate compatibility. private() } // GRPCOption applies an option to the gRPC driver. GRPCOption interface { ApplyGRPCOption(Config) Config // A private method to prevent users implementing the // interface and so future additions to it will not // violate compatibility. private() } ) // genericOption is an option that applies the same logic // for both gRPC and HTTP. type genericOption struct { fn func(Config) Config } func (g *genericOption) ApplyGRPCOption(cfg Config) Config { return g.fn(cfg) } func (g *genericOption) ApplyHTTPOption(cfg Config) Config { return g.fn(cfg) } func (genericOption) private() {} func newGenericOption(fn func(cfg Config) Config) GenericOption { return &genericOption{fn: fn} } // splitOption is an option that applies different logics // for gRPC and HTTP. type splitOption struct { httpFn func(Config) Config grpcFn func(Config) Config } func (g *splitOption) ApplyGRPCOption(cfg Config) Config { return g.grpcFn(cfg) } func (g *splitOption) ApplyHTTPOption(cfg Config) Config { return g.httpFn(cfg) } func (splitOption) private() {} func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption { return &splitOption{httpFn: httpFn, grpcFn: grpcFn} } // httpOption is an option that is only applied to the HTTP driver. 
type httpOption struct { fn func(Config) Config } func (h *httpOption) ApplyHTTPOption(cfg Config) Config { return h.fn(cfg) } func (httpOption) private() {} func NewHTTPOption(fn func(cfg Config) Config) HTTPOption { return &httpOption{fn: fn} } // grpcOption is an option that is only applied to the gRPC driver. type grpcOption struct { fn func(Config) Config } func (h *grpcOption) ApplyGRPCOption(cfg Config) Config { return h.fn(cfg) } func (grpcOption) private() {} func NewGRPCOption(fn func(cfg Config) Config) GRPCOption { return &grpcOption{fn: fn} } // Generic Options func WithEndpoint(endpoint string) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Metrics.Endpoint = endpoint return cfg }) } func WithCompression(compression Compression) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Metrics.Compression = compression return cfg }) } func WithURLPath(urlPath string) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Metrics.URLPath = urlPath return cfg }) } func WithRetry(rc retry.Config) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.RetryConfig = rc return cfg }) } func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption { return newSplitOption(func(cfg Config) Config { cfg.Metrics.TLSCfg = tlsCfg.Clone() return cfg }, func(cfg Config) Config { cfg.Metrics.GRPCCredentials = credentials.NewTLS(tlsCfg) return cfg }) } func WithInsecure() GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Metrics.Insecure = true return cfg }) } func WithSecure() GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Metrics.Insecure = false return cfg }) } func WithHeaders(headers map[string]string) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Metrics.Headers = headers return cfg }) } func WithTimeout(duration time.Duration) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Metrics.Timeout = duration return cfg }) } func WithTemporalitySelector(selector metric.TemporalitySelector) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Metrics.TemporalitySelector = selector return cfg }) } func WithAggregationSelector(selector metric.AggregationSelector) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Metrics.AggregationSelector = selector return cfg }) } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options_test.go000066400000000000000000000365751452547353200334020ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/oconf/options_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package oconf import ( "errors" "testing" "time" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) const ( WeakCertificate = ` -----BEGIN CERTIFICATE----- MIIBhzCCASygAwIBAgIRANHpHgAWeTnLZpTSxCKs0ggwCgYIKoZIzj0EAwIwEjEQ MA4GA1UEChMHb3RlbC1nbzAeFw0yMTA0MDExMzU5MDNaFw0yMTA0MDExNDU5MDNa MBIxEDAOBgNVBAoTB290ZWwtZ28wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAS9 nWSkmPCxShxnp43F+PrOtbGV7sNfkbQ/kxzi9Ego0ZJdiXxkmv/C05QFddCW7Y0Z sJCLHGogQsYnWJBXUZOVo2MwYTAOBgNVHQ8BAf8EBAMCB4AwEwYDVR0lBAwwCgYI KwYBBQUHAwEwDAYDVR0TAQH/BAIwADAsBgNVHREEJTAjgglsb2NhbGhvc3SHEAAA AAAAAAAAAAAAAAAAAAGHBH8AAAEwCgYIKoZIzj0EAwIDSQAwRgIhANwZVVKvfvQ/ 1HXsTvgH+xTQswOwSSKYJ1cVHQhqK7ZbAiEAus8NxpTRnp5DiTMuyVmhVNPB+bVH Lhnm4N/QDk5rek0= -----END CERTIFICATE----- ` WeakPrivateKey = ` -----BEGIN PRIVATE KEY----- MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgN8HEXiXhvByrJ1zK SFT6Y2l2KqDWwWzKf+t4CyWrNKehRANCAAS9nWSkmPCxShxnp43F+PrOtbGV7sNf kbQ/kxzi9Ego0ZJdiXxkmv/C05QFddCW7Y0ZsJCLHGogQsYnWJBXUZOV -----END PRIVATE KEY----- ` ) type env map[string]string func (e *env) getEnv(env string) string { return (*e)[env] } type fileReader map[string][]byte func (f *fileReader) readFile(filename string) ([]byte, error) { if b, ok := (*f)[filename]; ok { return b, nil } return nil, errors.New("file not found") } func TestConfigs(t *testing.T) { tlsCert, err := CreateTLSConfig([]byte(WeakCertificate)) assert.NoError(t, err) tests := []struct { name string opts []GenericOption env env fileReader fileReader asserts func(t *testing.T, c *Config, grpcOption bool) }{ { name: "Test default configs", asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.Equal(t, "localhost:4317", c.Metrics.Endpoint) } else { assert.Equal(t, "localhost:4318", c.Metrics.Endpoint) } assert.Equal(t, NoCompression, c.Metrics.Compression) assert.Equal(t, map[string]string(nil), c.Metrics.Headers) assert.Equal(t, 10*time.Second, c.Metrics.Timeout) }, }, // Endpoint Tests { name: "Test With Endpoint", opts: []GenericOption{ WithEndpoint("someendpoint"), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "someendpoint", c.Metrics.Endpoint) }, }, { name: "Test Environment Endpoint", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "https://env.endpoint/prefix", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.False(t, c.Metrics.Insecure) if grpcOption { assert.Equal(t, "env.endpoint/prefix", c.Metrics.Endpoint) } else { assert.Equal(t, "env.endpoint", c.Metrics.Endpoint) assert.Equal(t, "/prefix/v1/metrics", c.Metrics.URLPath) } }, }, { name: "Test Environment Signal Specific Endpoint", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "https://overrode.by.signal.specific/env/var", "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "http://env.metrics.endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.True(t, c.Metrics.Insecure) assert.Equal(t, "env.metrics.endpoint", c.Metrics.Endpoint) if !grpcOption { assert.Equal(t, "/", c.Metrics.URLPath) } }, }, { name: "Test Mixed Environment and With Endpoint", opts: []GenericOption{ WithEndpoint("metrics_endpoint"), }, env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "env_endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "metrics_endpoint", c.Metrics.Endpoint) }, }, { name: "Test Environment Endpoint with HTTP scheme", env: map[string]string{ 
"OTEL_EXPORTER_OTLP_ENDPOINT": "http://env_endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "env_endpoint", c.Metrics.Endpoint) assert.Equal(t, true, c.Metrics.Insecure) }, }, { name: "Test Environment Endpoint with HTTP scheme and leading & trailingspaces", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": " http://env_endpoint ", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "env_endpoint", c.Metrics.Endpoint) assert.Equal(t, true, c.Metrics.Insecure) }, }, { name: "Test Environment Endpoint with HTTPS scheme", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "https://env_endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "env_endpoint", c.Metrics.Endpoint) assert.Equal(t, false, c.Metrics.Insecure) }, }, { name: "Test Environment Signal Specific Endpoint with uppercase scheme", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "HTTPS://overrode_by_signal_specific", "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "HtTp://env_metrics_endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "env_metrics_endpoint", c.Metrics.Endpoint) assert.Equal(t, true, c.Metrics.Insecure) }, }, // Certificate tests { name: "Test Default Certificate", asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.NotNil(t, c.Metrics.GRPCCredentials) } else { assert.Nil(t, c.Metrics.TLSCfg) } }, }, { name: "Test With Certificate", opts: []GenericOption{ WithTLSClientConfig(tlsCert), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { // TODO: make sure gRPC's credentials actually works assert.NotNil(t, c.Metrics.GRPCCredentials) } else { // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Metrics.TLSCfg.RootCAs.Subjects()) } }, }, { name: "Test Environment Certificate", env: map[string]string{ "OTEL_EXPORTER_OTLP_CERTIFICATE": "cert_path", }, fileReader: fileReader{ "cert_path": []byte(WeakCertificate), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.NotNil(t, c.Metrics.GRPCCredentials) } else { // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Metrics.TLSCfg.RootCAs.Subjects()) } }, }, { name: "Test Environment Signal Specific Certificate", env: map[string]string{ "OTEL_EXPORTER_OTLP_CERTIFICATE": "overrode_by_signal_specific", "OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE": "cert_path", }, fileReader: fileReader{ "cert_path": []byte(WeakCertificate), "invalid_cert": []byte("invalid certificate file."), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.NotNil(t, c.Metrics.GRPCCredentials) } else { // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. 
assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Metrics.TLSCfg.RootCAs.Subjects()) } }, }, { name: "Test Mixed Environment and With Certificate", opts: []GenericOption{}, env: map[string]string{ "OTEL_EXPORTER_OTLP_CERTIFICATE": "cert_path", }, fileReader: fileReader{ "cert_path": []byte(WeakCertificate), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.NotNil(t, c.Metrics.GRPCCredentials) } else { // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. assert.Equal(t, 1, len(c.Metrics.TLSCfg.RootCAs.Subjects())) } }, }, // Headers tests { name: "Test With Headers", opts: []GenericOption{ WithHeaders(map[string]string{"h1": "v1"}), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, map[string]string{"h1": "v1"}, c.Metrics.Headers) }, }, { name: "Test Environment Headers", env: map[string]string{"OTEL_EXPORTER_OTLP_HEADERS": "h1=v1,h2=v2"}, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Metrics.Headers) }, }, { name: "Test Environment Signal Specific Headers", env: map[string]string{ "OTEL_EXPORTER_OTLP_HEADERS": "overrode_by_signal_specific", "OTEL_EXPORTER_OTLP_METRICS_HEADERS": "h1=v1,h2=v2", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Metrics.Headers) }, }, { name: "Test Mixed Environment and With Headers", env: map[string]string{"OTEL_EXPORTER_OTLP_HEADERS": "h1=v1,h2=v2"}, opts: []GenericOption{ WithHeaders(map[string]string{"m1": "mv1"}), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, map[string]string{"m1": "mv1"}, c.Metrics.Headers) }, }, // Compression Tests { name: "Test With Compression", opts: []GenericOption{ WithCompression(GzipCompression), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, GzipCompression, c.Metrics.Compression) }, }, { name: "Test Environment Compression", env: map[string]string{ "OTEL_EXPORTER_OTLP_COMPRESSION": "gzip", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, GzipCompression, c.Metrics.Compression) }, }, { name: "Test Environment Signal Specific Compression", env: map[string]string{ "OTEL_EXPORTER_OTLP_METRICS_COMPRESSION": "gzip", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, GzipCompression, c.Metrics.Compression) }, }, { name: "Test Mixed Environment and With Compression", opts: []GenericOption{ WithCompression(NoCompression), }, env: map[string]string{ "OTEL_EXPORTER_OTLP_METRICS_COMPRESSION": "gzip", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, NoCompression, c.Metrics.Compression) }, }, // Timeout Tests { name: "Test With Timeout", opts: []GenericOption{ WithTimeout(time.Duration(5 * time.Second)), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, 5*time.Second, c.Metrics.Timeout) }, }, { name: "Test Environment Timeout", env: map[string]string{ "OTEL_EXPORTER_OTLP_TIMEOUT": "15000", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, c.Metrics.Timeout, 15*time.Second) }, }, { name: "Test Environment Signal Specific Timeout", env: map[string]string{ "OTEL_EXPORTER_OTLP_TIMEOUT": "15000", "OTEL_EXPORTER_OTLP_METRICS_TIMEOUT": "28000", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, c.Metrics.Timeout, 28*time.Second) }, }, { name: "Test Mixed 
Environment and With Timeout", env: map[string]string{ "OTEL_EXPORTER_OTLP_TIMEOUT": "15000", "OTEL_EXPORTER_OTLP_METRICS_TIMEOUT": "28000", }, opts: []GenericOption{ WithTimeout(5 * time.Second), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, c.Metrics.Timeout, 5*time.Second) }, }, // Temporality Selector Tests { name: "WithTemporalitySelector", opts: []GenericOption{ WithTemporalitySelector(deltaSelector), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { // Function value comparisons are disallowed, test non-default // behavior of a TemporalitySelector here to ensure our "catch // all" was set. var undefinedKind metric.InstrumentKind got := c.Metrics.TemporalitySelector assert.Equal(t, metricdata.DeltaTemporality, got(undefinedKind)) }, }, // Aggregation Selector Tests { name: "WithAggregationSelector", opts: []GenericOption{ WithAggregationSelector(dropSelector), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { // Function value comparisons are disallowed, test non-default // behavior of a AggregationSelector here to ensure our "catch // all" was set. var undefinedKind metric.InstrumentKind got := c.Metrics.AggregationSelector assert.Equal(t, metric.AggregationDrop{}, got(undefinedKind)) }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { origEOR := DefaultEnvOptionsReader DefaultEnvOptionsReader = envconfig.EnvOptionsReader{ GetEnv: tt.env.getEnv, ReadFile: tt.fileReader.readFile, Namespace: "OTEL_EXPORTER_OTLP", } t.Cleanup(func() { DefaultEnvOptionsReader = origEOR }) // Tests Generic options as HTTP Options cfg := NewHTTPConfig(asHTTPOptions(tt.opts)...) tt.asserts(t, &cfg, false) // Tests Generic options as gRPC Options cfg = NewGRPCConfig(asGRPCOptions(tt.opts)...) tt.asserts(t, &cfg, true) }) } } func dropSelector(metric.InstrumentKind) metric.Aggregation { return metric.AggregationDrop{} } func deltaSelector(metric.InstrumentKind) metricdata.Temporality { return metricdata.DeltaTemporality } func asHTTPOptions(opts []GenericOption) []HTTPOption { converted := make([]HTTPOption, len(opts)) for i, o := range opts { converted[i] = NewHTTPOption(o.ApplyHTTPOption) } return converted } func asGRPCOptions(opts []GenericOption) []GRPCOption { converted := make([]GRPCOption, len(opts)) for i, o := range opts { converted[i] = NewGRPCOption(o.ApplyGRPCOption) } return converted } func TestCleanPath(t *testing.T) { type args struct { urlPath string defaultPath string } tests := []struct { name string args args want string }{ { name: "clean empty path", args: args{ urlPath: "", defaultPath: "DefaultPath", }, want: "DefaultPath", }, { name: "clean metrics path", args: args{ urlPath: "/prefix/v1/metrics", defaultPath: "DefaultMetricsPath", }, want: "/prefix/v1/metrics", }, { name: "clean traces path", args: args{ urlPath: "https://env_endpoint", defaultPath: "DefaultTracesPath", }, want: "/https:/env_endpoint", }, { name: "spaces trimmed", args: args{ urlPath: " /dir", }, want: "/dir", }, { name: "clean path empty", args: args{ urlPath: "dir/..", defaultPath: "DefaultTracesPath", }, want: "DefaultTracesPath", }, { name: "make absolute", args: args{ urlPath: "dir/a", }, want: "/dir/a", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := cleanPath(tt.args.urlPath, tt.args.defaultPath); got != tt.want { t.Errorf("CleanPath() = %v, want %v", got, tt.want) } }) } } 
opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/optiontypes.go000066400000000000000000000044001452547353200332230ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf" import "time" const ( // DefaultCollectorGRPCPort is the default gRPC port of the collector. DefaultCollectorGRPCPort uint16 = 4317 // DefaultCollectorHTTPPort is the default HTTP port of the collector. DefaultCollectorHTTPPort uint16 = 4318 // DefaultCollectorHost is the host address the Exporter will attempt // connect to if no collector address is provided. DefaultCollectorHost string = "localhost" ) // Compression describes the compression used for payloads sent to the // collector. type Compression int const ( // NoCompression tells the driver to send payloads without // compression. NoCompression Compression = iota // GzipCompression tells the driver to send payloads after // compressing them with gzip. GzipCompression ) // RetrySettings defines configuration for retrying batches in case of export failure // using an exponential backoff. type RetrySettings struct { // Enabled indicates whether to not retry sending batches in case of export failure. Enabled bool // InitialInterval the time to wait after the first failure before retrying. InitialInterval time.Duration // MaxInterval is the upper bound on backoff interval. Once this value is reached the delay between // consecutive retries will always be `MaxInterval`. MaxInterval time.Duration // MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request/batch. // Once this value is reached, the data is discarded. MaxElapsedTime time.Duration } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/tls.go000066400000000000000000000030011452547353200314240ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
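// NOTE: illustrative sketch, not part of this archive. The RetrySettings
// fields above describe an exponential backoff: waits start at
// InitialInterval, grow by a multiplier (1.5 is assumed here for
// illustration), are capped at MaxInterval, and retrying stops once
// MaxElapsedTime has been spent. The loop below only prints that schedule.
package main

import (
	"fmt"
	"time"
)

func main() {
	initialInterval := 5 * time.Second
	maxInterval := 30 * time.Second
	maxElapsedTime := time.Minute

	delay, elapsed := initialInterval, time.Duration(0)
	for attempt := 1; elapsed+delay <= maxElapsedTime; attempt++ {
		fmt.Printf("attempt %d: wait %s (elapsed %s)\n", attempt, delay, elapsed)
		elapsed += delay
		delay = time.Duration(float64(delay) * 1.5) // assumed growth factor
		if delay > maxInterval {
			delay = maxInterval // cap each individual wait at MaxInterval.
		}
	}
	fmt.Println("giving up after", elapsed)
}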
package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf" import ( "crypto/tls" "crypto/x509" "errors" "os" ) // ReadTLSConfigFromFile reads a PEM certificate file and creates // a tls.Config that will use this certifate to verify a server certificate. func ReadTLSConfigFromFile(path string) (*tls.Config, error) { b, err := os.ReadFile(path) if err != nil { return nil, err } return CreateTLSConfig(b) } // CreateTLSConfig creates a tls.Config from a raw certificate bytes // to verify a server certificate. func CreateTLSConfig(certBytes []byte) (*tls.Config, error) { cp := x509.NewCertPool() if ok := cp.AppendCertsFromPEM(certBytes); !ok { return nil, errors.New("failed to append certificate to the cert pool") } return &tls.Config{ RootCAs: cp, }, nil } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/internal/otest/000077500000000000000000000000001452547353200303335ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/internal/otest/client.go000066400000000000000000000224011452547353200321370ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/otest/client.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otest // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/otest" import ( "context" "fmt" "testing" "time" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" "go.opentelemetry.io/otel" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" collpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" cpb "go.opentelemetry.io/proto/otlp/common/v1" mpb "go.opentelemetry.io/proto/otlp/metrics/v1" rpb "go.opentelemetry.io/proto/otlp/resource/v1" ) var ( // Sat Jan 01 2000 00:00:00 GMT+0000. 
start = time.Date(2000, time.January, 0o1, 0, 0, 0, 0, time.FixedZone("GMT", 0)) end = start.Add(30 * time.Second) kvAlice = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{StringValue: "alice"}, }} kvBob = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{StringValue: "bob"}, }} kvSrvName = &cpb.KeyValue{Key: "service.name", Value: &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{StringValue: "test server"}, }} kvSrvVer = &cpb.KeyValue{Key: "service.version", Value: &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{StringValue: "v0.1.0"}, }} min, max, sum = 2.0, 4.0, 90.0 hdp = []*mpb.HistogramDataPoint{ { Attributes: []*cpb.KeyValue{kvAlice}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Count: 30, Sum: &sum, ExplicitBounds: []float64{1, 5}, BucketCounts: []uint64{0, 30, 0}, Min: &min, Max: &max, }, } hist = &mpb.Histogram{ AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, DataPoints: hdp, } dPtsInt64 = []*mpb.NumberDataPoint{ { Attributes: []*cpb.KeyValue{kvAlice}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Value: &mpb.NumberDataPoint_AsInt{AsInt: 1}, }, { Attributes: []*cpb.KeyValue{kvBob}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Value: &mpb.NumberDataPoint_AsInt{AsInt: 2}, }, } dPtsFloat64 = []*mpb.NumberDataPoint{ { Attributes: []*cpb.KeyValue{kvAlice}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Value: &mpb.NumberDataPoint_AsDouble{AsDouble: 1.0}, }, { Attributes: []*cpb.KeyValue{kvBob}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Value: &mpb.NumberDataPoint_AsDouble{AsDouble: 2.0}, }, } sumInt64 = &mpb.Sum{ AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, IsMonotonic: true, DataPoints: dPtsInt64, } sumFloat64 = &mpb.Sum{ AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, IsMonotonic: false, DataPoints: dPtsFloat64, } gaugeInt64 = &mpb.Gauge{DataPoints: dPtsInt64} gaugeFloat64 = &mpb.Gauge{DataPoints: dPtsFloat64} metrics = []*mpb.Metric{ { Name: "int64-gauge", Description: "Gauge with int64 values", Unit: "1", Data: &mpb.Metric_Gauge{Gauge: gaugeInt64}, }, { Name: "float64-gauge", Description: "Gauge with float64 values", Unit: "1", Data: &mpb.Metric_Gauge{Gauge: gaugeFloat64}, }, { Name: "int64-sum", Description: "Sum with int64 values", Unit: "1", Data: &mpb.Metric_Sum{Sum: sumInt64}, }, { Name: "float64-sum", Description: "Sum with float64 values", Unit: "1", Data: &mpb.Metric_Sum{Sum: sumFloat64}, }, { Name: "histogram", Description: "Histogram", Unit: "1", Data: &mpb.Metric_Histogram{Histogram: hist}, }, } scope = &cpb.InstrumentationScope{ Name: "test/code/path", Version: "v0.1.0", } scopeMetrics = []*mpb.ScopeMetrics{ { Scope: scope, Metrics: metrics, SchemaUrl: semconv.SchemaURL, }, } res = &rpb.Resource{ Attributes: []*cpb.KeyValue{kvSrvName, kvSrvVer}, } resourceMetrics = &mpb.ResourceMetrics{ Resource: res, ScopeMetrics: scopeMetrics, SchemaUrl: semconv.SchemaURL, } ) type Client interface { UploadMetrics(context.Context, *mpb.ResourceMetrics) error ForceFlush(context.Context) error Shutdown(context.Context) error } // ClientFactory is a function that when called returns a // Client implementation that is connected to also returned // Collector implementation. 
The Client is ready to upload metric data to the // Collector which is ready to store that data. // // If resultCh is not nil, the returned Collector needs to use the responses // from that channel to send back to the client for every export request. type ClientFactory func(resultCh <-chan ExportResult) (Client, Collector) // RunClientTests runs a suite of Client integration tests. For example: // // t.Run("Integration", RunClientTests(factory)) func RunClientTests(f ClientFactory) func(*testing.T) { return func(t *testing.T) { t.Run("ClientHonorsContextErrors", func(t *testing.T) { t.Run("Shutdown", testCtxErrs(func() func(context.Context) error { c, _ := f(nil) return c.Shutdown })) t.Run("ForceFlush", testCtxErrs(func() func(context.Context) error { c, _ := f(nil) return c.ForceFlush })) t.Run("UploadMetrics", testCtxErrs(func() func(context.Context) error { c, _ := f(nil) return func(ctx context.Context) error { return c.UploadMetrics(ctx, nil) } })) }) t.Run("ForceFlushFlushes", func(t *testing.T) { ctx := context.Background() client, collector := f(nil) require.NoError(t, client.UploadMetrics(ctx, resourceMetrics)) require.NoError(t, client.ForceFlush(ctx)) rm := collector.Collect().Dump() // Data correctness is not important, just it was received. require.Greater(t, len(rm), 0, "no data uploaded") require.NoError(t, client.Shutdown(ctx)) rm = collector.Collect().Dump() assert.Len(t, rm, 0, "client did not flush all data") }) t.Run("UploadMetrics", func(t *testing.T) { ctx := context.Background() client, coll := f(nil) require.NoError(t, client.UploadMetrics(ctx, resourceMetrics)) require.NoError(t, client.Shutdown(ctx)) got := coll.Collect().Dump() require.Len(t, got, 1, "upload of one ResourceMetrics") diff := cmp.Diff(got[0], resourceMetrics, cmp.Comparer(proto.Equal)) if diff != "" { t.Fatalf("unexpected ResourceMetrics:\n%s", diff) } }) t.Run("PartialSuccess", func(t *testing.T) { const n, msg = 2, "bad data" rCh := make(chan ExportResult, 3) rCh <- ExportResult{ Response: &collpb.ExportMetricsServiceResponse{ PartialSuccess: &collpb.ExportMetricsPartialSuccess{ RejectedDataPoints: n, ErrorMessage: msg, }, }, } rCh <- ExportResult{ Response: &collpb.ExportMetricsServiceResponse{ PartialSuccess: &collpb.ExportMetricsPartialSuccess{ // Should not be logged. 
RejectedDataPoints: 0, ErrorMessage: "", }, }, } rCh <- ExportResult{ Response: &collpb.ExportMetricsServiceResponse{}, } ctx := context.Background() client, _ := f(rCh) defer func(orig otel.ErrorHandler) { otel.SetErrorHandler(orig) }(otel.GetErrorHandler()) errs := []error{} eh := otel.ErrorHandlerFunc(func(e error) { errs = append(errs, e) }) otel.SetErrorHandler(eh) require.NoError(t, client.UploadMetrics(ctx, resourceMetrics)) require.NoError(t, client.UploadMetrics(ctx, resourceMetrics)) require.NoError(t, client.UploadMetrics(ctx, resourceMetrics)) require.NoError(t, client.Shutdown(ctx)) require.Equal(t, 1, len(errs)) want := fmt.Sprintf("%s (%d metric data points rejected)", msg, n) assert.ErrorContains(t, errs[0], want) }) } } func testCtxErrs(factory func() func(context.Context) error) func(t *testing.T) { return func(t *testing.T) { t.Helper() ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) t.Run("DeadlineExceeded", func(t *testing.T) { innerCtx, innerCancel := context.WithTimeout(ctx, time.Nanosecond) t.Cleanup(innerCancel) <-innerCtx.Done() f := factory() assert.ErrorIs(t, f(innerCtx), context.DeadlineExceeded) }) t.Run("Canceled", func(t *testing.T) { innerCtx, innerCancel := context.WithCancel(ctx) innerCancel() f := factory() assert.ErrorIs(t, f(innerCtx), context.Canceled) }) } } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/internal/otest/client_test.go000066400000000000000000000044621452547353200332050ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/otest/client_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
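// NOTE: illustrative sketch, not part of this archive. testCtxErrs above
// verifies that every Client method surfaces the error of an already expired
// context; the standalone program below shows the same check outside the test
// harness (doWork is a hypothetical stand-in for Shutdown, ForceFlush, or
// UploadMetrics).
package main

import (
	"context"
	"errors"
	"fmt"
)

func doWork(ctx context.Context) error {
	// A well-behaved exporter call returns promptly with ctx.Err() once the
	// context is done, which is exactly what the suite asserts.
	return ctx.Err()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // expire the context before the call, as the tests do.
	fmt.Println(errors.Is(doWork(ctx), context.Canceled)) // true
}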
package otest import ( "context" "testing" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" cpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" mpb "go.opentelemetry.io/proto/otlp/metrics/v1" ) type client struct { rCh <-chan ExportResult storage *Storage } func (c *client) Temporality(k metric.InstrumentKind) metricdata.Temporality { return metric.DefaultTemporalitySelector(k) } func (c *client) Aggregation(k metric.InstrumentKind) metric.Aggregation { return metric.DefaultAggregationSelector(k) } func (c *client) Collect() *Storage { return c.storage } func (c *client) UploadMetrics(ctx context.Context, rm *mpb.ResourceMetrics) error { c.storage.Add(&cpb.ExportMetricsServiceRequest{ ResourceMetrics: []*mpb.ResourceMetrics{rm}, }) if c.rCh != nil { r := <-c.rCh if r.Response != nil && r.Response.GetPartialSuccess() != nil { msg := r.Response.GetPartialSuccess().GetErrorMessage() n := r.Response.GetPartialSuccess().GetRejectedDataPoints() if msg != "" || n > 0 { otel.Handle(internal.MetricPartialSuccessError(n, msg)) } } return r.Err } return ctx.Err() } func (c *client) ForceFlush(ctx context.Context) error { return ctx.Err() } func (c *client) Shutdown(ctx context.Context) error { return ctx.Err() } func TestClientTests(t *testing.T) { factory := func(rCh <-chan ExportResult) (Client, Collector) { c := &client{rCh: rCh, storage: NewStorage()} return c, c } t.Run("Integration", RunClientTests(factory)) } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/internal/otest/collector.go000066400000000000000000000277251452547353200326650ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/otest/collector.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otest // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/otest" import ( "bytes" "compress/gzip" "context" "crypto/ecdsa" "crypto/elliptic" "crypto/rand" "crypto/tls" "crypto/x509" "crypto/x509/pkix" // nolint:depguard // This is for testing. "encoding/pem" "errors" "fmt" "io" "math/big" "net" "net/http" "net/url" "sync" "time" "google.golang.org/grpc" "google.golang.org/grpc/metadata" "google.golang.org/protobuf/proto" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf" collpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" mpb "go.opentelemetry.io/proto/otlp/metrics/v1" ) // Collector is the collection target a Client sends metric uploads to. type Collector interface { Collect() *Storage } type ExportResult struct { Response *collpb.ExportMetricsServiceResponse Err error } // Storage stores uploaded OTLP metric data in their proto form. type Storage struct { dataMu sync.Mutex data []*mpb.ResourceMetrics } // NewStorage returns a configure storage ready to store received requests. 
func NewStorage() *Storage { return &Storage{} } // Add adds the request to the Storage. func (s *Storage) Add(request *collpb.ExportMetricsServiceRequest) { s.dataMu.Lock() defer s.dataMu.Unlock() s.data = append(s.data, request.ResourceMetrics...) } // Dump returns all added ResourceMetrics and clears the storage. func (s *Storage) Dump() []*mpb.ResourceMetrics { s.dataMu.Lock() defer s.dataMu.Unlock() var data []*mpb.ResourceMetrics data, s.data = s.data, []*mpb.ResourceMetrics{} return data } // GRPCCollector is an OTLP gRPC server that collects all requests it receives. type GRPCCollector struct { collpb.UnimplementedMetricsServiceServer headersMu sync.Mutex headers metadata.MD storage *Storage resultCh <-chan ExportResult listener net.Listener srv *grpc.Server } // NewGRPCCollector returns a *GRPCCollector that is listening at the provided // endpoint. // // If endpoint is an empty string, the returned collector will be listening on // the localhost interface at an OS chosen port. // // If errCh is not nil, the collector will respond to Export calls with errors // sent on that channel. This means that if errCh is not nil Export calls will // block until an error is received. func NewGRPCCollector(endpoint string, resultCh <-chan ExportResult) (*GRPCCollector, error) { if endpoint == "" { endpoint = "localhost:0" } c := &GRPCCollector{ storage: NewStorage(), resultCh: resultCh, } var err error c.listener, err = net.Listen("tcp", endpoint) if err != nil { return nil, err } c.srv = grpc.NewServer() collpb.RegisterMetricsServiceServer(c.srv, c) go func() { _ = c.srv.Serve(c.listener) }() return c, nil } // Shutdown shuts down the gRPC server closing all open connections and // listeners immediately. func (c *GRPCCollector) Shutdown() { c.srv.Stop() } // Addr returns the net.Addr c is listening at. func (c *GRPCCollector) Addr() net.Addr { return c.listener.Addr() } // Collect returns the Storage holding all collected requests. func (c *GRPCCollector) Collect() *Storage { return c.storage } // Headers returns the headers received for all requests. func (c *GRPCCollector) Headers() map[string][]string { // Makes a copy. c.headersMu.Lock() defer c.headersMu.Unlock() return metadata.Join(c.headers) } // Export handles the export req. func (c *GRPCCollector) Export(ctx context.Context, req *collpb.ExportMetricsServiceRequest) (*collpb.ExportMetricsServiceResponse, error) { c.storage.Add(req) if h, ok := metadata.FromIncomingContext(ctx); ok { c.headersMu.Lock() c.headers = metadata.Join(c.headers, h) c.headersMu.Unlock() } if c.resultCh != nil { r := <-c.resultCh if r.Response == nil { return &collpb.ExportMetricsServiceResponse{}, r.Err } return r.Response, r.Err } return &collpb.ExportMetricsServiceResponse{}, nil } var emptyExportMetricsServiceResponse = func() []byte { body := collpb.ExportMetricsServiceResponse{} r, err := proto.Marshal(&body) if err != nil { panic(err) } return r }() type HTTPResponseError struct { Err error Status int Header http.Header } func (e *HTTPResponseError) Error() string { return fmt.Sprintf("%d: %s", e.Status, e.Err) } func (e *HTTPResponseError) Unwrap() error { return e.Err } // HTTPCollector is an OTLP HTTP server that collects all requests it receives. type HTTPCollector struct { plainTextResponse bool headersMu sync.Mutex headers http.Header storage *Storage resultCh <-chan ExportResult listener net.Listener srv *http.Server } // NewHTTPCollector returns a *HTTPCollector that is listening at the provided // endpoint. 
// // If endpoint is an empty string, the returned collector will be listening on // the localhost interface at an OS chosen port, not use TLS, and listen at the // default OTLP metric endpoint path ("/v1/metrics"). If the endpoint contains // a prefix of "https" the server will generate weak self-signed TLS // certificates and use them to server data. If the endpoint contains a path, // that path will be used instead of the default OTLP metric endpoint path. // // If errCh is not nil, the collector will respond to HTTP requests with errors // sent on that channel. This means that if errCh is not nil Export calls will // block until an error is received. func NewHTTPCollector(endpoint string, resultCh <-chan ExportResult, opts ...func(*HTTPCollector)) (*HTTPCollector, error) { u, err := url.Parse(endpoint) if err != nil { return nil, err } if u.Host == "" { u.Host = "localhost:0" } if u.Path == "" { u.Path = oconf.DefaultMetricsPath } c := &HTTPCollector{ headers: http.Header{}, storage: NewStorage(), resultCh: resultCh, } for _, opt := range opts { opt(c) } c.listener, err = net.Listen("tcp", u.Host) if err != nil { return nil, err } mux := http.NewServeMux() mux.Handle(u.Path, http.HandlerFunc(c.handler)) c.srv = &http.Server{ Handler: mux, ReadTimeout: 10 * time.Second, WriteTimeout: 10 * time.Second, } if u.Scheme == "https" { cert, err := weakCertificate() if err != nil { return nil, err } c.srv.TLSConfig = &tls.Config{ Certificates: []tls.Certificate{cert}, } go func() { _ = c.srv.ServeTLS(c.listener, "", "") }() } else { go func() { _ = c.srv.Serve(c.listener) }() } return c, nil } // WithHTTPCollectorRespondingPlainText makes the HTTPCollector return // a plaintext, instead of protobuf, response. func WithHTTPCollectorRespondingPlainText() func(*HTTPCollector) { return func(s *HTTPCollector) { s.plainTextResponse = true } } // Shutdown shuts down the HTTP server closing all open connections and // listeners. func (c *HTTPCollector) Shutdown(ctx context.Context) error { return c.srv.Shutdown(ctx) } // Addr returns the net.Addr c is listening at. func (c *HTTPCollector) Addr() net.Addr { return c.listener.Addr() } // Collect returns the Storage holding all collected requests. func (c *HTTPCollector) Collect() *Storage { return c.storage } // Headers returns the headers received for all requests. func (c *HTTPCollector) Headers() map[string][]string { // Makes a copy. c.headersMu.Lock() defer c.headersMu.Unlock() return c.headers.Clone() } func (c *HTTPCollector) handler(w http.ResponseWriter, r *http.Request) { c.respond(w, c.record(r)) } func (c *HTTPCollector) record(r *http.Request) ExportResult { // Currently only supports protobuf. 
if v := r.Header.Get("Content-Type"); v != "application/x-protobuf" { err := fmt.Errorf("content-type not supported: %s", v) return ExportResult{Err: err} } body, err := c.readBody(r) if err != nil { return ExportResult{Err: err} } pbRequest := &collpb.ExportMetricsServiceRequest{} err = proto.Unmarshal(body, pbRequest) if err != nil { return ExportResult{ Err: &HTTPResponseError{ Err: err, Status: http.StatusInternalServerError, }, } } c.storage.Add(pbRequest) c.headersMu.Lock() for k, vals := range r.Header { for _, v := range vals { c.headers.Add(k, v) } } c.headersMu.Unlock() if c.resultCh != nil { return <-c.resultCh } return ExportResult{Err: err} } func (c *HTTPCollector) readBody(r *http.Request) (body []byte, err error) { var reader io.ReadCloser switch r.Header.Get("Content-Encoding") { case "gzip": reader, err = gzip.NewReader(r.Body) if err != nil { _ = reader.Close() return nil, &HTTPResponseError{ Err: err, Status: http.StatusInternalServerError, } } default: reader = r.Body } defer func() { cErr := reader.Close() if err == nil && cErr != nil { err = &HTTPResponseError{ Err: cErr, Status: http.StatusInternalServerError, } } }() body, err = io.ReadAll(reader) if err != nil { err = &HTTPResponseError{ Err: err, Status: http.StatusInternalServerError, } } return body, err } func (c *HTTPCollector) respond(w http.ResponseWriter, resp ExportResult) { if resp.Err != nil { w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.Header().Set("X-Content-Type-Options", "nosniff") var e *HTTPResponseError if errors.As(resp.Err, &e) { for k, vals := range e.Header { for _, v := range vals { w.Header().Add(k, v) } } w.WriteHeader(e.Status) fmt.Fprintln(w, e.Error()) } else { w.WriteHeader(http.StatusBadRequest) fmt.Fprintln(w, resp.Err.Error()) } return } if c.plainTextResponse { w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.WriteHeader(http.StatusOK) _, _ = w.Write([]byte("OK")) return } w.Header().Set("Content-Type", "application/x-protobuf") w.WriteHeader(http.StatusOK) if resp.Response == nil { _, _ = w.Write(emptyExportMetricsServiceResponse) } else { r, err := proto.Marshal(resp.Response) if err != nil { panic(err) } _, _ = w.Write(r) } } // Based on https://golang.org/src/crypto/tls/generate_cert.go, // simplified and weakened. 
func weakCertificate() (tls.Certificate, error) { priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) if err != nil { return tls.Certificate{}, err } notBefore := time.Now() notAfter := notBefore.Add(time.Hour) max := new(big.Int).Lsh(big.NewInt(1), 128) sn, err := rand.Int(rand.Reader, max) if err != nil { return tls.Certificate{}, err } tmpl := x509.Certificate{ SerialNumber: sn, Subject: pkix.Name{Organization: []string{"otel-go"}}, NotBefore: notBefore, NotAfter: notAfter, KeyUsage: x509.KeyUsageDigitalSignature, ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, BasicConstraintsValid: true, DNSNames: []string{"localhost"}, IPAddresses: []net.IP{net.IPv6loopback, net.IPv4(127, 0, 0, 1)}, } derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv) if err != nil { return tls.Certificate{}, err } var certBuf bytes.Buffer err = pem.Encode(&certBuf, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) if err != nil { return tls.Certificate{}, err } privBytes, err := x509.MarshalPKCS8PrivateKey(priv) if err != nil { return tls.Certificate{}, err } var privBuf bytes.Buffer err = pem.Encode(&privBuf, &pem.Block{Type: "PRIVATE KEY", Bytes: privBytes}) if err != nil { return tls.Certificate{}, err } return tls.X509KeyPair(certBuf.Bytes(), privBuf.Bytes()) } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess.go000066400000000000000000000042161452547353200325540ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/partialsuccess.go // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal" import "fmt" // PartialSuccess represents the underlying error for all handling // OTLP partial success messages. Use `errors.Is(err, // PartialSuccess{})` to test whether an error passed to the OTel // error handler belongs to this category. type PartialSuccess struct { ErrorMessage string RejectedItems int64 RejectedKind string } var _ error = PartialSuccess{} // Error implements the error interface. func (ps PartialSuccess) Error() string { msg := ps.ErrorMessage if msg == "" { msg = "empty message" } return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind) } // Is supports the errors.Is() interface. func (ps PartialSuccess) Is(err error) bool { _, ok := err.(PartialSuccess) return ok } // TracePartialSuccessError returns an error describing a partial success // response for the trace signal. func TracePartialSuccessError(itemsRejected int64, errorMessage string) error { return PartialSuccess{ ErrorMessage: errorMessage, RejectedItems: itemsRejected, RejectedKind: "spans", } } // MetricPartialSuccessError returns an error describing a partial success // response for the metric signal. 
func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error { return PartialSuccess{ ErrorMessage: errorMessage, RejectedItems: itemsRejected, RejectedKind: "metric data points", } } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/internal/partialsuccess_test.go000066400000000000000000000031221452547353200336060ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/partialsuccess_test.go // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal import ( "errors" "strings" "testing" "github.com/stretchr/testify/require" ) func requireErrorString(t *testing.T, expect string, err error) { t.Helper() require.NotNil(t, err) require.Error(t, err) require.True(t, errors.Is(err, PartialSuccess{})) const pfx = "OTLP partial success: " msg := err.Error() require.True(t, strings.HasPrefix(msg, pfx)) require.Equal(t, expect, msg[len(pfx):]) } func TestPartialSuccessFormat(t *testing.T) { requireErrorString(t, "empty message (0 metric data points rejected)", MetricPartialSuccessError(0, "")) requireErrorString(t, "help help (0 metric data points rejected)", MetricPartialSuccessError(0, "help help")) requireErrorString(t, "what happened (10 metric data points rejected)", MetricPartialSuccessError(10, "what happened")) requireErrorString(t, "what happened (15 spans rejected)", TracePartialSuccessError(15, "what happened")) } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/000077500000000000000000000000001452547353200303425ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry.go000066400000000000000000000116021452547353200320360ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/retry/retry.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package retry provides request retry functionality that can perform // configurable exponential backoff for transient errors and honor any // explicit throttle responses received. package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry" import ( "context" "fmt" "time" "github.com/cenkalti/backoff/v4" ) // DefaultConfig are the recommended defaults to use. 
var DefaultConfig = Config{ Enabled: true, InitialInterval: 5 * time.Second, MaxInterval: 30 * time.Second, MaxElapsedTime: time.Minute, } // Config defines configuration for retrying batches in case of export failure // using an exponential backoff. type Config struct { // Enabled indicates whether to not retry sending batches in case of // export failure. Enabled bool // InitialInterval the time to wait after the first failure before // retrying. InitialInterval time.Duration // MaxInterval is the upper bound on backoff interval. Once this value is // reached the delay between consecutive retries will always be // `MaxInterval`. MaxInterval time.Duration // MaxElapsedTime is the maximum amount of time (including retries) spent // trying to send a request/batch. Once this value is reached, the data // is discarded. MaxElapsedTime time.Duration } // RequestFunc wraps a request with retry logic. type RequestFunc func(context.Context, func(context.Context) error) error // EvaluateFunc returns if an error is retry-able and if an explicit throttle // duration should be honored that was included in the error. // // The function must return true if the error argument is retry-able, // otherwise it must return false for the first return parameter. // // The function must return a non-zero time.Duration if the error contains // explicit throttle duration that should be honored, otherwise it must return // a zero valued time.Duration. type EvaluateFunc func(error) (bool, time.Duration) // RequestFunc returns a RequestFunc using the evaluate function to determine // if requests can be retried and based on the exponential backoff // configuration of c. func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { if !c.Enabled { return func(ctx context.Context, fn func(context.Context) error) error { return fn(ctx) } } return func(ctx context.Context, fn func(context.Context) error) error { // Do not use NewExponentialBackOff since it calls Reset and the code here // must call Reset after changing the InitialInterval (this saves an // unnecessary call to Now). b := &backoff.ExponentialBackOff{ InitialInterval: c.InitialInterval, RandomizationFactor: backoff.DefaultRandomizationFactor, Multiplier: backoff.DefaultMultiplier, MaxInterval: c.MaxInterval, MaxElapsedTime: c.MaxElapsedTime, Stop: backoff.Stop, Clock: backoff.SystemClock, } b.Reset() for { err := fn(ctx) if err == nil { return nil } retryable, throttle := evaluate(err) if !retryable { return err } bOff := b.NextBackOff() if bOff == backoff.Stop { return fmt.Errorf("max retry time elapsed: %w", err) } // Wait for the greater of the backoff or throttle delay. var delay time.Duration if bOff > throttle { delay = bOff } else { elapsed := b.GetElapsedTime() if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime { return fmt.Errorf("max retry time would elapse: %w", err) } delay = throttle } if ctxErr := waitFunc(ctx, delay); ctxErr != nil { return fmt.Errorf("%w: %s", ctxErr, err) } } } } // Allow override for testing. var waitFunc = wait // wait takes the caller's context, and the amount of time to wait. It will // return nil if the timer fires before or at the same time as the context's // deadline. This indicates that the call can be retried. func wait(ctx context.Context, delay time.Duration) error { timer := time.NewTimer(delay) defer timer.Stop() select { case <-ctx.Done(): // Handle the case where the timer and context deadline end // simultaneously by prioritizing the timer expiration nil value // response. 
select { case <-timer.C: default: return ctx.Err() } case <-timer.C: } return nil } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry/retry_test.go000066400000000000000000000145671452547353200331120ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/retry/retry_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package retry import ( "context" "errors" "math" "sync" "testing" "time" "github.com/cenkalti/backoff/v4" "github.com/stretchr/testify/assert" ) func TestWait(t *testing.T) { tests := []struct { ctx context.Context delay time.Duration expected error }{ { ctx: context.Background(), delay: time.Duration(0), }, { ctx: context.Background(), delay: time.Duration(1), }, { ctx: context.Background(), delay: time.Duration(-1), }, { ctx: func() context.Context { ctx, cancel := context.WithCancel(context.Background()) cancel() return ctx }(), // Ensure the timer and context do not end simultaneously. delay: 1 * time.Hour, expected: context.Canceled, }, } for _, test := range tests { err := wait(test.ctx, test.delay) if test.expected == nil { assert.NoError(t, err) } else { assert.ErrorIs(t, err, test.expected) } } } func TestNonRetryableError(t *testing.T) { ev := func(error) (bool, time.Duration) { return false, 0 } reqFunc := Config{ Enabled: true, InitialInterval: 1 * time.Nanosecond, MaxInterval: 1 * time.Nanosecond, // Never stop retrying. MaxElapsedTime: 0, }.RequestFunc(ev) ctx := context.Background() assert.NoError(t, reqFunc(ctx, func(context.Context) error { return nil })) assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error { return assert.AnError }), assert.AnError) } func TestThrottledRetry(t *testing.T) { // Ensure the throttle delay is used by making longer than backoff delay. throttleDelay, backoffDelay := time.Second, time.Nanosecond ev := func(error) (bool, time.Duration) { // Retry everything with a throttle delay. return true, throttleDelay } reqFunc := Config{ Enabled: true, InitialInterval: backoffDelay, MaxInterval: backoffDelay, // Never stop retrying. MaxElapsedTime: 0, }.RequestFunc(ev) origWait := waitFunc var done bool waitFunc = func(_ context.Context, delay time.Duration) error { assert.Equal(t, throttleDelay, delay, "retry not throttled") // Try twice to ensure call is attempted again after delay. if done { return assert.AnError } done = true return nil } defer func() { waitFunc = origWait }() ctx := context.Background() assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error { return errors.New("not this error") }), assert.AnError) } func TestBackoffRetry(t *testing.T) { ev := func(error) (bool, time.Duration) { return true, 0 } delay := time.Nanosecond reqFunc := Config{ Enabled: true, InitialInterval: delay, MaxInterval: delay, // Never stop retrying. 
MaxElapsedTime: 0, }.RequestFunc(ev) origWait := waitFunc var done bool waitFunc = func(_ context.Context, d time.Duration) error { delta := math.Ceil(float64(delay) * backoff.DefaultRandomizationFactor) assert.InDelta(t, delay, d, delta, "retry not backoffed") // Try twice to ensure call is attempted again after delay. if done { return assert.AnError } done = true return nil } t.Cleanup(func() { waitFunc = origWait }) ctx := context.Background() assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error { return errors.New("not this error") }), assert.AnError) } func TestBackoffRetryCanceledContext(t *testing.T) { ev := func(error) (bool, time.Duration) { return true, 0 } delay := time.Millisecond reqFunc := Config{ Enabled: true, InitialInterval: delay, MaxInterval: delay, // Never stop retrying. MaxElapsedTime: 10 * time.Millisecond, }.RequestFunc(ev) ctx, cancel := context.WithCancel(context.Background()) count := 0 cancel() err := reqFunc(ctx, func(context.Context) error { count++ return assert.AnError }) assert.ErrorIs(t, err, context.Canceled) assert.Contains(t, err.Error(), assert.AnError.Error()) assert.Equal(t, 1, count) } func TestThrottledRetryGreaterThanMaxElapsedTime(t *testing.T) { // Ensure the throttle delay is used by making longer than backoff delay. tDelay, bDelay := time.Hour, time.Nanosecond ev := func(error) (bool, time.Duration) { return true, tDelay } reqFunc := Config{ Enabled: true, InitialInterval: bDelay, MaxInterval: bDelay, MaxElapsedTime: tDelay - (time.Nanosecond), }.RequestFunc(ev) ctx := context.Background() assert.Contains(t, reqFunc(ctx, func(context.Context) error { return assert.AnError }).Error(), "max retry time would elapse: ") } func TestMaxElapsedTime(t *testing.T) { ev := func(error) (bool, time.Duration) { return true, 0 } delay := time.Nanosecond reqFunc := Config{ Enabled: true, // InitialInterval > MaxElapsedTime means immediate return. InitialInterval: 2 * delay, MaxElapsedTime: delay, }.RequestFunc(ev) ctx := context.Background() assert.Contains(t, reqFunc(ctx, func(context.Context) error { return assert.AnError }).Error(), "max retry time elapsed: ") } func TestRetryNotEnabled(t *testing.T) { ev := func(error) (bool, time.Duration) { t.Error("evaluated retry when not enabled") return false, 0 } reqFunc := Config{}.RequestFunc(ev) ctx := context.Background() assert.NoError(t, reqFunc(ctx, func(context.Context) error { return nil })) assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error { return assert.AnError }), assert.AnError) } func TestRetryConcurrentSafe(t *testing.T) { ev := func(error) (bool, time.Duration) { return true, 0 } reqFunc := Config{ Enabled: true, }.RequestFunc(ev) var wg sync.WaitGroup ctx := context.Background() for i := 1; i < 5; i++ { wg.Add(1) go func() { defer wg.Done() var done bool assert.NoError(t, reqFunc(ctx, func(context.Context) error { if !done { done = true return assert.AnError } return nil })) }() } wg.Wait() } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/000077500000000000000000000000001452547353200312105ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/attribute.go000066400000000000000000000075771452547353200335620ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. 
// source: internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform" import ( "go.opentelemetry.io/otel/attribute" cpb "go.opentelemetry.io/proto/otlp/common/v1" ) // AttrIter transforms an attribute iterator into OTLP key-values. func AttrIter(iter attribute.Iterator) []*cpb.KeyValue { l := iter.Len() if l == 0 { return nil } out := make([]*cpb.KeyValue, 0, l) for iter.Next() { out = append(out, KeyValue(iter.Attribute())) } return out } // KeyValues transforms a slice of attribute KeyValues into OTLP key-values. func KeyValues(attrs []attribute.KeyValue) []*cpb.KeyValue { if len(attrs) == 0 { return nil } out := make([]*cpb.KeyValue, 0, len(attrs)) for _, kv := range attrs { out = append(out, KeyValue(kv)) } return out } // KeyValue transforms an attribute KeyValue into an OTLP key-value. func KeyValue(kv attribute.KeyValue) *cpb.KeyValue { return &cpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)} } // Value transforms an attribute Value into an OTLP AnyValue. func Value(v attribute.Value) *cpb.AnyValue { av := new(cpb.AnyValue) switch v.Type() { case attribute.BOOL: av.Value = &cpb.AnyValue_BoolValue{ BoolValue: v.AsBool(), } case attribute.BOOLSLICE: av.Value = &cpb.AnyValue_ArrayValue{ ArrayValue: &cpb.ArrayValue{ Values: boolSliceValues(v.AsBoolSlice()), }, } case attribute.INT64: av.Value = &cpb.AnyValue_IntValue{ IntValue: v.AsInt64(), } case attribute.INT64SLICE: av.Value = &cpb.AnyValue_ArrayValue{ ArrayValue: &cpb.ArrayValue{ Values: int64SliceValues(v.AsInt64Slice()), }, } case attribute.FLOAT64: av.Value = &cpb.AnyValue_DoubleValue{ DoubleValue: v.AsFloat64(), } case attribute.FLOAT64SLICE: av.Value = &cpb.AnyValue_ArrayValue{ ArrayValue: &cpb.ArrayValue{ Values: float64SliceValues(v.AsFloat64Slice()), }, } case attribute.STRING: av.Value = &cpb.AnyValue_StringValue{ StringValue: v.AsString(), } case attribute.STRINGSLICE: av.Value = &cpb.AnyValue_ArrayValue{ ArrayValue: &cpb.ArrayValue{ Values: stringSliceValues(v.AsStringSlice()), }, } default: av.Value = &cpb.AnyValue_StringValue{ StringValue: "INVALID", } } return av } func boolSliceValues(vals []bool) []*cpb.AnyValue { converted := make([]*cpb.AnyValue, len(vals)) for i, v := range vals { converted[i] = &cpb.AnyValue{ Value: &cpb.AnyValue_BoolValue{ BoolValue: v, }, } } return converted } func int64SliceValues(vals []int64) []*cpb.AnyValue { converted := make([]*cpb.AnyValue, len(vals)) for i, v := range vals { converted[i] = &cpb.AnyValue{ Value: &cpb.AnyValue_IntValue{ IntValue: v, }, } } return converted } func float64SliceValues(vals []float64) []*cpb.AnyValue { converted := make([]*cpb.AnyValue, len(vals)) for i, v := range vals { converted[i] = &cpb.AnyValue{ Value: &cpb.AnyValue_DoubleValue{ DoubleValue: v, }, } } return converted } func stringSliceValues(vals []string) []*cpb.AnyValue { converted := 
make([]*cpb.AnyValue, len(vals)) for i, v := range vals { converted[i] = &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{ StringValue: v, }, } } return converted } attribute_test.go000066400000000000000000000131021452547353200345170ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/transform/attribute_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package transform import ( "testing" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/attribute" cpb "go.opentelemetry.io/proto/otlp/common/v1" ) var ( attrBool = attribute.Bool("bool", true) attrBoolSlice = attribute.BoolSlice("bool slice", []bool{true, false}) attrInt = attribute.Int("int", 1) attrIntSlice = attribute.IntSlice("int slice", []int{-1, 1}) attrInt64 = attribute.Int64("int64", 1) attrInt64Slice = attribute.Int64Slice("int64 slice", []int64{-1, 1}) attrFloat64 = attribute.Float64("float64", 1) attrFloat64Slice = attribute.Float64Slice("float64 slice", []float64{-1, 1}) attrString = attribute.String("string", "o") attrStringSlice = attribute.StringSlice("string slice", []string{"o", "n"}) attrInvalid = attribute.KeyValue{ Key: attribute.Key("invalid"), Value: attribute.Value{}, } valBoolTrue = &cpb.AnyValue{Value: &cpb.AnyValue_BoolValue{BoolValue: true}} valBoolFalse = &cpb.AnyValue{Value: &cpb.AnyValue_BoolValue{BoolValue: false}} valBoolSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{ ArrayValue: &cpb.ArrayValue{ Values: []*cpb.AnyValue{valBoolTrue, valBoolFalse}, }, }} valIntOne = &cpb.AnyValue{Value: &cpb.AnyValue_IntValue{IntValue: 1}} valIntNOne = &cpb.AnyValue{Value: &cpb.AnyValue_IntValue{IntValue: -1}} valIntSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{ ArrayValue: &cpb.ArrayValue{ Values: []*cpb.AnyValue{valIntNOne, valIntOne}, }, }} valDblOne = &cpb.AnyValue{Value: &cpb.AnyValue_DoubleValue{DoubleValue: 1}} valDblNOne = &cpb.AnyValue{Value: &cpb.AnyValue_DoubleValue{DoubleValue: -1}} valDblSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{ ArrayValue: &cpb.ArrayValue{ Values: []*cpb.AnyValue{valDblNOne, valDblOne}, }, }} valStrO = &cpb.AnyValue{Value: &cpb.AnyValue_StringValue{StringValue: "o"}} valStrN = &cpb.AnyValue{Value: &cpb.AnyValue_StringValue{StringValue: "n"}} valStrSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{ ArrayValue: &cpb.ArrayValue{ Values: []*cpb.AnyValue{valStrO, valStrN}, }, }} kvBool = &cpb.KeyValue{Key: "bool", Value: valBoolTrue} kvBoolSlice = &cpb.KeyValue{Key: "bool slice", Value: valBoolSlice} kvInt = &cpb.KeyValue{Key: "int", Value: valIntOne} kvIntSlice = &cpb.KeyValue{Key: "int slice", Value: valIntSlice} kvInt64 = &cpb.KeyValue{Key: "int64", Value: valIntOne} kvInt64Slice = &cpb.KeyValue{Key: "int64 slice", Value: valIntSlice} kvFloat64 = &cpb.KeyValue{Key: "float64", Value: valDblOne} kvFloat64Slice = &cpb.KeyValue{Key: "float64 slice", Value: valDblSlice} 
kvString = &cpb.KeyValue{Key: "string", Value: valStrO} kvStringSlice = &cpb.KeyValue{Key: "string slice", Value: valStrSlice} kvInvalid = &cpb.KeyValue{ Key: "invalid", Value: &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{StringValue: "INVALID"}, }, } ) type attributeTest struct { name string in []attribute.KeyValue want []*cpb.KeyValue } func TestAttributeTransforms(t *testing.T) { for _, test := range []attributeTest{ {"nil", nil, nil}, {"empty", []attribute.KeyValue{}, nil}, { "invalid", []attribute.KeyValue{attrInvalid}, []*cpb.KeyValue{kvInvalid}, }, { "bool", []attribute.KeyValue{attrBool}, []*cpb.KeyValue{kvBool}, }, { "bool slice", []attribute.KeyValue{attrBoolSlice}, []*cpb.KeyValue{kvBoolSlice}, }, { "int", []attribute.KeyValue{attrInt}, []*cpb.KeyValue{kvInt}, }, { "int slice", []attribute.KeyValue{attrIntSlice}, []*cpb.KeyValue{kvIntSlice}, }, { "int64", []attribute.KeyValue{attrInt64}, []*cpb.KeyValue{kvInt64}, }, { "int64 slice", []attribute.KeyValue{attrInt64Slice}, []*cpb.KeyValue{kvInt64Slice}, }, { "float64", []attribute.KeyValue{attrFloat64}, []*cpb.KeyValue{kvFloat64}, }, { "float64 slice", []attribute.KeyValue{attrFloat64Slice}, []*cpb.KeyValue{kvFloat64Slice}, }, { "string", []attribute.KeyValue{attrString}, []*cpb.KeyValue{kvString}, }, { "string slice", []attribute.KeyValue{attrStringSlice}, []*cpb.KeyValue{kvStringSlice}, }, { "all", []attribute.KeyValue{ attrBool, attrBoolSlice, attrInt, attrIntSlice, attrInt64, attrInt64Slice, attrFloat64, attrFloat64Slice, attrString, attrStringSlice, attrInvalid, }, []*cpb.KeyValue{ kvBool, kvBoolSlice, kvInt, kvIntSlice, kvInt64, kvInt64Slice, kvFloat64, kvFloat64Slice, kvString, kvStringSlice, kvInvalid, }, }, } { t.Run(test.name, func(t *testing.T) { t.Run("KeyValues", func(t *testing.T) { assert.ElementsMatch(t, test.want, KeyValues(test.in)) }) t.Run("AttrIter", func(t *testing.T) { s := attribute.NewSet(test.in...) assert.ElementsMatch(t, test.want, AttrIter(s.Iter())) }) }) } } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error.go000066400000000000000000000060001452547353200326640ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/transform/error.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
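// Usage sketch for the attribute helpers above (hedged, illustrative only;
// this is an internal package, and the attribute keys below are made up).
// Value maps each attribute kind onto the matching OTLP AnyValue wrapper and
// falls back to the string "INVALID" for unknown kinds, so:
//
//	attrs := []attribute.KeyValue{
//		attribute.String("user", "alice"),
//		attribute.Int64Slice("ids", []int64{1, 2}),
//	}
//	kvs := KeyValues(attrs)           // slice form
//	set := attribute.NewSet(attrs...)
//	kvs = AttrIter(set.Iter())        // deduplicated, sorted set form
//	_ = kvs                           // []*cpb.KeyValue with AnyValue payloads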
package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform" import ( "errors" "fmt" "strings" mpb "go.opentelemetry.io/proto/otlp/metrics/v1" ) var ( errUnknownAggregation = errors.New("unknown aggregation") errUnknownTemporality = errors.New("unknown temporality") ) type errMetric struct { m *mpb.Metric err error } func (e errMetric) Unwrap() error { return e.err } func (e errMetric) Error() string { format := "invalid metric (name: %q, description: %q, unit: %q): %s" return fmt.Sprintf(format, e.m.Name, e.m.Description, e.m.Unit, e.err) } func (e errMetric) Is(target error) bool { return errors.Is(e.err, target) } // multiErr is used by the data-type transform functions to wrap multiple // errors into a single return value. The error message will show all errors // as a list and scope them by the datatype name that is returning them. type multiErr struct { datatype string errs []error } // errOrNil returns nil if e contains no errors, otherwise it returns e. func (e *multiErr) errOrNil() error { if len(e.errs) == 0 { return nil } return e } // append adds err to e. If err is a multiErr, its errs are flattened into e. func (e *multiErr) append(err error) { // Do not use errors.As here, this should only be flattened one layer. If // there is a *multiErr several steps down the chain, all the errors above // it will be discarded if errors.As is used instead. switch other := err.(type) { case *multiErr: // Flatten err errors into e. e.errs = append(e.errs, other.errs...) default: e.errs = append(e.errs, err) } } func (e *multiErr) Error() string { es := make([]string, len(e.errs)) for i, err := range e.errs { es[i] = fmt.Sprintf("* %s", err) } format := "%d errors occurred transforming %s:\n\t%s" return fmt.Sprintf(format, len(es), e.datatype, strings.Join(es, "\n\t")) } func (e *multiErr) Unwrap() error { switch len(e.errs) { case 0: return nil case 1: return e.errs[0] } // Return a multiErr without the leading error. cp := &multiErr{ datatype: e.datatype, errs: make([]error, len(e.errs)-1), } copy(cp.errs, e.errs[1:]) return cp } func (e *multiErr) Is(target error) bool { if len(e.errs) == 0 { return false } // Check if the first error is target. return errors.Is(e.errs[0], target) } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/error_test.go000066400000000000000000000050751452547353200337360ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/transform/error_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package transform import ( "strings" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) var ( e0 = errMetric{m: pbMetrics[0], err: errUnknownAggregation} e1 = errMetric{m: pbMetrics[1], err: errUnknownTemporality} ) type testingErr struct{} func (testingErr) Error() string { return "testing error" } // errFunc is a non-comparable error type. 
type errFunc func() string func (e errFunc) Error() string { return e() } func TestMultiErr(t *testing.T) { const name = "TestMultiErr" me := &multiErr{datatype: name} t.Run("ErrOrNil", func(t *testing.T) { require.Nil(t, me.errOrNil()) me.errs = []error{e0} assert.Error(t, me.errOrNil()) }) var testErr testingErr t.Run("AppendError", func(t *testing.T) { me.append(testErr) assert.Equal(t, testErr, me.errs[len(me.errs)-1]) }) t.Run("AppendFlattens", func(t *testing.T) { other := &multiErr{datatype: "OtherTestMultiErr", errs: []error{e1}} me.append(other) assert.Equal(t, e1, me.errs[len(me.errs)-1]) }) t.Run("ErrorMessage", func(t *testing.T) { // Test the overall structure of the message, but not the exact // language so this doesn't become a change-indicator. msg := me.Error() lines := strings.Split(msg, "\n") assert.Equalf(t, 4, len(lines), "expected a 4 line error message, got:\n\n%s", msg) assert.Contains(t, msg, name) assert.Contains(t, msg, e0.Error()) assert.Contains(t, msg, testErr.Error()) assert.Contains(t, msg, e1.Error()) }) t.Run("ErrorIs", func(t *testing.T) { assert.ErrorIs(t, me, errUnknownAggregation) assert.ErrorIs(t, me, e0) assert.ErrorIs(t, me, testErr) assert.ErrorIs(t, me, errUnknownTemporality) assert.ErrorIs(t, me, e1) errUnknown := errFunc(func() string { return "unknown error" }) assert.NotErrorIs(t, me, errUnknown) var empty multiErr assert.NotErrorIs(t, &empty, errUnknownTemporality) }) } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata.go000066400000000000000000000226431452547353200336630ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package transform provides transformation functionality from the // sdk/metric/metricdata data-types into OTLP data-types. package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform" import ( "fmt" "time" "go.opentelemetry.io/otel/sdk/metric/metricdata" cpb "go.opentelemetry.io/proto/otlp/common/v1" mpb "go.opentelemetry.io/proto/otlp/metrics/v1" rpb "go.opentelemetry.io/proto/otlp/resource/v1" ) // ResourceMetrics returns an OTLP ResourceMetrics generated from rm. If rm // contains invalid ScopeMetrics, an error will be returned along with an OTLP // ResourceMetrics that contains partial OTLP ScopeMetrics. func ResourceMetrics(rm *metricdata.ResourceMetrics) (*mpb.ResourceMetrics, error) { sms, err := ScopeMetrics(rm.ScopeMetrics) return &mpb.ResourceMetrics{ Resource: &rpb.Resource{ Attributes: AttrIter(rm.Resource.Iter()), }, ScopeMetrics: sms, SchemaUrl: rm.Resource.SchemaURL(), }, err } // ScopeMetrics returns a slice of OTLP ScopeMetrics generated from sms. If // sms contains invalid metric values, an error will be returned along with a // slice that contains partial OTLP ScopeMetrics. 
func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) { errs := &multiErr{datatype: "ScopeMetrics"} out := make([]*mpb.ScopeMetrics, 0, len(sms)) for _, sm := range sms { ms, err := Metrics(sm.Metrics) if err != nil { errs.append(err) } out = append(out, &mpb.ScopeMetrics{ Scope: &cpb.InstrumentationScope{ Name: sm.Scope.Name, Version: sm.Scope.Version, }, Metrics: ms, SchemaUrl: sm.Scope.SchemaURL, }) } return out, errs.errOrNil() } // Metrics returns a slice of OTLP Metric generated from ms. If ms contains // invalid metric values, an error will be returned along with a slice that // contains partial OTLP Metrics. func Metrics(ms []metricdata.Metrics) ([]*mpb.Metric, error) { errs := &multiErr{datatype: "Metrics"} out := make([]*mpb.Metric, 0, len(ms)) for _, m := range ms { o, err := metric(m) if err != nil { // Do not include invalid data. Drop the metric, report the error. errs.append(errMetric{m: o, err: err}) continue } out = append(out, o) } return out, errs.errOrNil() } func metric(m metricdata.Metrics) (*mpb.Metric, error) { var err error out := &mpb.Metric{ Name: m.Name, Description: m.Description, Unit: string(m.Unit), } switch a := m.Data.(type) { case metricdata.Gauge[int64]: out.Data = Gauge[int64](a) case metricdata.Gauge[float64]: out.Data = Gauge[float64](a) case metricdata.Sum[int64]: out.Data, err = Sum[int64](a) case metricdata.Sum[float64]: out.Data, err = Sum[float64](a) case metricdata.Histogram[int64]: out.Data, err = Histogram(a) case metricdata.Histogram[float64]: out.Data, err = Histogram(a) case metricdata.ExponentialHistogram[int64]: out.Data, err = ExponentialHistogram(a) case metricdata.ExponentialHistogram[float64]: out.Data, err = ExponentialHistogram(a) default: return out, fmt.Errorf("%w: %T", errUnknownAggregation, a) } return out, err } // Gauge returns an OTLP Metric_Gauge generated from g. func Gauge[N int64 | float64](g metricdata.Gauge[N]) *mpb.Metric_Gauge { return &mpb.Metric_Gauge{ Gauge: &mpb.Gauge{ DataPoints: DataPoints(g.DataPoints), }, } } // Sum returns an OTLP Metric_Sum generated from s. An error is returned // if the temporality of s is unknown. func Sum[N int64 | float64](s metricdata.Sum[N]) (*mpb.Metric_Sum, error) { t, err := Temporality(s.Temporality) if err != nil { return nil, err } return &mpb.Metric_Sum{ Sum: &mpb.Sum{ AggregationTemporality: t, IsMonotonic: s.IsMonotonic, DataPoints: DataPoints(s.DataPoints), }, }, nil } // DataPoints returns a slice of OTLP NumberDataPoint generated from dPts. func DataPoints[N int64 | float64](dPts []metricdata.DataPoint[N]) []*mpb.NumberDataPoint { out := make([]*mpb.NumberDataPoint, 0, len(dPts)) for _, dPt := range dPts { ndp := &mpb.NumberDataPoint{ Attributes: AttrIter(dPt.Attributes.Iter()), StartTimeUnixNano: timeUnixNano(dPt.StartTime), TimeUnixNano: timeUnixNano(dPt.Time), } switch v := any(dPt.Value).(type) { case int64: ndp.Value = &mpb.NumberDataPoint_AsInt{ AsInt: v, } case float64: ndp.Value = &mpb.NumberDataPoint_AsDouble{ AsDouble: v, } } out = append(out, ndp) } return out } // Histogram returns an OTLP Metric_Histogram generated from h. An error is // returned if the temporality of h is unknown. 
func Histogram[N int64 | float64](h metricdata.Histogram[N]) (*mpb.Metric_Histogram, error) { t, err := Temporality(h.Temporality) if err != nil { return nil, err } return &mpb.Metric_Histogram{ Histogram: &mpb.Histogram{ AggregationTemporality: t, DataPoints: HistogramDataPoints(h.DataPoints), }, }, nil } // HistogramDataPoints returns a slice of OTLP HistogramDataPoint generated // from dPts. func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint[N]) []*mpb.HistogramDataPoint { out := make([]*mpb.HistogramDataPoint, 0, len(dPts)) for _, dPt := range dPts { sum := float64(dPt.Sum) hdp := &mpb.HistogramDataPoint{ Attributes: AttrIter(dPt.Attributes.Iter()), StartTimeUnixNano: timeUnixNano(dPt.StartTime), TimeUnixNano: timeUnixNano(dPt.Time), Count: dPt.Count, Sum: &sum, BucketCounts: dPt.BucketCounts, ExplicitBounds: dPt.Bounds, } if v, ok := dPt.Min.Value(); ok { vF64 := float64(v) hdp.Min = &vF64 } if v, ok := dPt.Max.Value(); ok { vF64 := float64(v) hdp.Max = &vF64 } out = append(out, hdp) } return out } // ExponentialHistogram returns an OTLP Metric_ExponentialHistogram generated from h. An error is // returned if the temporality of h is unknown. func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N]) (*mpb.Metric_ExponentialHistogram, error) { t, err := Temporality(h.Temporality) if err != nil { return nil, err } return &mpb.Metric_ExponentialHistogram{ ExponentialHistogram: &mpb.ExponentialHistogram{ AggregationTemporality: t, DataPoints: ExponentialHistogramDataPoints(h.DataPoints), }, }, nil } // ExponentialHistogramDataPoints returns a slice of OTLP ExponentialHistogramDataPoint generated // from dPts. func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.ExponentialHistogramDataPoint[N]) []*mpb.ExponentialHistogramDataPoint { out := make([]*mpb.ExponentialHistogramDataPoint, 0, len(dPts)) for _, dPt := range dPts { sum := float64(dPt.Sum) ehdp := &mpb.ExponentialHistogramDataPoint{ Attributes: AttrIter(dPt.Attributes.Iter()), StartTimeUnixNano: timeUnixNano(dPt.StartTime), TimeUnixNano: timeUnixNano(dPt.Time), Count: dPt.Count, Sum: &sum, Scale: dPt.Scale, ZeroCount: dPt.ZeroCount, Positive: ExponentialHistogramDataPointBuckets(dPt.PositiveBucket), Negative: ExponentialHistogramDataPointBuckets(dPt.NegativeBucket), } if v, ok := dPt.Min.Value(); ok { vF64 := float64(v) ehdp.Min = &vF64 } if v, ok := dPt.Max.Value(); ok { vF64 := float64(v) ehdp.Max = &vF64 } out = append(out, ehdp) } return out } // ExponentialHistogramDataPointBuckets returns an OTLP ExponentialHistogramDataPoint_Buckets generated // from bucket. func ExponentialHistogramDataPointBuckets(bucket metricdata.ExponentialBucket) *mpb.ExponentialHistogramDataPoint_Buckets { return &mpb.ExponentialHistogramDataPoint_Buckets{ Offset: bucket.Offset, BucketCounts: bucket.Counts, } } // Temporality returns an OTLP AggregationTemporality generated from t. If t // is unknown, an error is returned along with the invalid // AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED. 
func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) { switch t { case metricdata.DeltaTemporality: return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, nil case metricdata.CumulativeTemporality: return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, nil default: err := fmt.Errorf("%w: %s", errUnknownTemporality, t) return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED, err } } // timeUnixNano returns t as a Unix time, the number of nanoseconds elapsed // since January 1, 1970 UTC as uint64. // The result is undefined if the Unix time // in nanoseconds cannot be represented by an int64 // (a date before the year 1678 or after 2262). // timeUnixNano on the zero Time returns 0. // The result does not depend on the location associated with t. func timeUnixNano(t time.Time) uint64 { if t.IsZero() { return 0 } return uint64(t.UnixNano()) } metricdata_test.go000066400000000000000000000437171452547353200346500ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package transform import ( "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/resource" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" cpb "go.opentelemetry.io/proto/otlp/common/v1" mpb "go.opentelemetry.io/proto/otlp/metrics/v1" rpb "go.opentelemetry.io/proto/otlp/resource/v1" ) type unknownAggT struct { metricdata.Aggregation } var ( // Sat Jan 01 2000 00:00:00 GMT+0000. 
start = time.Date(2000, time.January, 0o1, 0, 0, 0, 0, time.FixedZone("GMT", 0)) end = start.Add(30 * time.Second) alice = attribute.NewSet(attribute.String("user", "alice")) bob = attribute.NewSet(attribute.String("user", "bob")) pbAlice = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{StringValue: "alice"}, }} pbBob = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{StringValue: "bob"}, }} minA, maxA, sumA = 2.0, 4.0, 90.0 minB, maxB, sumB = 4.0, 150.0, 234.0 otelHDPInt64 = []metricdata.HistogramDataPoint[int64]{ { Attributes: alice, StartTime: start, Time: end, Count: 30, Bounds: []float64{1, 5}, BucketCounts: []uint64{0, 30, 0}, Min: metricdata.NewExtrema(int64(minA)), Max: metricdata.NewExtrema(int64(maxA)), Sum: int64(sumA), }, { Attributes: bob, StartTime: start, Time: end, Count: 3, Bounds: []float64{1, 5}, BucketCounts: []uint64{0, 1, 2}, Min: metricdata.NewExtrema(int64(minB)), Max: metricdata.NewExtrema(int64(maxB)), Sum: int64(sumB), }, } otelHDPFloat64 = []metricdata.HistogramDataPoint[float64]{ { Attributes: alice, StartTime: start, Time: end, Count: 30, Bounds: []float64{1, 5}, BucketCounts: []uint64{0, 30, 0}, Min: metricdata.NewExtrema(minA), Max: metricdata.NewExtrema(maxA), Sum: sumA, }, { Attributes: bob, StartTime: start, Time: end, Count: 3, Bounds: []float64{1, 5}, BucketCounts: []uint64{0, 1, 2}, Min: metricdata.NewExtrema(minB), Max: metricdata.NewExtrema(maxB), Sum: sumB, }, } otelEBucketA = metricdata.ExponentialBucket{ Offset: 5, Counts: []uint64{0, 5, 0, 5}, } otelEBucketB = metricdata.ExponentialBucket{ Offset: 3, Counts: []uint64{0, 5, 0, 5}, } otelEBucketsC = metricdata.ExponentialBucket{ Offset: 5, Counts: []uint64{0, 1}, } otelEBucketsD = metricdata.ExponentialBucket{ Offset: 3, Counts: []uint64{0, 1}, } otelEHDPInt64 = []metricdata.ExponentialHistogramDataPoint[int64]{ { Attributes: alice, StartTime: start, Time: end, Count: 30, Scale: 2, ZeroCount: 10, PositiveBucket: otelEBucketA, NegativeBucket: otelEBucketB, ZeroThreshold: .01, Min: metricdata.NewExtrema(int64(minA)), Max: metricdata.NewExtrema(int64(maxA)), Sum: int64(sumA), }, { Attributes: bob, StartTime: start, Time: end, Count: 3, Scale: 4, ZeroCount: 1, PositiveBucket: otelEBucketsC, NegativeBucket: otelEBucketsD, ZeroThreshold: .02, Min: metricdata.NewExtrema(int64(minB)), Max: metricdata.NewExtrema(int64(maxB)), Sum: int64(sumB), }, } otelEHDPFloat64 = []metricdata.ExponentialHistogramDataPoint[float64]{ { Attributes: alice, StartTime: start, Time: end, Count: 30, Scale: 2, ZeroCount: 10, PositiveBucket: otelEBucketA, NegativeBucket: otelEBucketB, ZeroThreshold: .01, Min: metricdata.NewExtrema(minA), Max: metricdata.NewExtrema(maxA), Sum: sumA, }, { Attributes: bob, StartTime: start, Time: end, Count: 3, Scale: 4, ZeroCount: 1, PositiveBucket: otelEBucketsC, NegativeBucket: otelEBucketsD, ZeroThreshold: .02, Min: metricdata.NewExtrema(minB), Max: metricdata.NewExtrema(maxB), Sum: sumB, }, } pbHDP = []*mpb.HistogramDataPoint{ { Attributes: []*cpb.KeyValue{pbAlice}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Count: 30, Sum: &sumA, ExplicitBounds: []float64{1, 5}, BucketCounts: []uint64{0, 30, 0}, Min: &minA, Max: &maxA, }, { Attributes: []*cpb.KeyValue{pbBob}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Count: 3, Sum: &sumB, ExplicitBounds: []float64{1, 5}, BucketCounts: []uint64{0, 1, 2}, Min: &minB, Max: &maxB, }, } pbEHDPBA = 
&mpb.ExponentialHistogramDataPoint_Buckets{ Offset: 5, BucketCounts: []uint64{0, 5, 0, 5}, } pbEHDPBB = &mpb.ExponentialHistogramDataPoint_Buckets{ Offset: 3, BucketCounts: []uint64{0, 5, 0, 5}, } pbEHDPBC = &mpb.ExponentialHistogramDataPoint_Buckets{ Offset: 5, BucketCounts: []uint64{0, 1}, } pbEHDPBD = &mpb.ExponentialHistogramDataPoint_Buckets{ Offset: 3, BucketCounts: []uint64{0, 1}, } pbEHDP = []*mpb.ExponentialHistogramDataPoint{ { Attributes: []*cpb.KeyValue{pbAlice}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Count: 30, Sum: &sumA, Scale: 2, ZeroCount: 10, Positive: pbEHDPBA, Negative: pbEHDPBB, Min: &minA, Max: &maxA, }, { Attributes: []*cpb.KeyValue{pbBob}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Count: 3, Sum: &sumB, Scale: 4, ZeroCount: 1, Positive: pbEHDPBC, Negative: pbEHDPBD, Min: &minB, Max: &maxB, }, } otelHistInt64 = metricdata.Histogram[int64]{ Temporality: metricdata.DeltaTemporality, DataPoints: otelHDPInt64, } otelHistFloat64 = metricdata.Histogram[float64]{ Temporality: metricdata.DeltaTemporality, DataPoints: otelHDPFloat64, } invalidTemporality metricdata.Temporality otelHistInvalid = metricdata.Histogram[int64]{ Temporality: invalidTemporality, DataPoints: otelHDPInt64, } otelExpoHistInt64 = metricdata.ExponentialHistogram[int64]{ Temporality: metricdata.DeltaTemporality, DataPoints: otelEHDPInt64, } otelExpoHistFloat64 = metricdata.ExponentialHistogram[float64]{ Temporality: metricdata.DeltaTemporality, DataPoints: otelEHDPFloat64, } otelExpoHistInvalid = metricdata.ExponentialHistogram[int64]{ Temporality: invalidTemporality, DataPoints: otelEHDPInt64, } pbHist = &mpb.Histogram{ AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, DataPoints: pbHDP, } pbExpoHist = &mpb.ExponentialHistogram{ AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, DataPoints: pbEHDP, } otelDPtsInt64 = []metricdata.DataPoint[int64]{ {Attributes: alice, StartTime: start, Time: end, Value: 1}, {Attributes: bob, StartTime: start, Time: end, Value: 2}, } otelDPtsFloat64 = []metricdata.DataPoint[float64]{ {Attributes: alice, StartTime: start, Time: end, Value: 1.0}, {Attributes: bob, StartTime: start, Time: end, Value: 2.0}, } pbDPtsInt64 = []*mpb.NumberDataPoint{ { Attributes: []*cpb.KeyValue{pbAlice}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Value: &mpb.NumberDataPoint_AsInt{AsInt: 1}, }, { Attributes: []*cpb.KeyValue{pbBob}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Value: &mpb.NumberDataPoint_AsInt{AsInt: 2}, }, } pbDPtsFloat64 = []*mpb.NumberDataPoint{ { Attributes: []*cpb.KeyValue{pbAlice}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Value: &mpb.NumberDataPoint_AsDouble{AsDouble: 1.0}, }, { Attributes: []*cpb.KeyValue{pbBob}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Value: &mpb.NumberDataPoint_AsDouble{AsDouble: 2.0}, }, } otelSumInt64 = metricdata.Sum[int64]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: true, DataPoints: otelDPtsInt64, } otelSumFloat64 = metricdata.Sum[float64]{ Temporality: metricdata.DeltaTemporality, IsMonotonic: false, DataPoints: otelDPtsFloat64, } otelSumInvalid = metricdata.Sum[float64]{ Temporality: invalidTemporality, IsMonotonic: false, DataPoints: otelDPtsFloat64, } pbSumInt64 = &mpb.Sum{ AggregationTemporality: 
mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, IsMonotonic: true, DataPoints: pbDPtsInt64, } pbSumFloat64 = &mpb.Sum{ AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, IsMonotonic: false, DataPoints: pbDPtsFloat64, } otelGaugeInt64 = metricdata.Gauge[int64]{DataPoints: otelDPtsInt64} otelGaugeFloat64 = metricdata.Gauge[float64]{DataPoints: otelDPtsFloat64} otelGaugeZeroStartTime = metricdata.Gauge[int64]{ DataPoints: []metricdata.DataPoint[int64]{ {Attributes: alice, StartTime: time.Time{}, Time: end, Value: 1}, }, } pbGaugeInt64 = &mpb.Gauge{DataPoints: pbDPtsInt64} pbGaugeFloat64 = &mpb.Gauge{DataPoints: pbDPtsFloat64} pbGaugeZeroStartTime = &mpb.Gauge{DataPoints: []*mpb.NumberDataPoint{ { Attributes: []*cpb.KeyValue{pbAlice}, StartTimeUnixNano: 0, TimeUnixNano: uint64(end.UnixNano()), Value: &mpb.NumberDataPoint_AsInt{AsInt: 1}, }, }} unknownAgg unknownAggT otelMetrics = []metricdata.Metrics{ { Name: "int64-gauge", Description: "Gauge with int64 values", Unit: "1", Data: otelGaugeInt64, }, { Name: "float64-gauge", Description: "Gauge with float64 values", Unit: "1", Data: otelGaugeFloat64, }, { Name: "int64-sum", Description: "Sum with int64 values", Unit: "1", Data: otelSumInt64, }, { Name: "float64-sum", Description: "Sum with float64 values", Unit: "1", Data: otelSumFloat64, }, { Name: "invalid-sum", Description: "Sum with invalid temporality", Unit: "1", Data: otelSumInvalid, }, { Name: "int64-histogram", Description: "Histogram", Unit: "1", Data: otelHistInt64, }, { Name: "float64-histogram", Description: "Histogram", Unit: "1", Data: otelHistFloat64, }, { Name: "invalid-histogram", Description: "Invalid histogram", Unit: "1", Data: otelHistInvalid, }, { Name: "unknown", Description: "Unknown aggregation", Unit: "1", Data: unknownAgg, }, { Name: "int64-ExponentialHistogram", Description: "Exponential Histogram", Unit: "1", Data: otelExpoHistInt64, }, { Name: "float64-ExponentialHistogram", Description: "Exponential Histogram", Unit: "1", Data: otelExpoHistFloat64, }, { Name: "invalid-ExponentialHistogram", Description: "Invalid Exponential Histogram", Unit: "1", Data: otelExpoHistInvalid, }, { Name: "zero-time", Description: "Gauge with 0 StartTime", Unit: "1", Data: otelGaugeZeroStartTime, }, } pbMetrics = []*mpb.Metric{ { Name: "int64-gauge", Description: "Gauge with int64 values", Unit: "1", Data: &mpb.Metric_Gauge{Gauge: pbGaugeInt64}, }, { Name: "float64-gauge", Description: "Gauge with float64 values", Unit: "1", Data: &mpb.Metric_Gauge{Gauge: pbGaugeFloat64}, }, { Name: "int64-sum", Description: "Sum with int64 values", Unit: "1", Data: &mpb.Metric_Sum{Sum: pbSumInt64}, }, { Name: "float64-sum", Description: "Sum with float64 values", Unit: "1", Data: &mpb.Metric_Sum{Sum: pbSumFloat64}, }, { Name: "int64-histogram", Description: "Histogram", Unit: "1", Data: &mpb.Metric_Histogram{Histogram: pbHist}, }, { Name: "float64-histogram", Description: "Histogram", Unit: "1", Data: &mpb.Metric_Histogram{Histogram: pbHist}, }, { Name: "int64-ExponentialHistogram", Description: "Exponential Histogram", Unit: "1", Data: &mpb.Metric_ExponentialHistogram{ExponentialHistogram: pbExpoHist}, }, { Name: "float64-ExponentialHistogram", Description: "Exponential Histogram", Unit: "1", Data: &mpb.Metric_ExponentialHistogram{ExponentialHistogram: pbExpoHist}, }, { Name: "zero-time", Description: "Gauge with 0 StartTime", Unit: "1", Data: &mpb.Metric_Gauge{Gauge: pbGaugeZeroStartTime}, }, } otelScopeMetrics = []metricdata.ScopeMetrics{ { Scope: 
instrumentation.Scope{ Name: "test/code/path", Version: "v0.1.0", SchemaURL: semconv.SchemaURL, }, Metrics: otelMetrics, }, } pbScopeMetrics = []*mpb.ScopeMetrics{ { Scope: &cpb.InstrumentationScope{ Name: "test/code/path", Version: "v0.1.0", }, Metrics: pbMetrics, SchemaUrl: semconv.SchemaURL, }, } otelRes = resource.NewWithAttributes( semconv.SchemaURL, semconv.ServiceName("test server"), semconv.ServiceVersion("v0.1.0"), ) pbRes = &rpb.Resource{ Attributes: []*cpb.KeyValue{ { Key: "service.name", Value: &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{StringValue: "test server"}, }, }, { Key: "service.version", Value: &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{StringValue: "v0.1.0"}, }, }, }, } otelResourceMetrics = &metricdata.ResourceMetrics{ Resource: otelRes, ScopeMetrics: otelScopeMetrics, } pbResourceMetrics = &mpb.ResourceMetrics{ Resource: pbRes, ScopeMetrics: pbScopeMetrics, SchemaUrl: semconv.SchemaURL, } ) func TestTransformations(t *testing.T) { // Run tests from the "bottom-up" of the metricdata data-types and halt // when a failure occurs to ensure the clearest failure message (as // opposed to the opposite of testing from the top-down which will obscure // errors deep inside the structs). // DataPoint types. assert.Equal(t, pbHDP, HistogramDataPoints(otelHDPInt64)) assert.Equal(t, pbHDP, HistogramDataPoints(otelHDPFloat64)) assert.Equal(t, pbDPtsInt64, DataPoints[int64](otelDPtsInt64)) require.Equal(t, pbDPtsFloat64, DataPoints[float64](otelDPtsFloat64)) assert.Equal(t, pbEHDP, ExponentialHistogramDataPoints(otelEHDPInt64)) assert.Equal(t, pbEHDP, ExponentialHistogramDataPoints(otelEHDPFloat64)) assert.Equal(t, pbEHDPBA, ExponentialHistogramDataPointBuckets(otelEBucketA)) // Aggregations. h, err := Histogram(otelHistInt64) assert.NoError(t, err) assert.Equal(t, &mpb.Metric_Histogram{Histogram: pbHist}, h) h, err = Histogram(otelHistFloat64) assert.NoError(t, err) assert.Equal(t, &mpb.Metric_Histogram{Histogram: pbHist}, h) h, err = Histogram(otelHistInvalid) assert.ErrorIs(t, err, errUnknownTemporality) assert.Nil(t, h) s, err := Sum[int64](otelSumInt64) assert.NoError(t, err) assert.Equal(t, &mpb.Metric_Sum{Sum: pbSumInt64}, s) s, err = Sum[float64](otelSumFloat64) assert.NoError(t, err) assert.Equal(t, &mpb.Metric_Sum{Sum: pbSumFloat64}, s) s, err = Sum[float64](otelSumInvalid) assert.ErrorIs(t, err, errUnknownTemporality) assert.Nil(t, s) assert.Equal(t, &mpb.Metric_Gauge{Gauge: pbGaugeInt64}, Gauge[int64](otelGaugeInt64)) require.Equal(t, &mpb.Metric_Gauge{Gauge: pbGaugeFloat64}, Gauge[float64](otelGaugeFloat64)) e, err := ExponentialHistogram(otelExpoHistInt64) assert.NoError(t, err) assert.Equal(t, &mpb.Metric_ExponentialHistogram{ExponentialHistogram: pbExpoHist}, e) e, err = ExponentialHistogram(otelExpoHistFloat64) assert.NoError(t, err) assert.Equal(t, &mpb.Metric_ExponentialHistogram{ExponentialHistogram: pbExpoHist}, e) e, err = ExponentialHistogram(otelExpoHistInvalid) assert.ErrorIs(t, err, errUnknownTemporality) assert.Nil(t, e) // Metrics. m, err := Metrics(otelMetrics) assert.ErrorIs(t, err, errUnknownTemporality) assert.ErrorIs(t, err, errUnknownAggregation) require.Equal(t, pbMetrics, m) // Scope Metrics. sm, err := ScopeMetrics(otelScopeMetrics) assert.ErrorIs(t, err, errUnknownTemporality) assert.ErrorIs(t, err, errUnknownAggregation) require.Equal(t, pbScopeMetrics, sm) // Resource Metrics. 
rm, err := ResourceMetrics(otelResourceMetrics) assert.ErrorIs(t, err, errUnknownTemporality) assert.ErrorIs(t, err, errUnknownAggregation) require.Equal(t, pbResourceMetrics, rm) } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/version.go000066400000000000000000000015271452547353200274020ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" // Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf metrics exporter in use. func Version() string { return "0.44.0" } opentelemetry-go-1.21.0/exporters/otlp/otlpmetric/otlpmetrichttp/version_test.go000066400000000000000000000020631452547353200304350ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlpmetrichttp import ( "regexp" "testing" "github.com/stretchr/testify/assert" ) // regex taken from https://github.com/Masterminds/semver/tree/v3.1.1 var versionRegex = regexp.MustCompile(`^v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?$`) func TestVersionSemver(t *testing.T) { v := Version() assert.NotNil(t, versionRegex.FindStringSubmatch(v), "version is not semver: %s", v) } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/000077500000000000000000000000001452547353200221125ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlptrace/clients.go000066400000000000000000000044151452547353200241060ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
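// A hedged sketch of reusing the semver check from the test above outside of
// a test (the regular expression is the same Masterminds-style pattern; the
// variable names here are illustrative):
//
//	semverRe := regexp.MustCompile(`^v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
//		`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
//		`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?$`)
//	if !semverRe.MatchString(otlpmetrichttp.Version()) {
//		log.Fatalf("unexpected version string: %q", otlpmetrichttp.Version())
//	}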
package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" import ( "context" tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) // Client manages connections to the collector, handles the // transformation of data into wire format, and the transmission of that // data to the collector. type Client interface { // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. // Start should establish connection(s) to endpoint(s). It is // called just once by the exporter, so the implementation // does not need to worry about idempotence and locking. Start(ctx context.Context) error // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. // Stop should close the connections. The function is called // only once by the exporter, so the implementation does not // need to worry about idempotence, but it may be called // concurrently with UploadTraces, so proper // locking is required. The function serves as a // synchronization point - after the function returns, the // process of closing connections is assumed to be finished. Stop(ctx context.Context) error // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. // UploadTraces should transform the passed traces to the wire // format and send it to the collector. May be called // concurrently. UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/doc.go000066400000000000000000000016601452547353200232110ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* Package otlptrace contains abstractions for OTLP span exporters. See the official OTLP span exporter implementations: - [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc], - [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp]. */ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" opentelemetry-go-1.21.0/exporters/otlp/otlptrace/exporter.go000066400000000000000000000052411452547353200243130ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
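// The Client interface above is the only integration point a transport needs
// to satisfy. A minimal no-op implementation sketch (hedged; a real client
// must marshal protoSpans and deliver them to a collector endpoint):
//
//	type noopClient struct{}
//
//	func (noopClient) Start(ctx context.Context) error { return nil }
//	func (noopClient) Stop(ctx context.Context) error  { return nil }
//	func (noopClient) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error {
//		return nil
//	}
//
//	var _ Client = noopClient{}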
package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" import ( "context" "errors" "fmt" "sync" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" tracesdk "go.opentelemetry.io/otel/sdk/trace" ) var errAlreadyStarted = errors.New("already started") // Exporter exports trace data in the OTLP wire format. type Exporter struct { client Client mu sync.RWMutex started bool startOnce sync.Once stopOnce sync.Once } // ExportSpans exports a batch of spans. func (e *Exporter) ExportSpans(ctx context.Context, ss []tracesdk.ReadOnlySpan) error { protoSpans := tracetransform.Spans(ss) if len(protoSpans) == 0 { return nil } err := e.client.UploadTraces(ctx, protoSpans) if err != nil { return fmt.Errorf("traces export: %w", err) } return nil } // Start establishes a connection to the receiving endpoint. func (e *Exporter) Start(ctx context.Context) error { err := errAlreadyStarted e.startOnce.Do(func() { e.mu.Lock() e.started = true e.mu.Unlock() err = e.client.Start(ctx) }) return err } // Shutdown flushes all exports and closes all connections to the receiving endpoint. func (e *Exporter) Shutdown(ctx context.Context) error { e.mu.RLock() started := e.started e.mu.RUnlock() if !started { return nil } var err error e.stopOnce.Do(func() { err = e.client.Stop(ctx) e.mu.Lock() e.started = false e.mu.Unlock() }) return err } var _ tracesdk.SpanExporter = (*Exporter)(nil) // New constructs a new Exporter and starts it. func New(ctx context.Context, client Client) (*Exporter, error) { exp := NewUnstarted(client) if err := exp.Start(ctx); err != nil { return nil, err } return exp, nil } // NewUnstarted constructs a new Exporter and does not start it. func NewUnstarted(client Client) *Exporter { return &Exporter{ client: client, } } // MarshalLog is the marshaling function used by the logging system to represent this exporter. func (e *Exporter) MarshalLog() interface{} { return struct { Type string Client Client }{ Type: "otlptrace", Client: e.client, } } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/exporter_test.go000066400000000000000000000031661452547353200253560ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
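// Wiring sketch for the Exporter above (hedged; it assumes the otlptracegrpc
// sub-package mentioned in the package documentation, but any
// otlptrace.Client implementation can be passed to New):
//
//	ctx := context.Background()
//	exp, err := otlptrace.New(ctx, otlptracegrpc.NewClient())
//	if err != nil {
//		log.Fatal(err)
//	}
//	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
//	defer func() { _ = tp.Shutdown(ctx) }()
//	otel.SetTracerProvider(tp)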
package otlptrace_test import ( "context" "errors" "strings" "testing" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" "go.opentelemetry.io/otel/sdk/trace/tracetest" tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) type client struct { uploadErr error } var _ otlptrace.Client = &client{} func (c *client) Start(ctx context.Context) error { return nil } func (c *client) Stop(ctx context.Context) error { return nil } func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error { return c.uploadErr } func TestExporterClientError(t *testing.T) { ctx := context.Background() exp, err := otlptrace.New(ctx, &client{ uploadErr: context.Canceled, }) assert.NoError(t, err) spans := tracetest.SpanStubs{{Name: "Span 0"}}.Snapshots() err = exp.ExportSpans(ctx, spans) assert.Error(t, err) assert.True(t, errors.Is(err, context.Canceled)) assert.True(t, strings.HasPrefix(err.Error(), "traces export: "), err) assert.NoError(t, exp.Shutdown(ctx)) } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/go.mod000066400000000000000000000020171452547353200232200ustar00rootroot00000000000000module go.opentelemetry.io/otel/exporters/otlp/otlptrace go 1.20 require ( github.com/google/go-cmp v0.6.0 github.com/stretchr/testify v1.8.4 go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/sdk v1.21.0 go.opentelemetry.io/otel/trace v1.21.0 go.opentelemetry.io/proto/otlp v1.0.0 google.golang.org/protobuf v1.31.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect golang.org/x/sys v0.14.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) replace go.opentelemetry.io/otel => ../../.. 
replace go.opentelemetry.io/otel/sdk => ../../../sdk replace go.opentelemetry.io/otel/trace => ../../../trace replace go.opentelemetry.io/otel/metric => ../../../metric opentelemetry-go-1.21.0/exporters/otlp/otlptrace/go.sum000066400000000000000000000066401452547353200232530ustar00rootroot00000000000000github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= opentelemetry-go-1.21.0/exporters/otlp/otlptrace/internal/000077500000000000000000000000001452547353200237265ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlptrace/internal/tracetransform/000077500000000000000000000000001452547353200267605ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlptrace/internal/tracetransform/attribute.go000066400000000000000000000102221452547353200313070ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/resource" commonpb "go.opentelemetry.io/proto/otlp/common/v1" ) // KeyValues transforms a slice of attribute KeyValues into OTLP key-values. func KeyValues(attrs []attribute.KeyValue) []*commonpb.KeyValue { if len(attrs) == 0 { return nil } out := make([]*commonpb.KeyValue, 0, len(attrs)) for _, kv := range attrs { out = append(out, KeyValue(kv)) } return out } // Iterator transforms an attribute iterator into OTLP key-values. func Iterator(iter attribute.Iterator) []*commonpb.KeyValue { l := iter.Len() if l == 0 { return nil } out := make([]*commonpb.KeyValue, 0, l) for iter.Next() { out = append(out, KeyValue(iter.Attribute())) } return out } // ResourceAttributes transforms a Resource OTLP key-values. func ResourceAttributes(res *resource.Resource) []*commonpb.KeyValue { return Iterator(res.Iter()) } // KeyValue transforms an attribute KeyValue into an OTLP key-value. func KeyValue(kv attribute.KeyValue) *commonpb.KeyValue { return &commonpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)} } // Value transforms an attribute Value into an OTLP AnyValue. 
func Value(v attribute.Value) *commonpb.AnyValue { av := new(commonpb.AnyValue) switch v.Type() { case attribute.BOOL: av.Value = &commonpb.AnyValue_BoolValue{ BoolValue: v.AsBool(), } case attribute.BOOLSLICE: av.Value = &commonpb.AnyValue_ArrayValue{ ArrayValue: &commonpb.ArrayValue{ Values: boolSliceValues(v.AsBoolSlice()), }, } case attribute.INT64: av.Value = &commonpb.AnyValue_IntValue{ IntValue: v.AsInt64(), } case attribute.INT64SLICE: av.Value = &commonpb.AnyValue_ArrayValue{ ArrayValue: &commonpb.ArrayValue{ Values: int64SliceValues(v.AsInt64Slice()), }, } case attribute.FLOAT64: av.Value = &commonpb.AnyValue_DoubleValue{ DoubleValue: v.AsFloat64(), } case attribute.FLOAT64SLICE: av.Value = &commonpb.AnyValue_ArrayValue{ ArrayValue: &commonpb.ArrayValue{ Values: float64SliceValues(v.AsFloat64Slice()), }, } case attribute.STRING: av.Value = &commonpb.AnyValue_StringValue{ StringValue: v.AsString(), } case attribute.STRINGSLICE: av.Value = &commonpb.AnyValue_ArrayValue{ ArrayValue: &commonpb.ArrayValue{ Values: stringSliceValues(v.AsStringSlice()), }, } default: av.Value = &commonpb.AnyValue_StringValue{ StringValue: "INVALID", } } return av } func boolSliceValues(vals []bool) []*commonpb.AnyValue { converted := make([]*commonpb.AnyValue, len(vals)) for i, v := range vals { converted[i] = &commonpb.AnyValue{ Value: &commonpb.AnyValue_BoolValue{ BoolValue: v, }, } } return converted } func int64SliceValues(vals []int64) []*commonpb.AnyValue { converted := make([]*commonpb.AnyValue, len(vals)) for i, v := range vals { converted[i] = &commonpb.AnyValue{ Value: &commonpb.AnyValue_IntValue{ IntValue: v, }, } } return converted } func float64SliceValues(vals []float64) []*commonpb.AnyValue { converted := make([]*commonpb.AnyValue, len(vals)) for i, v := range vals { converted[i] = &commonpb.AnyValue{ Value: &commonpb.AnyValue_DoubleValue{ DoubleValue: v, }, } } return converted } func stringSliceValues(vals []string) []*commonpb.AnyValue { converted := make([]*commonpb.AnyValue, len(vals)) for i, v := range vals { converted[i] = &commonpb.AnyValue{ Value: &commonpb.AnyValue_StringValue{ StringValue: v, }, } } return converted } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/internal/tracetransform/attribute_test.go000066400000000000000000000152731452547353200323610ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
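// Shape sketch for the helpers above (hedged; tracetransform is internal, so
// this only illustrates the data produced, and the attribute key is made up):
//
//	kv := KeyValue(attribute.Bool("enabled", true))
//	// kv == &commonpb.KeyValue{
//	//	Key:   "enabled",
//	//	Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_BoolValue{BoolValue: true}},
//	// }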
package tracetransform import ( "testing" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/attribute" commonpb "go.opentelemetry.io/proto/otlp/common/v1" ) type attributeTest struct { attrs []attribute.KeyValue expected []*commonpb.KeyValue } func TestAttributes(t *testing.T) { for _, test := range []attributeTest{ {nil, nil}, { []attribute.KeyValue{ attribute.Int("int to int", 123), attribute.Int64("int64 to int64", 1234567), attribute.Float64("float64 to double", 1.61), attribute.String("string to string", "string"), attribute.Bool("bool to bool", true), }, []*commonpb.KeyValue{ { Key: "int to int", Value: &commonpb.AnyValue{ Value: &commonpb.AnyValue_IntValue{ IntValue: 123, }, }, }, { Key: "int64 to int64", Value: &commonpb.AnyValue{ Value: &commonpb.AnyValue_IntValue{ IntValue: 1234567, }, }, }, { Key: "float64 to double", Value: &commonpb.AnyValue{ Value: &commonpb.AnyValue_DoubleValue{ DoubleValue: 1.61, }, }, }, { Key: "string to string", Value: &commonpb.AnyValue{ Value: &commonpb.AnyValue_StringValue{ StringValue: "string", }, }, }, { Key: "bool to bool", Value: &commonpb.AnyValue{ Value: &commonpb.AnyValue_BoolValue{ BoolValue: true, }, }, }, }, }, } { got := KeyValues(test.attrs) if !assert.Len(t, got, len(test.expected)) { continue } for i, actual := range got { if a, ok := actual.Value.Value.(*commonpb.AnyValue_DoubleValue); ok { e, ok := test.expected[i].Value.Value.(*commonpb.AnyValue_DoubleValue) if !ok { t.Errorf("expected AnyValue_DoubleValue, got %T", test.expected[i].Value.Value) continue } if !assert.InDelta(t, e.DoubleValue, a.DoubleValue, 0.01) { continue } e.DoubleValue = a.DoubleValue } assert.Equal(t, test.expected[i], actual) } } } func TestArrayAttributes(t *testing.T) { // Array KeyValue supports only arrays of primitive types: // "bool", "int", "int64", // "float64", "string", for _, test := range []attributeTest{ {nil, nil}, { []attribute.KeyValue{ { Key: attribute.Key("invalid"), Value: attribute.Value{}, }, }, []*commonpb.KeyValue{ { Key: "invalid", Value: &commonpb.AnyValue{ Value: &commonpb.AnyValue_StringValue{ StringValue: "INVALID", }, }, }, }, }, { []attribute.KeyValue{ attribute.BoolSlice("bool slice to bool array", []bool{true, false}), attribute.IntSlice("int slice to int64 array", []int{1, 2, 3}), attribute.Int64Slice("int64 slice to int64 array", []int64{1, 2, 3}), attribute.Float64Slice("float64 slice to double array", []float64{1.11, 2.22, 3.33}), attribute.StringSlice("string slice to string array", []string{"foo", "bar", "baz"}), }, []*commonpb.KeyValue{ newOTelBoolArray("bool slice to bool array", []bool{true, false}), newOTelIntArray("int slice to int64 array", []int64{1, 2, 3}), newOTelIntArray("int64 slice to int64 array", []int64{1, 2, 3}), newOTelDoubleArray("float64 slice to double array", []float64{1.11, 2.22, 3.33}), newOTelStringArray("string slice to string array", []string{"foo", "bar", "baz"}), }, }, } { actualArrayAttributes := KeyValues(test.attrs) expectedArrayAttributes := test.expected if !assert.Len(t, actualArrayAttributes, len(expectedArrayAttributes)) { continue } for i, actualArrayAttr := range actualArrayAttributes { expectedArrayAttr := expectedArrayAttributes[i] expectedKey, actualKey := expectedArrayAttr.Key, actualArrayAttr.Key if !assert.Equal(t, expectedKey, actualKey) { continue } expected := expectedArrayAttr.Value.GetArrayValue() actual := actualArrayAttr.Value.GetArrayValue() if expected == nil { assert.Nil(t, actual) continue } if assert.NotNil(t, actual, "expected not nil for %s", actualKey) 
{ assertExpectedArrayValues(t, expected.Values, actual.Values) } } } } func assertExpectedArrayValues(t *testing.T, expectedValues, actualValues []*commonpb.AnyValue) { for i, actual := range actualValues { expected := expectedValues[i] if a, ok := actual.Value.(*commonpb.AnyValue_DoubleValue); ok { e, ok := expected.Value.(*commonpb.AnyValue_DoubleValue) if !ok { t.Errorf("expected AnyValue_DoubleValue, got %T", expected.Value) continue } if !assert.InDelta(t, e.DoubleValue, a.DoubleValue, 0.01) { continue } e.DoubleValue = a.DoubleValue } assert.Equal(t, expected, actual) } } func newOTelBoolArray(key string, values []bool) *commonpb.KeyValue { arrayValues := []*commonpb.AnyValue{} for _, b := range values { arrayValues = append(arrayValues, &commonpb.AnyValue{ Value: &commonpb.AnyValue_BoolValue{ BoolValue: b, }, }) } return newOTelArray(key, arrayValues) } func newOTelIntArray(key string, values []int64) *commonpb.KeyValue { arrayValues := []*commonpb.AnyValue{} for _, i := range values { arrayValues = append(arrayValues, &commonpb.AnyValue{ Value: &commonpb.AnyValue_IntValue{ IntValue: i, }, }) } return newOTelArray(key, arrayValues) } func newOTelDoubleArray(key string, values []float64) *commonpb.KeyValue { arrayValues := []*commonpb.AnyValue{} for _, d := range values { arrayValues = append(arrayValues, &commonpb.AnyValue{ Value: &commonpb.AnyValue_DoubleValue{ DoubleValue: d, }, }) } return newOTelArray(key, arrayValues) } func newOTelStringArray(key string, values []string) *commonpb.KeyValue { arrayValues := []*commonpb.AnyValue{} for _, s := range values { arrayValues = append(arrayValues, &commonpb.AnyValue{ Value: &commonpb.AnyValue_StringValue{ StringValue: s, }, }) } return newOTelArray(key, arrayValues) } func newOTelArray(key string, arrayValues []*commonpb.AnyValue) *commonpb.KeyValue { return &commonpb.KeyValue{ Key: key, Value: &commonpb.AnyValue{ Value: &commonpb.AnyValue_ArrayValue{ ArrayValue: &commonpb.ArrayValue{ Values: arrayValues, }, }, }, } } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go000066400000000000000000000020301452547353200325450ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" import ( "go.opentelemetry.io/otel/sdk/instrumentation" commonpb "go.opentelemetry.io/proto/otlp/common/v1" ) func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationScope { if il == (instrumentation.Scope{}) { return nil } return &commonpb.InstrumentationScope{ Name: il.Name, Version: il.Version, } } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/internal/tracetransform/resource.go000066400000000000000000000020071452547353200311350ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" import ( "go.opentelemetry.io/otel/sdk/resource" resourcepb "go.opentelemetry.io/proto/otlp/resource/v1" ) // Resource transforms a Resource into an OTLP Resource. func Resource(r *resource.Resource) *resourcepb.Resource { if r == nil { return nil } return &resourcepb.Resource{Attributes: ResourceAttributes(r)} } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/internal/tracetransform/resource_test.go000066400000000000000000000025541452547353200322030ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package tracetransform import ( "testing" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/resource" ) func TestNilResource(t *testing.T) { assert.Empty(t, Resource(nil)) } func TestEmptyResource(t *testing.T) { assert.Empty(t, Resource(&resource.Resource{})) } /* * This does not include any testing on the ordering of Resource Attributes. * They are stored as a map internally to the Resource and their order is not * guaranteed. */ func TestResourceAttributes(t *testing.T) { attrs := []attribute.KeyValue{attribute.Int("one", 1), attribute.Int("two", 2)} got := Resource(resource.NewSchemaless(attrs...)).GetAttributes() if !assert.Len(t, attrs, 2) { return } assert.ElementsMatch(t, KeyValues(attrs), got) } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/internal/tracetransform/span.go000066400000000000000000000135451452547353200302600ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
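// The attribute and resource transforms exercised by the tests above convert
// SDK attribute.KeyValue and resource.Resource values into their OTLP
// protobuf counterparts. A minimal illustrative sketch of how these exported
// helpers compose (the attribute names and wiring below are examples only):
//
//	attrs := []attribute.KeyValue{
//		attribute.String("service.name", "demo"),
//		attribute.Float64Slice("latency_ms", []float64{1.2, 3.4}),
//	}
//	kvs := KeyValues(attrs)                           // []*commonpb.KeyValue
//	res := Resource(resource.NewSchemaless(attrs...)) // *resourcepb.Resource
//	_, _ = kvs, res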
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/sdk/instrumentation" tracesdk "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/trace" tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) // Spans transforms a slice of OpenTelemetry spans into a slice of OTLP // ResourceSpans. func Spans(sdl []tracesdk.ReadOnlySpan) []*tracepb.ResourceSpans { if len(sdl) == 0 { return nil } rsm := make(map[attribute.Distinct]*tracepb.ResourceSpans) type key struct { r attribute.Distinct is instrumentation.Scope } ssm := make(map[key]*tracepb.ScopeSpans) var resources int for _, sd := range sdl { if sd == nil { continue } rKey := sd.Resource().Equivalent() k := key{ r: rKey, is: sd.InstrumentationScope(), } scopeSpan, iOk := ssm[k] if !iOk { // Either the resource or instrumentation scope were unknown. scopeSpan = &tracepb.ScopeSpans{ Scope: InstrumentationScope(sd.InstrumentationScope()), Spans: []*tracepb.Span{}, SchemaUrl: sd.InstrumentationScope().SchemaURL, } } scopeSpan.Spans = append(scopeSpan.Spans, span(sd)) ssm[k] = scopeSpan rs, rOk := rsm[rKey] if !rOk { resources++ // The resource was unknown. rs = &tracepb.ResourceSpans{ Resource: Resource(sd.Resource()), ScopeSpans: []*tracepb.ScopeSpans{scopeSpan}, SchemaUrl: sd.Resource().SchemaURL(), } rsm[rKey] = rs continue } // The resource has been seen before. Check if the instrumentation // library lookup was unknown because if so we need to add it to the // ResourceSpans. Otherwise, the instrumentation library has already // been seen and the append we did above will be included it in the // ScopeSpans reference. if !iOk { rs.ScopeSpans = append(rs.ScopeSpans, scopeSpan) } } // Transform the categorized map into a slice rss := make([]*tracepb.ResourceSpans, 0, resources) for _, rs := range rsm { rss = append(rss, rs) } return rss } // span transforms a Span into an OTLP span. func span(sd tracesdk.ReadOnlySpan) *tracepb.Span { if sd == nil { return nil } tid := sd.SpanContext().TraceID() sid := sd.SpanContext().SpanID() s := &tracepb.Span{ TraceId: tid[:], SpanId: sid[:], TraceState: sd.SpanContext().TraceState().String(), Status: status(sd.Status().Code, sd.Status().Description), StartTimeUnixNano: uint64(sd.StartTime().UnixNano()), EndTimeUnixNano: uint64(sd.EndTime().UnixNano()), Links: links(sd.Links()), Kind: spanKind(sd.SpanKind()), Name: sd.Name(), Attributes: KeyValues(sd.Attributes()), Events: spanEvents(sd.Events()), DroppedAttributesCount: uint32(sd.DroppedAttributes()), DroppedEventsCount: uint32(sd.DroppedEvents()), DroppedLinksCount: uint32(sd.DroppedLinks()), } if psid := sd.Parent().SpanID(); psid.IsValid() { s.ParentSpanId = psid[:] } return s } // status transform a span code and message into an OTLP span status. func status(status codes.Code, message string) *tracepb.Status { var c tracepb.Status_StatusCode switch status { case codes.Ok: c = tracepb.Status_STATUS_CODE_OK case codes.Error: c = tracepb.Status_STATUS_CODE_ERROR default: c = tracepb.Status_STATUS_CODE_UNSET } return &tracepb.Status{ Code: c, Message: message, } } // links transforms span Links to OTLP span links. 
func links(links []tracesdk.Link) []*tracepb.Span_Link { if len(links) == 0 { return nil } sl := make([]*tracepb.Span_Link, 0, len(links)) for _, otLink := range links { // This redefinition is necessary to prevent otLink.*ID[:] copies // being reused -- in short we need a new otLink per iteration. otLink := otLink tid := otLink.SpanContext.TraceID() sid := otLink.SpanContext.SpanID() sl = append(sl, &tracepb.Span_Link{ TraceId: tid[:], SpanId: sid[:], Attributes: KeyValues(otLink.Attributes), DroppedAttributesCount: uint32(otLink.DroppedAttributeCount), }) } return sl } // spanEvents transforms span Events to an OTLP span events. func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event { if len(es) == 0 { return nil } events := make([]*tracepb.Span_Event, len(es)) // Transform message events for i := 0; i < len(es); i++ { events[i] = &tracepb.Span_Event{ Name: es[i].Name, TimeUnixNano: uint64(es[i].Time.UnixNano()), Attributes: KeyValues(es[i].Attributes), DroppedAttributesCount: uint32(es[i].DroppedAttributeCount), } } return events } // spanKind transforms a SpanKind to an OTLP span kind. func spanKind(kind trace.SpanKind) tracepb.Span_SpanKind { switch kind { case trace.SpanKindInternal: return tracepb.Span_SPAN_KIND_INTERNAL case trace.SpanKindClient: return tracepb.Span_SPAN_KIND_CLIENT case trace.SpanKindServer: return tracepb.Span_SPAN_KIND_SERVER case trace.SpanKindProducer: return tracepb.Span_SPAN_KIND_PRODUCER case trace.SpanKindConsumer: return tracepb.Span_SPAN_KIND_CONSUMER default: return tracepb.Span_SPAN_KIND_UNSPECIFIED } } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/internal/tracetransform/span_test.go000066400000000000000000000236221452547353200313140ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
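// Spans, defined above, groups exported data by resource and instrumentation
// scope: spans sharing a resource fall under one ResourceSpans, and within it
// spans sharing a scope share one ScopeSpans. An illustrative sketch of that
// shape (the stub values are examples; the counts follow from the grouping):
//
//	stubs := tracetest.SpanStubs{
//		{Name: "a", InstrumentationLibrary: instrumentation.Scope{Name: "lib"}},
//		{Name: "b", InstrumentationLibrary: instrumentation.Scope{Name: "lib"}},
//	}
//	rss := Spans(stubs.Snapshots())
//	// len(rss) == 1
//	// len(rss[0].ScopeSpans) == 1
//	// len(rss[0].ScopeSpans[0].Spans) == 2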
package tracetransform import ( "testing" "time" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" tracesdk "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/sdk/trace/tracetest" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" "go.opentelemetry.io/otel/trace" tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) func TestSpanKind(t *testing.T) { for _, test := range []struct { kind trace.SpanKind expected tracepb.Span_SpanKind }{ { trace.SpanKindInternal, tracepb.Span_SPAN_KIND_INTERNAL, }, { trace.SpanKindClient, tracepb.Span_SPAN_KIND_CLIENT, }, { trace.SpanKindServer, tracepb.Span_SPAN_KIND_SERVER, }, { trace.SpanKindProducer, tracepb.Span_SPAN_KIND_PRODUCER, }, { trace.SpanKindConsumer, tracepb.Span_SPAN_KIND_CONSUMER, }, { trace.SpanKind(-1), tracepb.Span_SPAN_KIND_UNSPECIFIED, }, } { assert.Equal(t, test.expected, spanKind(test.kind)) } } func TestNilSpanEvent(t *testing.T) { assert.Nil(t, spanEvents(nil)) } func TestEmptySpanEvent(t *testing.T) { assert.Nil(t, spanEvents([]tracesdk.Event{})) } func TestSpanEvent(t *testing.T) { attrs := []attribute.KeyValue{attribute.Int("one", 1), attribute.Int("two", 2)} eventTime := time.Date(2020, 5, 20, 0, 0, 0, 0, time.UTC) got := spanEvents([]tracesdk.Event{ { Name: "test 1", Attributes: []attribute.KeyValue{}, Time: eventTime, }, { Name: "test 2", Attributes: attrs, Time: eventTime, DroppedAttributeCount: 2, }, }) if !assert.Len(t, got, 2) { return } eventTimestamp := uint64(1589932800 * 1e9) assert.Equal(t, &tracepb.Span_Event{Name: "test 1", Attributes: nil, TimeUnixNano: eventTimestamp}, got[0]) // Do not test Attributes directly, just that the return value goes to the correct field. assert.Equal(t, &tracepb.Span_Event{Name: "test 2", Attributes: KeyValues(attrs), TimeUnixNano: eventTimestamp, DroppedAttributesCount: 2}, got[1]) } func TestNilLinks(t *testing.T) { assert.Nil(t, links(nil)) } func TestEmptyLinks(t *testing.T) { assert.Nil(t, links([]tracesdk.Link{})) } func TestLinks(t *testing.T) { attrs := []attribute.KeyValue{attribute.Int("one", 1), attribute.Int("two", 2)} l := []tracesdk.Link{ { DroppedAttributeCount: 3, }, { SpanContext: trace.SpanContext{}, Attributes: attrs, DroppedAttributeCount: 3, }, } got := links(l) // Make sure we get the same number back first. if !assert.Len(t, got, 2) { return } // Empty should be empty. expected := &tracepb.Span_Link{ TraceId: []uint8{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, SpanId: []uint8{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, DroppedAttributesCount: 3, } assert.Equal(t, expected, got[0]) // Do not test Attributes directly, just that the return value goes to the correct field. expected.Attributes = KeyValues(attrs) assert.Equal(t, expected, got[1]) // Changes to our links should not change the produced links. 
l[1].SpanContext = l[1].SpanContext.WithTraceID(trace.TraceID{}) assert.Equal(t, expected, got[1]) assert.Equal(t, l[1].DroppedAttributeCount, int(got[1].DroppedAttributesCount)) } func TestStatus(t *testing.T) { for _, test := range []struct { code codes.Code message string otlpStatus tracepb.Status_StatusCode }{ { codes.Ok, "test Ok", tracepb.Status_STATUS_CODE_OK, }, { codes.Unset, "test Unset", tracepb.Status_STATUS_CODE_UNSET, }, { message: "default code is unset", otlpStatus: tracepb.Status_STATUS_CODE_UNSET, }, { codes.Error, "test Error", tracepb.Status_STATUS_CODE_ERROR, }, } { expected := &tracepb.Status{Code: test.otlpStatus, Message: test.message} assert.Equal(t, expected, status(test.code, test.message)) } } func TestNilSpan(t *testing.T) { assert.Nil(t, span(nil)) } func TestNilSpanData(t *testing.T) { assert.Nil(t, Spans(nil)) } func TestEmptySpanData(t *testing.T) { assert.Nil(t, Spans(nil)) } func TestSpanData(t *testing.T) { // Full test of span data // March 31, 2020 5:01:26 1234nanos (UTC) startTime := time.Unix(1585674086, 1234) endTime := startTime.Add(10 * time.Second) traceState, _ := trace.ParseTraceState("key1=val1,key2=val2") spanData := tracetest.SpanStub{ SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, TraceState: traceState, }), Parent: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, SpanID: trace.SpanID{0xEF, 0xEE, 0xED, 0xEC, 0xEB, 0xEA, 0xE9, 0xE8}, TraceState: traceState, Remote: true, }), SpanKind: trace.SpanKindServer, Name: "span data to span data", StartTime: startTime, EndTime: endTime, Events: []tracesdk.Event{ { Time: startTime, Attributes: []attribute.KeyValue{ attribute.Int64("CompressedByteSize", 512), }, }, { Time: endTime, Attributes: []attribute.KeyValue{ attribute.String("EventType", "Recv"), }, }, }, Links: []tracesdk.Link{ { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF}, SpanID: trace.SpanID{0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7}, TraceFlags: 0, }), Attributes: []attribute.KeyValue{ attribute.String("LinkType", "Parent"), }, DroppedAttributeCount: 0, }, { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF}, SpanID: trace.SpanID{0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7}, TraceFlags: 0, }), Attributes: []attribute.KeyValue{ attribute.String("LinkType", "Child"), }, DroppedAttributeCount: 0, }, }, Status: tracesdk.Status{ Code: codes.Error, Description: "utterly unrecognized", }, Attributes: []attribute.KeyValue{ attribute.Int64("timeout_ns", 12e9), }, DroppedAttributes: 1, DroppedEvents: 2, DroppedLinks: 3, Resource: resource.NewWithAttributes( "http://example.com/custom-resource-schema", attribute.String("rk1", "rv1"), attribute.Int64("rk2", 5), attribute.StringSlice("rk3", []string{"sv1", "sv2"}), ), InstrumentationLibrary: instrumentation.Scope{ Name: "go.opentelemetry.io/test/otel", Version: "v0.0.1", SchemaURL: semconv.SchemaURL, }, } // Not checking resource as the underlying map of our Resource makes // ordering impossible to guarantee on the output. 
The Resource // transform function has unit tests that should suffice. expectedSpan := &tracepb.Span{ TraceId: []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, SpanId: []byte{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, ParentSpanId: []byte{0xEF, 0xEE, 0xED, 0xEC, 0xEB, 0xEA, 0xE9, 0xE8}, TraceState: "key1=val1,key2=val2", Name: spanData.Name, Kind: tracepb.Span_SPAN_KIND_SERVER, StartTimeUnixNano: uint64(startTime.UnixNano()), EndTimeUnixNano: uint64(endTime.UnixNano()), Status: status(spanData.Status.Code, spanData.Status.Description), Events: spanEvents(spanData.Events), Links: links(spanData.Links), Attributes: KeyValues(spanData.Attributes), DroppedAttributesCount: 1, DroppedEventsCount: 2, DroppedLinksCount: 3, } got := Spans(tracetest.SpanStubs{spanData}.Snapshots()) require.Len(t, got, 1) assert.Equal(t, got[0].GetResource(), Resource(spanData.Resource)) assert.Equal(t, got[0].SchemaUrl, spanData.Resource.SchemaURL()) scopeSpans := got[0].GetScopeSpans() require.Len(t, scopeSpans, 1) assert.Equal(t, scopeSpans[0].SchemaUrl, spanData.InstrumentationLibrary.SchemaURL) assert.Equal(t, scopeSpans[0].GetScope(), InstrumentationScope(spanData.InstrumentationLibrary)) require.Len(t, scopeSpans[0].Spans, 1) actualSpan := scopeSpans[0].Spans[0] if diff := cmp.Diff(expectedSpan, actualSpan, cmp.Comparer(proto.Equal)); diff != "" { t.Fatalf("transformed span differs %v\n", diff) } } // Empty parent span ID should be treated as root span. func TestRootSpanData(t *testing.T) { sd := Spans(tracetest.SpanStubs{ {}, }.Snapshots()) require.Len(t, sd, 1) rs := sd[0] scopeSpans := rs.GetScopeSpans() require.Len(t, scopeSpans, 1) got := scopeSpans[0].GetSpans()[0].GetParentSpanId() // Empty means root span. assert.Nil(t, got, "incorrect transform of root parent span ID") } func TestSpanDataNilResource(t *testing.T) { assert.NotPanics(t, func() { Spans(tracetest.SpanStubs{ {}, }.Snapshots()) }) } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/000077500000000000000000000000001452547353200247635ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/client.go000066400000000000000000000223001452547353200265650ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" import ( "context" "errors" "sync" "time" "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) type client struct { endpoint string dialOpts []grpc.DialOption metadata metadata.MD exportTimeout time.Duration requestFunc retry.RequestFunc // stopCtx is used as a parent context for all exports. Therefore, when it // is canceled with the stopFunc all exports are canceled. stopCtx context.Context // stopFunc cancels stopCtx, stopping any active exports. stopFunc context.CancelFunc // ourConn keeps track of where conn was created: true if created here on // Start, or false if passed with an option. This is important on Shutdown // as the conn should only be closed if created here on start. Otherwise, // it is up to the processes that passed the conn to close it. ourConn bool conn *grpc.ClientConn tscMu sync.RWMutex tsc coltracepb.TraceServiceClient } // Compile time check *client implements otlptrace.Client. var _ otlptrace.Client = (*client)(nil) // NewClient creates a new gRPC trace client. func NewClient(opts ...Option) otlptrace.Client { return newClient(opts...) } func newClient(opts ...Option) *client { cfg := otlpconfig.NewGRPCConfig(asGRPCOptions(opts)...) ctx, cancel := context.WithCancel(context.Background()) c := &client{ endpoint: cfg.Traces.Endpoint, exportTimeout: cfg.Traces.Timeout, requestFunc: cfg.RetryConfig.RequestFunc(retryable), dialOpts: cfg.DialOptions, stopCtx: ctx, stopFunc: cancel, conn: cfg.GRPCConn, } if len(cfg.Traces.Headers) > 0 { c.metadata = metadata.New(cfg.Traces.Headers) } return c } // Start establishes a gRPC connection to the collector. func (c *client) Start(ctx context.Context) error { if c.conn == nil { // If the caller did not provide a ClientConn when the client was // created, create one using the configuration they did provide. conn, err := grpc.DialContext(ctx, c.endpoint, c.dialOpts...) if err != nil { return err } // Keep track that we own the lifecycle of this conn and need to close // it on Shutdown. c.ourConn = true c.conn = conn } // The otlptrace.Client interface states this method is called just once, // so no need to check if already started. c.tscMu.Lock() c.tsc = coltracepb.NewTraceServiceClient(c.conn) c.tscMu.Unlock() return nil } var errAlreadyStopped = errors.New("the client is already stopped") // Stop shuts down the client. // // Any active connections to a remote endpoint are closed if they were created // by the client. Any gRPC connection passed during creation using // WithGRPCConn will not be closed. It is the caller's responsibility to // handle cleanup of that resource. // // This method synchronizes with the UploadTraces method of the client. It // will wait for any active calls to that method to complete unimpeded, or it // will cancel any active calls if ctx expires. If ctx expires, the context // error will be forwarded as the returned error. 
All client held resources // will still be released in this situation. // // If the client has already stopped, an error will be returned describing // this. func (c *client) Stop(ctx context.Context) error { // Make sure to return context error if the context is done when calling this method. err := ctx.Err() // Acquire the c.tscMu lock within the ctx lifetime. acquired := make(chan struct{}) go func() { c.tscMu.Lock() close(acquired) }() select { case <-ctx.Done(): // The Stop timeout is reached. Kill any remaining exports to force // the clear of the lock and save the timeout error to return and // signal the shutdown timed out before cleanly stopping. c.stopFunc() err = ctx.Err() // To ensure the client is not left in a dirty state c.tsc needs to be // set to nil. To avoid the race condition when doing this, ensure // that all the exports are killed (initiated by c.stopFunc). <-acquired case <-acquired: } // Hold the tscMu lock for the rest of the function to ensure no new // exports are started. defer c.tscMu.Unlock() // The otlptrace.Client interface states this method is called only // once, but there is no guarantee it is called after Start. Ensure the // client is started before doing anything and let the called know if they // made a mistake. if c.tsc == nil { return errAlreadyStopped } // Clear c.tsc to signal the client is stopped. c.tsc = nil if c.ourConn { closeErr := c.conn.Close() // A context timeout error takes precedence over this error. if err == nil && closeErr != nil { err = closeErr } } return err } var errShutdown = errors.New("the client is shutdown") // UploadTraces sends a batch of spans. // // Retryable errors from the server will be handled according to any // RetryConfig the client was created with. func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error { // Hold a read lock to ensure a shut down initiated after this starts does // not abandon the export. This read lock acquire has less priority than a // write lock acquire (i.e. Stop), meaning if the client is shutting down // this will come after the shut down. c.tscMu.RLock() defer c.tscMu.RUnlock() if c.tsc == nil { return errShutdown } ctx, cancel := c.exportContext(ctx) defer cancel() return c.requestFunc(ctx, func(iCtx context.Context) error { resp, err := c.tsc.Export(iCtx, &coltracepb.ExportTraceServiceRequest{ ResourceSpans: protoSpans, }) if resp != nil && resp.PartialSuccess != nil { msg := resp.PartialSuccess.GetErrorMessage() n := resp.PartialSuccess.GetRejectedSpans() if n != 0 || msg != "" { err := internal.TracePartialSuccessError(n, msg) otel.Handle(err) } } // nil is converted to OK. if status.Code(err) == codes.OK { // Success. return nil } return err }) } // exportContext returns a copy of parent with an appropriate deadline and // cancellation function. // // It is the callers responsibility to cancel the returned context once its // use is complete, via the parent or directly with the returned CancelFunc, to // ensure all resources are correctly released. func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) { var ( ctx context.Context cancel context.CancelFunc ) if c.exportTimeout > 0 { ctx, cancel = context.WithTimeout(parent, c.exportTimeout) } else { ctx, cancel = context.WithCancel(parent) } if c.metadata.Len() > 0 { ctx = metadata.NewOutgoingContext(ctx, c.metadata) } // Unify the client stopCtx with the parent. 
go func() { select { case <-ctx.Done(): case <-c.stopCtx.Done(): // Cancel the export as the shutdown has timed out. cancel() } }() return ctx, cancel } // retryable returns if err identifies a request that can be retried and a // duration to wait for if an explicit throttle time is included in err. func retryable(err error) (bool, time.Duration) { s := status.Convert(err) return retryableGRPCStatus(s) } func retryableGRPCStatus(s *status.Status) (bool, time.Duration) { switch s.Code() { case codes.Canceled, codes.DeadlineExceeded, codes.Aborted, codes.OutOfRange, codes.Unavailable, codes.DataLoss: // Additionally handle RetryInfo. _, d := throttleDelay(s) return true, d case codes.ResourceExhausted: // Retry only if the server signals that the recovery from resource exhaustion is possible. return throttleDelay(s) } // Not a retry-able error. return false, 0 } // throttleDelay returns of the status is RetryInfo // and the its duration to wait for if an explicit throttle time. func throttleDelay(s *status.Status) (bool, time.Duration) { for _, detail := range s.Details() { if t, ok := detail.(*errdetails.RetryInfo); ok { return true, t.RetryDelay.AsDuration() } } return false, 0 } // MarshalLog is the marshaling function used by the logging system to represent this Client. func (c *client) MarshalLog() interface{} { return struct { Type string Endpoint string }{ Type: "otlphttpgrpc", Endpoint: c.endpoint, } } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/client_test.go000066400000000000000000000275361452547353200276440ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
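// The client above is normally driven through otlptrace.New, which starts the
// client and exposes it as a span exporter. A minimal illustrative wiring
// (the endpoint and option values are examples only):
//
//	ctx := context.Background()
//	client := otlptracegrpc.NewClient(
//		otlptracegrpc.WithEndpoint("localhost:4317"),
//		otlptracegrpc.WithInsecure(),
//		otlptracegrpc.WithTimeout(10*time.Second),
//		otlptracegrpc.WithRetry(otlptracegrpc.RetryConfig{Enabled: true}),
//	)
//	exp, err := otlptrace.New(ctx, client)
//	if err != nil {
//		// handle the startup error
//	}
//	defer func() { _ = exp.Shutdown(ctx) }()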
package otlptracegrpc_test import ( "context" "errors" "fmt" "net" "strings" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/goleak" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding/gzip" "google.golang.org/grpc/status" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlptracetest" sdktrace "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/sdk/trace/tracetest" coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" commonpb "go.opentelemetry.io/proto/otlp/common/v1" ) func TestMain(m *testing.M) { goleak.VerifyTestMain(m) } var roSpans = tracetest.SpanStubs{{Name: "Span 0"}}.Snapshots() func contextWithTimeout(parent context.Context, t *testing.T, timeout time.Duration) (context.Context, context.CancelFunc) { d, ok := t.Deadline() if !ok { d = time.Now().Add(timeout) } else { d = d.Add(-1 * time.Millisecond) now := time.Now() if d.Sub(now) > timeout { d = now.Add(timeout) } } return context.WithDeadline(parent, d) } func TestNewEndToEnd(t *testing.T) { tests := []struct { name string additionalOpts []otlptracegrpc.Option }{ { name: "StandardExporter", }, { name: "WithCompressor", additionalOpts: []otlptracegrpc.Option{ otlptracegrpc.WithCompressor(gzip.Name), }, }, { name: "WithServiceConfig", additionalOpts: []otlptracegrpc.Option{ otlptracegrpc.WithServiceConfig("{}"), }, }, { name: "WithDialOptions", additionalOpts: []otlptracegrpc.Option{ otlptracegrpc.WithDialOption(grpc.WithBlock()), }, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { newExporterEndToEndTest(t, test.additionalOpts) }) } } func newGRPCExporter(t *testing.T, ctx context.Context, endpoint string, additionalOpts ...otlptracegrpc.Option) *otlptrace.Exporter { opts := []otlptracegrpc.Option{ otlptracegrpc.WithInsecure(), otlptracegrpc.WithEndpoint(endpoint), otlptracegrpc.WithReconnectionPeriod(50 * time.Millisecond), } opts = append(opts, additionalOpts...) client := otlptracegrpc.NewClient(opts...) exp, err := otlptrace.New(ctx, client) if err != nil { t.Fatalf("failed to create a new collector exporter: %v", err) } return exp } func newExporterEndToEndTest(t *testing.T, additionalOpts []otlptracegrpc.Option) { mc := runMockCollector(t) ctx := context.Background() exp := newGRPCExporter(t, ctx, mc.endpoint, additionalOpts...) t.Cleanup(func() { ctx, cancel := contextWithTimeout(ctx, t, 10*time.Second) defer cancel() require.NoError(t, exp.Shutdown(ctx)) }) // RunEndToEndTest closes mc. 
otlptracetest.RunEndToEndTest(ctx, t, exp, mc) } func TestExporterShutdown(t *testing.T) { mc := runMockCollectorAtEndpoint(t, "localhost:0") t.Cleanup(func() { require.NoError(t, mc.stop()) }) factory := func() otlptrace.Client { return otlptracegrpc.NewClient( otlptracegrpc.WithEndpoint(mc.endpoint), otlptracegrpc.WithInsecure(), otlptracegrpc.WithDialOption(grpc.WithBlock()), ) } otlptracetest.RunExporterShutdownTest(t, factory) } func TestNewInvokeStartThenStopManyTimes(t *testing.T) { mc := runMockCollector(t) t.Cleanup(func() { require.NoError(t, mc.stop()) }) ctx := context.Background() exp := newGRPCExporter(t, ctx, mc.endpoint) t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) }) // Invoke Start numerous times, should return errAlreadyStarted for i := 0; i < 10; i++ { if err := exp.Start(ctx); err == nil || !strings.Contains(err.Error(), "already started") { t.Fatalf("#%d unexpected Start error: %v", i, err) } } if err := exp.Shutdown(ctx); err != nil { t.Fatalf("failed to Shutdown the exporter: %v", err) } // Invoke Shutdown numerous times for i := 0; i < 10; i++ { if err := exp.Shutdown(ctx); err != nil { t.Fatalf(`#%d got error (%v) expected none`, i, err) } } } // This test takes a long time to run: to skip it, run tests using: -short. func TestNewCollectorOnBadConnection(t *testing.T) { if testing.Short() { t.Skipf("Skipping this long running test") } ln, err := net.Listen("tcp", "localhost:0") if err != nil { t.Fatalf("Failed to grab an available port: %v", err) } // Firstly close the "collector's" channel: optimistically this endpoint won't get reused ASAP // However, our goal of closing it is to simulate an unavailable connection _ = ln.Close() _, collectorPortStr, _ := net.SplitHostPort(ln.Addr().String()) endpoint := fmt.Sprintf("localhost:%s", collectorPortStr) ctx := context.Background() exp := newGRPCExporter(t, ctx, endpoint) require.NoError(t, exp.Shutdown(ctx)) } func TestNewWithEndpoint(t *testing.T) { mc := runMockCollector(t) t.Cleanup(func() { require.NoError(t, mc.stop()) }) ctx := context.Background() exp := newGRPCExporter(t, ctx, mc.endpoint) require.NoError(t, exp.Shutdown(ctx)) } func TestNewWithHeaders(t *testing.T) { mc := runMockCollector(t) t.Cleanup(func() { require.NoError(t, mc.stop()) }) ctx := context.Background() exp := newGRPCExporter(t, ctx, mc.endpoint, otlptracegrpc.WithHeaders(map[string]string{"header1": "value1"})) t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) }) require.NoError(t, exp.ExportSpans(ctx, roSpans)) headers := mc.getHeaders() require.Regexp(t, "OTel OTLP Exporter Go/1\\..*", headers.Get("user-agent")) require.Len(t, headers.Get("header1"), 1) assert.Equal(t, "value1", headers.Get("header1")[0]) } func TestExportSpansTimeoutHonored(t *testing.T) { ctx, cancel := contextWithTimeout(context.Background(), t, 1*time.Minute) t.Cleanup(cancel) mc := runMockCollector(t) exportBlock := make(chan struct{}) mc.traceSvc.exportBlock = exportBlock t.Cleanup(func() { require.NoError(t, mc.stop()) }) exp := newGRPCExporter( t, ctx, mc.endpoint, otlptracegrpc.WithTimeout(1*time.Nanosecond), otlptracegrpc.WithRetry(otlptracegrpc.RetryConfig{Enabled: false}), ) t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) }) err := exp.ExportSpans(ctx, roSpans) // Release the export so everything is cleaned up on shutdown. 
close(exportBlock) unwrapped := errors.Unwrap(err) require.Equal(t, codes.DeadlineExceeded, status.Convert(unwrapped).Code()) require.True(t, strings.HasPrefix(err.Error(), "traces export: "), err) } func TestNewWithMultipleAttributeTypes(t *testing.T) { mc := runMockCollector(t) ctx, cancel := contextWithTimeout(context.Background(), t, 10*time.Second) t.Cleanup(cancel) exp := newGRPCExporter(t, ctx, mc.endpoint) t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) }) tp := sdktrace.NewTracerProvider( sdktrace.WithSampler(sdktrace.AlwaysSample()), sdktrace.WithBatcher( exp, // add following two options to ensure flush sdktrace.WithBatchTimeout(5*time.Second), sdktrace.WithMaxExportBatchSize(10), ), ) t.Cleanup(func() { require.NoError(t, tp.Shutdown(ctx)) }) tr := tp.Tracer("test-tracer") testKvs := []attribute.KeyValue{ attribute.Int("Int", 1), attribute.Int64("Int64", int64(3)), attribute.Float64("Float64", 2.22), attribute.Bool("Bool", true), attribute.String("String", "test"), } _, span := tr.Start(ctx, "AlwaysSample") span.SetAttributes(testKvs...) span.End() // Flush and close. func() { ctx, cancel := contextWithTimeout(ctx, t, 10*time.Second) defer cancel() require.NoError(t, tp.Shutdown(ctx)) }() // Wait >2 cycles. <-time.After(40 * time.Millisecond) // Now shutdown the exporter require.NoError(t, exp.Shutdown(ctx)) // Shutdown the collector too so that we can begin // verification checks of expected data back. require.NoError(t, mc.stop()) // Now verify that we only got one span rss := mc.getSpans() if got, want := len(rss), 1; got != want { t.Fatalf("resource span count: got %d, want %d\n", got, want) } expected := []*commonpb.KeyValue{ { Key: "Int", Value: &commonpb.AnyValue{ Value: &commonpb.AnyValue_IntValue{ IntValue: 1, }, }, }, { Key: "Int64", Value: &commonpb.AnyValue{ Value: &commonpb.AnyValue_IntValue{ IntValue: 3, }, }, }, { Key: "Float64", Value: &commonpb.AnyValue{ Value: &commonpb.AnyValue_DoubleValue{ DoubleValue: 2.22, }, }, }, { Key: "Bool", Value: &commonpb.AnyValue{ Value: &commonpb.AnyValue_BoolValue{ BoolValue: true, }, }, }, { Key: "String", Value: &commonpb.AnyValue{ Value: &commonpb.AnyValue_StringValue{ StringValue: "test", }, }, }, } // Verify attributes if !assert.Len(t, rss[0].Attributes, len(expected)) { t.Fatalf("attributes count: got %d, want %d\n", len(rss[0].Attributes), len(expected)) } for i, actual := range rss[0].Attributes { if a, ok := actual.Value.Value.(*commonpb.AnyValue_DoubleValue); ok { e, ok := expected[i].Value.Value.(*commonpb.AnyValue_DoubleValue) if !ok { t.Errorf("expected AnyValue_DoubleValue, got %T", expected[i].Value.Value) continue } if !assert.InDelta(t, e.DoubleValue, a.DoubleValue, 0.01) { continue } e.DoubleValue = a.DoubleValue } assert.Equal(t, expected[i], actual) } } func TestStartErrorInvalidAddress(t *testing.T) { client := otlptracegrpc.NewClient( otlptracegrpc.WithInsecure(), // Validate the connection in Start (which should return the error). 
otlptracegrpc.WithDialOption( grpc.WithBlock(), grpc.FailOnNonTempDialError(true), ), otlptracegrpc.WithEndpoint("invalid"), otlptracegrpc.WithReconnectionPeriod(time.Hour), ) err := client.Start(context.Background()) assert.EqualError(t, err, `connection error: desc = "transport: error while dialing: dial tcp: address invalid: missing port in address"`) } func TestEmptyData(t *testing.T) { mc := runMockCollector(t) t.Cleanup(func() { require.NoError(t, mc.stop()) }) ctx := context.Background() exp := newGRPCExporter(t, ctx, mc.endpoint) t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) }) assert.NoError(t, exp.ExportSpans(ctx, nil)) } func TestPartialSuccess(t *testing.T) { mc := runMockCollectorWithConfig(t, &mockConfig{ partial: &coltracepb.ExportTracePartialSuccess{ RejectedSpans: 2, ErrorMessage: "partially successful", }, }) t.Cleanup(func() { require.NoError(t, mc.stop()) }) errs := []error{} otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) { errs = append(errs, err) })) ctx := context.Background() exp := newGRPCExporter(t, ctx, mc.endpoint) t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) }) require.NoError(t, exp.ExportSpans(ctx, roSpans)) require.Equal(t, 1, len(errs)) require.Contains(t, errs[0].Error(), "partially successful") require.Contains(t, errs[0].Error(), "2 spans rejected") } func TestCustomUserAgent(t *testing.T) { customUserAgent := "custom-user-agent" mc := runMockCollector(t) t.Cleanup(func() { require.NoError(t, mc.stop()) }) ctx := context.Background() exp := newGRPCExporter(t, ctx, mc.endpoint, otlptracegrpc.WithDialOption(grpc.WithUserAgent(customUserAgent))) t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) }) require.NoError(t, exp.ExportSpans(ctx, roSpans)) headers := mc.getHeaders() require.Contains(t, headers.Get("user-agent")[0], customUserAgent) } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/client_unit_test.go000066400000000000000000000133511452547353200306710ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package otlptracegrpc import ( "context" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/durationpb" ) func TestThrottleDelay(t *testing.T) { c := codes.ResourceExhausted testcases := []struct { status *status.Status wantOK bool wantDuration time.Duration }{ { status: status.New(c, "NoRetryInfo"), wantOK: false, wantDuration: 0, }, { status: func() *status.Status { s, err := status.New(c, "SingleRetryInfo").WithDetails( &errdetails.RetryInfo{ RetryDelay: durationpb.New(15 * time.Millisecond), }, ) require.NoError(t, err) return s }(), wantOK: true, wantDuration: 15 * time.Millisecond, }, { status: func() *status.Status { s, err := status.New(c, "ErrorInfo").WithDetails( &errdetails.ErrorInfo{Reason: "no throttle detail"}, ) require.NoError(t, err) return s }(), wantOK: false, wantDuration: 0, }, { status: func() *status.Status { s, err := status.New(c, "ErrorAndRetryInfo").WithDetails( &errdetails.ErrorInfo{Reason: "with throttle detail"}, &errdetails.RetryInfo{ RetryDelay: durationpb.New(13 * time.Minute), }, ) require.NoError(t, err) return s }(), wantOK: true, wantDuration: 13 * time.Minute, }, { status: func() *status.Status { s, err := status.New(c, "DoubleRetryInfo").WithDetails( &errdetails.RetryInfo{ RetryDelay: durationpb.New(13 * time.Minute), }, &errdetails.RetryInfo{ RetryDelay: durationpb.New(15 * time.Minute), }, ) require.NoError(t, err) return s }(), wantOK: true, wantDuration: 13 * time.Minute, }, } for _, tc := range testcases { t.Run(tc.status.Message(), func(t *testing.T) { ok, d := throttleDelay(tc.status) assert.Equal(t, tc.wantOK, ok) assert.Equal(t, tc.wantDuration, d) }) } } func TestRetryable(t *testing.T) { retryableCodes := map[codes.Code]bool{ codes.OK: false, codes.Canceled: true, codes.Unknown: false, codes.InvalidArgument: false, codes.DeadlineExceeded: true, codes.NotFound: false, codes.AlreadyExists: false, codes.PermissionDenied: false, codes.ResourceExhausted: false, codes.FailedPrecondition: false, codes.Aborted: true, codes.OutOfRange: true, codes.Unimplemented: false, codes.Internal: false, codes.Unavailable: true, codes.DataLoss: true, codes.Unauthenticated: false, } for c, want := range retryableCodes { got, _ := retryable(status.Error(c, "")) assert.Equalf(t, want, got, "evaluate(%s)", c) } } func TestRetryableGRPCStatusResourceExhaustedWithRetryInfo(t *testing.T) { delay := 15 * time.Millisecond s, err := status.New(codes.ResourceExhausted, "WithRetryInfo").WithDetails( &errdetails.RetryInfo{ RetryDelay: durationpb.New(delay), }, ) require.NoError(t, err) ok, d := retryableGRPCStatus(s) assert.True(t, ok) assert.Equal(t, delay, d) } func TestUnstartedStop(t *testing.T) { client := NewClient() assert.ErrorIs(t, client.Stop(context.Background()), errAlreadyStopped) } func TestUnstartedUploadTrace(t *testing.T) { client := NewClient() assert.ErrorIs(t, client.UploadTraces(context.Background(), nil), errShutdown) } func TestExportContextHonorsParentDeadline(t *testing.T) { now := time.Now() ctx, cancel := context.WithDeadline(context.Background(), now) t.Cleanup(cancel) // Without a client timeout, the parent deadline should be used. 
client := newClient(WithTimeout(0)) eCtx, eCancel := client.exportContext(ctx) t.Cleanup(eCancel) deadline, ok := eCtx.Deadline() assert.True(t, ok, "deadline not propagated to child context") assert.Equal(t, now, deadline) } func TestExportContextHonorsClientTimeout(t *testing.T) { // Setting a timeout should ensure a deadline is set on the context. client := newClient(WithTimeout(1 * time.Second)) ctx, cancel := client.exportContext(context.Background()) t.Cleanup(cancel) _, ok := ctx.Deadline() assert.True(t, ok, "timeout not set as deadline for child context") } func TestExportContextLinksStopSignal(t *testing.T) { rootCtx := context.Background() client := newClient(WithInsecure()) t.Cleanup(func() { require.NoError(t, client.Stop(rootCtx)) }) require.NoError(t, client.Start(rootCtx)) ctx, cancel := client.exportContext(rootCtx) t.Cleanup(cancel) require.False(t, func() bool { select { case <-ctx.Done(): return true default: } return false }(), "context should not be done prior to canceling it") // The client.stopFunc cancels the client.stopCtx. This should have been // setup as a parent of ctx. Therefore, it should cancel ctx as well. client.stopFunc() // Assert this with Eventually to account for goroutine scheduler timing. assert.Eventually(t, func() bool { select { case <-ctx.Done(): return true default: } return false }, 10*time.Second, time.Microsecond) } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/doc.go000066400000000000000000000106171452547353200260640ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* Package otlptracegrpc provides an OTLP span exporter using gRPC. By default the telemetry is sent to https://localhost:4317. Exporter should be created using [New]. The environment variables described below can be used for configuration. OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4317") - target to which the exporter sends telemetry. The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md. The value must contain a host. The value may additionally a port, a scheme, and a path. The value accepts "http" and "https" scheme. The value should not contain a query string or fragment. OTEL_EXPORTER_OTLP_TRACES_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT. The configuration can be overridden by [WithEndpoint], [WithInsecure], [WithGRPCConn] options. OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_TRACES_INSECURE (default: "false") - setting "true" disables client transport security for the exporter's gRPC connection. You can use this only when an endpoint is provided without the http or https scheme. OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT setting overrides the scheme defined via OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT. OTEL_EXPORTER_OTLP_TRACES_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE. 
The configuration can be overridden by [WithInsecure], [WithGRPCConn] options. OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS (default: none) - key-value pairs used as gRPC metadata associated with gRPC requests. The value is expected to be represented in a format matching to the [W3C Baggage HTTP Header Content Format], except that additional semi-colon delimited metadata is not supported. Example value: "key1=value1,key2=value2". OTEL_EXPORTER_OTLP_TRACES_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS. The configuration can be overridden by [WithHeaders] option. OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT (default: "10000") - maximum time in milliseconds the OTLP exporter waits for each batch export. OTEL_EXPORTER_OTLP_TRACES_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT. The configuration can be overridden by [WithTimeout] option. OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION (default: none) - the gRPC compressor the exporter uses. Supported value: "gzip". OTEL_EXPORTER_OTLP_TRACES_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION. The configuration can be overridden by [WithCompressor], [WithGRPCConn] options. OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE (default: none) - the filepath to the trusted certificate to use when verifying a server's TLS credentials. OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE. The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options. OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE (default: none) - the filepath to the client certificate/chain trust for clients private key to use in mTLS communication in PEM format. OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE. The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options. OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY (default: none) - the filepath to the clients private key to use in mTLS communication in PEM format. OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY. The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] option. [W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content */ package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/example_test.go000066400000000000000000000022521452547353200300050ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
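// As the package documentation above describes, when no explicit options are
// given the exporter takes its configuration from the OTEL_EXPORTER_OTLP_*
// environment variables, and options passed in code override those values.
// An illustrative sketch (the endpoint and header values are examples only):
//
//	// With e.g. OTEL_EXPORTER_OTLP_TRACES_ENDPOINT and
//	// OTEL_EXPORTER_OTLP_TRACES_HEADERS set in the environment:
//	exp, err := otlptracegrpc.New(ctx)
//	if err != nil {
//		// handle configuration/startup errors
//	}
//
//	// Or override the environment programmatically:
//	exp, err = otlptracegrpc.New(ctx,
//		otlptracegrpc.WithEndpoint("collector.example.com:4317"),
//		otlptracegrpc.WithHeaders(map[string]string{"api-key": "example"}),
//	)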
package otlptracegrpc_test import ( "context" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" "go.opentelemetry.io/otel/sdk/trace" ) func Example() { ctx := context.Background() exp, err := otlptracegrpc.New(ctx) if err != nil { panic(err) } tracerProvider := trace.NewTracerProvider(trace.WithBatcher(exp)) defer func() { if err := tracerProvider.Shutdown(ctx); err != nil { panic(err) } }() otel.SetTracerProvider(tracerProvider) // From here, the tracerProvider can be used by instrumentation to collect // telemetry. } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/exporter.go000066400000000000000000000021431452547353200271620ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" import ( "context" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" ) // New constructs a new Exporter and starts it. func New(ctx context.Context, opts ...Option) (*otlptrace.Exporter, error) { return otlptrace.New(ctx, NewClient(opts...)) } // NewUnstarted constructs a new Exporter and does not start it. func NewUnstarted(opts ...Option) *otlptrace.Exporter { return otlptrace.NewUnstarted(NewClient(opts...)) } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/go.mod000066400000000000000000000027211452547353200260730ustar00rootroot00000000000000module go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc go 1.20 require ( github.com/cenkalti/backoff/v4 v4.2.1 github.com/stretchr/testify v1.8.4 go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 go.opentelemetry.io/otel/sdk v1.21.0 go.opentelemetry.io/otel/trace v1.21.0 go.opentelemetry.io/proto/otlp v1.0.0 go.uber.org/goleak v1.3.0 google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.31.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/kr/text v0.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect golang.org/x/net v0.17.0 // indirect golang.org/x/sys v0.14.0 // indirect golang.org/x/text v0.13.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) replace go.opentelemetry.io/otel => ../../../.. 
replace go.opentelemetry.io/otel/sdk => ../../../../sdk replace go.opentelemetry.io/otel/exporters/otlp/otlptrace => ../ replace go.opentelemetry.io/otel/trace => ../../../../trace replace go.opentelemetry.io/otel/metric => ../../../../metric opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/go.sum000066400000000000000000000110401452547353200261120ustar00rootroot00000000000000github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto 
v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/internal/000077500000000000000000000000001452547353200265775ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/000077500000000000000000000000001452547353200305555ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go000066400000000000000000000131701452547353200330640ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/envconfig/envconfig.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig" import ( "crypto/tls" "crypto/x509" "errors" "fmt" "net/url" "strconv" "strings" "time" "go.opentelemetry.io/otel/internal/global" ) // ConfigFn is the generic function used to set a config. type ConfigFn func(*EnvOptionsReader) // EnvOptionsReader reads the required environment variables. type EnvOptionsReader struct { GetEnv func(string) string ReadFile func(string) ([]byte, error) Namespace string } // Apply runs every ConfigFn. func (e *EnvOptionsReader) Apply(opts ...ConfigFn) { for _, o := range opts { o(e) } } // GetEnvValue gets an OTLP environment variable value of the specified key // using the GetEnv function. // This function prepends the OTLP specified namespace to all key lookups. 
func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) { v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key))) return v, v != "" } // WithString retrieves the specified config and passes it to ConfigFn as a string. func WithString(n string, fn func(string)) func(e *EnvOptionsReader) { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { fn(v) } } } // WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn. func WithBool(n string, fn func(bool)) ConfigFn { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { b := strings.ToLower(v) == "true" fn(b) } } } // WithDuration retrieves the specified config and passes it to ConfigFn as a duration. func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { d, err := strconv.Atoi(v) if err != nil { global.Error(err, "parse duration", "input", v) return } fn(time.Duration(d) * time.Millisecond) } } } // WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers. func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { fn(stringToHeader(v)) } } } // WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL. func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { u, err := url.Parse(v) if err != nil { global.Error(err, "parse url", "input", v) return } fn(u) } } } // WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn. func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { b, err := e.ReadFile(v) if err != nil { global.Error(err, "read tls ca cert file", "file", v) return } c, err := createCertPool(b) if err != nil { global.Error(err, "create tls cert pool") return } fn(c) } } } // WithClientCert returns a ConfigFn that reads the environment variables nc and nk as filepaths to a client certificate and key pair. If they exist, they are parsed as a crypto/tls.Certificate, which is passed to fn.
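//
// Illustrative sketch (an assumed wiring, not generated code; the variable
// names are the ones used elsewhere in this package's tests): loading a client
// key pair from the files named by two namespaced environment variables.
//
//	r := EnvOptionsReader{GetEnv: os.Getenv, ReadFile: os.ReadFile, Namespace: "OTEL_EXPORTER_OTLP"}
//	r.Apply(
//		WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) {
//			_ = c // ready to be placed in a tls.Config Certificates slice
//		}),
//	)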
func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn { return func(e *EnvOptionsReader) { vc, okc := e.GetEnvValue(nc) vk, okk := e.GetEnvValue(nk) if !okc || !okk { return } cert, err := e.ReadFile(vc) if err != nil { global.Error(err, "read tls client cert", "file", vc) return } key, err := e.ReadFile(vk) if err != nil { global.Error(err, "read tls client key", "file", vk) return } crt, err := tls.X509KeyPair(cert, key) if err != nil { global.Error(err, "create tls client key pair") return } fn(crt) } } func keyWithNamespace(ns, key string) string { if ns == "" { return key } return fmt.Sprintf("%s_%s", ns, key) } func stringToHeader(value string) map[string]string { headersPairs := strings.Split(value, ",") headers := make(map[string]string) for _, header := range headersPairs { n, v, found := strings.Cut(header, "=") if !found { global.Error(errors.New("missing '="), "parse headers", "input", header) continue } name, err := url.PathUnescape(n) if err != nil { global.Error(err, "escape header key", "key", n) continue } trimmedName := strings.TrimSpace(name) value, err := url.PathUnescape(v) if err != nil { global.Error(err, "escape header value", "value", v) continue } trimmedValue := strings.TrimSpace(value) headers[trimmedName] = trimmedValue } return headers } func createCertPool(certBytes []byte) (*x509.CertPool, error) { cp := x509.NewCertPool() if ok := cp.AppendCertsFromPEM(certBytes); !ok { return nil, errors.New("failed to append certificate to the cert pool") } return cp, nil } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig_test.go000066400000000000000000000260371452547353200341310ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/envconfig/envconfig_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package envconfig import ( "crypto/tls" "crypto/x509" "errors" "net/url" "testing" "time" "github.com/stretchr/testify/assert" ) const WeakKey = ` -----BEGIN EC PRIVATE KEY----- MHcCAQEEIEbrSPmnlSOXvVzxCyv+VR3a0HDeUTvOcqrdssZ2k4gFoAoGCCqGSM49 AwEHoUQDQgAEDMTfv75J315C3K9faptS9iythKOMEeV/Eep73nWX531YAkmmwBSB 2dXRD/brsgLnfG57WEpxZuY7dPRbxu33BA== -----END EC PRIVATE KEY----- ` const WeakCertificate = ` -----BEGIN CERTIFICATE----- MIIBjjCCATWgAwIBAgIUKQSMC66MUw+kPp954ZYOcyKAQDswCgYIKoZIzj0EAwIw EjEQMA4GA1UECgwHb3RlbC1nbzAeFw0yMjEwMTkwMDA5MTlaFw0yMzEwMTkwMDA5 MTlaMBIxEDAOBgNVBAoMB290ZWwtZ28wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNC AAQMxN+/vknfXkLcr19qm1L2LK2Eo4wR5X8R6nvedZfnfVgCSabAFIHZ1dEP9uuy Aud8bntYSnFm5jt09FvG7fcEo2kwZzAdBgNVHQ4EFgQUicGuhnTTkYLZwofXMNLK SHFeCWgwHwYDVR0jBBgwFoAUicGuhnTTkYLZwofXMNLKSHFeCWgwDwYDVR0TAQH/ BAUwAwEB/zAUBgNVHREEDTALgglsb2NhbGhvc3QwCgYIKoZIzj0EAwIDRwAwRAIg Lfma8FnnxeSOi6223AsFfYwsNZ2RderNsQrS0PjEHb0CIBkrWacqARUAu7uT4cGu jVcIxYQqhId5L8p/mAv2PWZS -----END CERTIFICATE----- ` type testOption struct { TestString string TestBool bool TestDuration time.Duration TestHeaders map[string]string TestURL *url.URL TestTLS *tls.Config } func TestEnvConfig(t *testing.T) { parsedURL, err := url.Parse("https://example.com") assert.NoError(t, err) options := []testOption{} for _, testcase := range []struct { name string reader EnvOptionsReader configs []ConfigFn expectedOptions []testOption }{ { name: "with no namespace and a matching key", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithString("HELLO", func(v string) { options = append(options, testOption{TestString: v}) }), }, expectedOptions: []testOption{ { TestString: "world", }, }, }, { name: "with no namespace and a non-matching key", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithString("HOLA", func(v string) { options = append(options, testOption{TestString: v}) }), }, expectedOptions: []testOption{}, }, { name: "with a namespace and a matching key", reader: EnvOptionsReader{ Namespace: "MY_NAMESPACE", GetEnv: func(n string) string { if n == "MY_NAMESPACE_HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithString("HELLO", func(v string) { options = append(options, testOption{TestString: v}) }), }, expectedOptions: []testOption{ { TestString: "world", }, }, }, { name: "with no namespace and a non-matching key", reader: EnvOptionsReader{ Namespace: "MY_NAMESPACE", GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithString("HELLO", func(v string) { options = append(options, testOption{TestString: v}) }), }, expectedOptions: []testOption{}, }, { name: "with a bool config", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "true" } else if n == "WORLD" { return "false" } return "" }, }, configs: []ConfigFn{ WithBool("HELLO", func(b bool) { options = append(options, testOption{TestBool: b}) }), WithBool("WORLD", func(b bool) { options = append(options, testOption{TestBool: b}) }), }, expectedOptions: []testOption{ { TestBool: true, }, { TestBool: false, }, }, }, { name: "with an invalid bool config", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithBool("HELLO", func(b bool) { options = append(options, testOption{TestBool: b}) }), }, expectedOptions: []testOption{ { TestBool: false, 
}, }, }, { name: "with a duration config", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "60" } return "" }, }, configs: []ConfigFn{ WithDuration("HELLO", func(v time.Duration) { options = append(options, testOption{TestDuration: v}) }), }, expectedOptions: []testOption{ { TestDuration: 60_000_000, // 60 milliseconds }, }, }, { name: "with an invalid duration config", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithDuration("HELLO", func(v time.Duration) { options = append(options, testOption{TestDuration: v}) }), }, expectedOptions: []testOption{}, }, { name: "with headers", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "userId=42,userName=alice" } return "" }, }, configs: []ConfigFn{ WithHeaders("HELLO", func(v map[string]string) { options = append(options, testOption{TestHeaders: v}) }), }, expectedOptions: []testOption{ { TestHeaders: map[string]string{ "userId": "42", "userName": "alice", }, }, }, }, { name: "with invalid headers", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithHeaders("HELLO", func(v map[string]string) { options = append(options, testOption{TestHeaders: v}) }), }, expectedOptions: []testOption{ { TestHeaders: map[string]string{}, }, }, }, { name: "with URL", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "https://example.com" } return "" }, }, configs: []ConfigFn{ WithURL("HELLO", func(v *url.URL) { options = append(options, testOption{TestURL: v}) }), }, expectedOptions: []testOption{ { TestURL: parsedURL, }, }, }, { name: "with invalid URL", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "i nvalid://url" } return "" }, }, configs: []ConfigFn{ WithURL("HELLO", func(v *url.URL) { options = append(options, testOption{TestURL: v}) }), }, expectedOptions: []testOption{}, }, } { t.Run(testcase.name, func(t *testing.T) { testcase.reader.Apply(testcase.configs...) assert.Equal(t, testcase.expectedOptions, options) options = []testOption{} }) } } func TestWithTLSConfig(t *testing.T) { pool, err := createCertPool([]byte(WeakCertificate)) assert.NoError(t, err) reader := EnvOptionsReader{ GetEnv: func(n string) string { if n == "CERTIFICATE" { return "/path/cert.pem" } return "" }, ReadFile: func(p string) ([]byte, error) { if p == "/path/cert.pem" { return []byte(WeakCertificate), nil } return []byte{}, nil }, } var option testOption reader.Apply( WithCertPool("CERTIFICATE", func(cp *x509.CertPool) { option = testOption{TestTLS: &tls.Config{RootCAs: cp}} }), ) // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. 
assert.Equal(t, pool.Subjects(), option.TestTLS.RootCAs.Subjects()) } func TestWithClientCert(t *testing.T) { cert, err := tls.X509KeyPair([]byte(WeakCertificate), []byte(WeakKey)) assert.NoError(t, err) reader := EnvOptionsReader{ GetEnv: func(n string) string { switch n { case "CLIENT_CERTIFICATE": return "/path/tls.crt" case "CLIENT_KEY": return "/path/tls.key" } return "" }, ReadFile: func(n string) ([]byte, error) { switch n { case "/path/tls.crt": return []byte(WeakCertificate), nil case "/path/tls.key": return []byte(WeakKey), nil } return []byte{}, nil }, } var option testOption reader.Apply( WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { option = testOption{TestTLS: &tls.Config{Certificates: []tls.Certificate{c}}} }), ) assert.Equal(t, cert, option.TestTLS.Certificates[0]) reader.ReadFile = func(s string) ([]byte, error) { return nil, errors.New("oops") } option.TestTLS = nil reader.Apply( WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { option = testOption{TestTLS: &tls.Config{Certificates: []tls.Certificate{c}}} }), ) assert.Nil(t, option.TestTLS) reader.GetEnv = func(s string) string { return "" } option.TestTLS = nil reader.Apply( WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { option = testOption{TestTLS: &tls.Config{Certificates: []tls.Certificate{c}}} }), ) assert.Nil(t, option.TestTLS) } func TestStringToHeader(t *testing.T) { tests := []struct { name string value string want map[string]string }{ { name: "simple test", value: "userId=alice", want: map[string]string{"userId": "alice"}, }, { name: "simple test with spaces", value: " userId = alice ", want: map[string]string{"userId": "alice"}, }, { name: "simple header conforms to RFC 3986 spec", value: " userId = alice+test ", want: map[string]string{"userId": "alice+test"}, }, { name: "multiple headers encoded", value: "userId=alice,serverNode=DF%3A28,isProduction=false", want: map[string]string{ "userId": "alice", "serverNode": "DF:28", "isProduction": "false", }, }, { name: "multiple headers encoded per RFC 3986 spec", value: "userId=alice+test,serverNode=DF%3A28,isProduction=false,namespace=localhost/test", want: map[string]string{ "userId": "alice+test", "serverNode": "DF:28", "isProduction": "false", "namespace": "localhost/test", }, }, { name: "invalid headers format", value: "userId:alice", want: map[string]string{}, }, { name: "invalid key", value: "%XX=missing,userId=alice", want: map[string]string{ "userId": "alice", }, }, { name: "invalid value", value: "missing=%XX,userId=alice", want: map[string]string{ "userId": "alice", }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { assert.Equal(t, tt.want, stringToHeader(tt.value)) }) } } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go000066400000000000000000000060471452547353200277060ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" //go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig\"}" --out=otlpconfig/envconfig.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry\"}" --out=otlpconfig/options.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig\"}" --out=otlpconfig/options_test.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl "--data={}" --out=otlpconfig/optiontypes.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl "--data={}" --out=otlpconfig/tls.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/client.go.tmpl "--data={}" --out=otlptracetest/client.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/collector.go.tmpl "--data={}" --out=otlptracetest/collector.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/data.go.tmpl "--data={}" --out=otlptracetest/data.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl "--data={}" --out=otlptracetest/otlptest.go opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/000077500000000000000000000000001452547353200307435ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go000066400000000000000000000124301452547353200332500ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" import ( "crypto/tls" "crypto/x509" "net/url" "os" "path" "strings" "time" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig" ) // DefaultEnvOptionsReader is the default environments reader. var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{ GetEnv: os.Getenv, ReadFile: os.ReadFile, Namespace: "OTEL_EXPORTER_OTLP", } // ApplyGRPCEnvConfigs applies the env configurations for gRPC. func ApplyGRPCEnvConfigs(cfg Config) Config { opts := getOptionsFromEnv() for _, opt := range opts { cfg = opt.ApplyGRPCOption(cfg) } return cfg } // ApplyHTTPEnvConfigs applies the env configurations for HTTP. func ApplyHTTPEnvConfigs(cfg Config) Config { opts := getOptionsFromEnv() for _, opt := range opts { cfg = opt.ApplyHTTPOption(cfg) } return cfg } func getOptionsFromEnv() []GenericOption { opts := []GenericOption{} tlsConf := &tls.Config{} DefaultEnvOptionsReader.Apply( envconfig.WithURL("ENDPOINT", func(u *url.URL) { opts = append(opts, withEndpointScheme(u)) opts = append(opts, newSplitOption(func(cfg Config) Config { cfg.Traces.Endpoint = u.Host // For OTLP/HTTP endpoint URLs without a per-signal // configuration, the passed endpoint is used as a base URL // and the signals are sent to these paths relative to that. cfg.Traces.URLPath = path.Join(u.Path, DefaultTracesPath) return cfg }, withEndpointForGRPC(u))) }), envconfig.WithURL("TRACES_ENDPOINT", func(u *url.URL) { opts = append(opts, withEndpointScheme(u)) opts = append(opts, newSplitOption(func(cfg Config) Config { cfg.Traces.Endpoint = u.Host // For endpoint URLs for OTLP/HTTP per-signal variables, the // URL MUST be used as-is without any modification. The only // exception is that if an URL contains no path part, the root // path / MUST be used. 
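// Illustrative note (an editorial addition mirroring the spec wording above):
// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=https://collector:4318/v2/traces yields
// URLPath "/v2/traces", while an endpoint with no path part, such as
// https://collector:4318, yields URLPath "/".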
path := u.Path if path == "" { path = "/" } cfg.Traces.URLPath = path return cfg }, withEndpointForGRPC(u))) }), envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), envconfig.WithHeaders("TRACES_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), WithEnvCompression("TRACES_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), envconfig.WithDuration("TRACES_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), ) return opts } func withEndpointScheme(u *url.URL) GenericOption { switch strings.ToLower(u.Scheme) { case "http", "unix": return WithInsecure() default: return WithSecure() } } func withEndpointForGRPC(u *url.URL) func(cfg Config) Config { return func(cfg Config) Config { // For OTLP/gRPC endpoints, this is the target to which the // exporter is going to send telemetry. cfg.Traces.Endpoint = path.Join(u.Host, u.Path) return cfg } } // WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression. func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) { return func(e *envconfig.EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { cp := NoCompression if v == "gzip" { cp = GzipCompression } fn(cp) } } } // revive:disable-next-line:flag-parameter func withInsecure(b bool) GenericOption { if b { return WithInsecure() } return WithSecure() } func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) { return func(e *envconfig.EnvOptionsReader) { if c.RootCAs != nil || len(c.Certificates) > 0 { fn(c) } } } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go000066400000000000000000000211501452547353200327640ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" import ( "crypto/tls" "fmt" "path" "strings" "time" "google.golang.org/grpc" "google.golang.org/grpc/backoff" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/encoding/gzip" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" ) const ( // DefaultTracesPath is a default URL path for endpoint that // receives spans. DefaultTracesPath string = "/v1/traces" // DefaultTimeout is a default max waiting time for the backend to process // each span batch. DefaultTimeout time.Duration = 10 * time.Second ) type ( SignalConfig struct { Endpoint string Insecure bool TLSCfg *tls.Config Headers map[string]string Compression Compression Timeout time.Duration URLPath string // gRPC configurations GRPCCredentials credentials.TransportCredentials } Config struct { // Signal specific configurations Traces SignalConfig RetryConfig retry.Config // gRPC configurations ReconnectionPeriod time.Duration ServiceConfig string DialOptions []grpc.DialOption GRPCConn *grpc.ClientConn } ) // NewHTTPConfig returns a new Config with all settings applied from opts and // any unset setting using the default HTTP config values. func NewHTTPConfig(opts ...HTTPOption) Config { cfg := Config{ Traces: SignalConfig{ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort), URLPath: DefaultTracesPath, Compression: NoCompression, Timeout: DefaultTimeout, }, RetryConfig: retry.DefaultConfig, } cfg = ApplyHTTPEnvConfigs(cfg) for _, opt := range opts { cfg = opt.ApplyHTTPOption(cfg) } cfg.Traces.URLPath = cleanPath(cfg.Traces.URLPath, DefaultTracesPath) return cfg } // cleanPath returns a path with all spaces trimmed and all redundancies // removed. If urlPath is empty or cleaning it results in an empty string, // defaultPath is returned instead. func cleanPath(urlPath string, defaultPath string) string { tmp := path.Clean(strings.TrimSpace(urlPath)) if tmp == "." { return defaultPath } if !path.IsAbs(tmp) { tmp = fmt.Sprintf("/%s", tmp) } return tmp } // NewGRPCConfig returns a new Config with all settings applied from opts and // any unset setting using the default gRPC config values. func NewGRPCConfig(opts ...GRPCOption) Config { userAgent := "OTel OTLP Exporter Go/" + otlptrace.Version() cfg := Config{ Traces: SignalConfig{ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort), URLPath: DefaultTracesPath, Compression: NoCompression, Timeout: DefaultTimeout, }, RetryConfig: retry.DefaultConfig, DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)}, } cfg = ApplyGRPCEnvConfigs(cfg) for _, opt := range opts { cfg = opt.ApplyGRPCOption(cfg) } if cfg.ServiceConfig != "" { cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig)) } // Priroritize GRPCCredentials over Insecure (passing both is an error). if cfg.Traces.GRPCCredentials != nil { cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials)) } else if cfg.Traces.Insecure { cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials())) } else { // Default to using the host's root CA. 
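// Illustrative note (an editorial addition, not generated text): a nil
// tls.Config makes the gRPC transport credentials verify the collector's
// certificate against the operating system's root CA pool at handshake time.
// Supplying WithTLSClientConfig (defined later in this file) or the
// OTEL_EXPORTER_OTLP_CERTIFICATE environment variable instead populates
// GRPCCredentials and takes the branch above.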
creds := credentials.NewTLS(nil) cfg.Traces.GRPCCredentials = creds cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds)) } if cfg.Traces.Compression == GzipCompression { cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name))) } if cfg.ReconnectionPeriod != 0 { p := grpc.ConnectParams{ Backoff: backoff.DefaultConfig, MinConnectTimeout: cfg.ReconnectionPeriod, } cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p)) } return cfg } type ( // GenericOption applies an option to the HTTP or gRPC driver. GenericOption interface { ApplyHTTPOption(Config) Config ApplyGRPCOption(Config) Config // A private method to prevent users implementing the // interface and so future additions to it will not // violate compatibility. private() } // HTTPOption applies an option to the HTTP driver. HTTPOption interface { ApplyHTTPOption(Config) Config // A private method to prevent users implementing the // interface and so future additions to it will not // violate compatibility. private() } // GRPCOption applies an option to the gRPC driver. GRPCOption interface { ApplyGRPCOption(Config) Config // A private method to prevent users implementing the // interface and so future additions to it will not // violate compatibility. private() } ) // genericOption is an option that applies the same logic // for both gRPC and HTTP. type genericOption struct { fn func(Config) Config } func (g *genericOption) ApplyGRPCOption(cfg Config) Config { return g.fn(cfg) } func (g *genericOption) ApplyHTTPOption(cfg Config) Config { return g.fn(cfg) } func (genericOption) private() {} func newGenericOption(fn func(cfg Config) Config) GenericOption { return &genericOption{fn: fn} } // splitOption is an option that applies different logics // for gRPC and HTTP. type splitOption struct { httpFn func(Config) Config grpcFn func(Config) Config } func (g *splitOption) ApplyGRPCOption(cfg Config) Config { return g.grpcFn(cfg) } func (g *splitOption) ApplyHTTPOption(cfg Config) Config { return g.httpFn(cfg) } func (splitOption) private() {} func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption { return &splitOption{httpFn: httpFn, grpcFn: grpcFn} } // httpOption is an option that is only applied to the HTTP driver. type httpOption struct { fn func(Config) Config } func (h *httpOption) ApplyHTTPOption(cfg Config) Config { return h.fn(cfg) } func (httpOption) private() {} func NewHTTPOption(fn func(cfg Config) Config) HTTPOption { return &httpOption{fn: fn} } // grpcOption is an option that is only applied to the gRPC driver. 
type grpcOption struct { fn func(Config) Config } func (h *grpcOption) ApplyGRPCOption(cfg Config) Config { return h.fn(cfg) } func (grpcOption) private() {} func NewGRPCOption(fn func(cfg Config) Config) GRPCOption { return &grpcOption{fn: fn} } // Generic Options func WithEndpoint(endpoint string) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Traces.Endpoint = endpoint return cfg }) } func WithCompression(compression Compression) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Traces.Compression = compression return cfg }) } func WithURLPath(urlPath string) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Traces.URLPath = urlPath return cfg }) } func WithRetry(rc retry.Config) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.RetryConfig = rc return cfg }) } func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption { return newSplitOption(func(cfg Config) Config { cfg.Traces.TLSCfg = tlsCfg.Clone() return cfg }, func(cfg Config) Config { cfg.Traces.GRPCCredentials = credentials.NewTLS(tlsCfg) return cfg }) } func WithInsecure() GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Traces.Insecure = true return cfg }) } func WithSecure() GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Traces.Insecure = false return cfg }) } func WithHeaders(headers map[string]string) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Traces.Headers = headers return cfg }) } func WithTimeout(duration time.Duration) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Traces.Timeout = duration return cfg }) } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options_test.go000066400000000000000000000337501452547353200340340ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlpconfig/options_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
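// Illustrative sketch (an editorial addition, not part of the generated test
// file): the generic options defined in options.go compose through
// NewGRPCConfig or NewHTTPConfig, with environment variables applied first and
// explicit options overriding them. The endpoint below is hypothetical.
//
//	cfg := NewGRPCConfig(
//		WithEndpoint("collector.internal:4317"),
//		WithCompression(GzipCompression),
//		WithTimeout(5*time.Second),
//	)
//	_ = cfg.Traces.Endpoint // "collector.internal:4317", even if OTEL_EXPORTER_OTLP_ENDPOINT is set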
package otlpconfig import ( "errors" "testing" "time" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig" ) const ( WeakCertificate = ` -----BEGIN CERTIFICATE----- MIIBhzCCASygAwIBAgIRANHpHgAWeTnLZpTSxCKs0ggwCgYIKoZIzj0EAwIwEjEQ MA4GA1UEChMHb3RlbC1nbzAeFw0yMTA0MDExMzU5MDNaFw0yMTA0MDExNDU5MDNa MBIxEDAOBgNVBAoTB290ZWwtZ28wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAS9 nWSkmPCxShxnp43F+PrOtbGV7sNfkbQ/kxzi9Ego0ZJdiXxkmv/C05QFddCW7Y0Z sJCLHGogQsYnWJBXUZOVo2MwYTAOBgNVHQ8BAf8EBAMCB4AwEwYDVR0lBAwwCgYI KwYBBQUHAwEwDAYDVR0TAQH/BAIwADAsBgNVHREEJTAjgglsb2NhbGhvc3SHEAAA AAAAAAAAAAAAAAAAAAGHBH8AAAEwCgYIKoZIzj0EAwIDSQAwRgIhANwZVVKvfvQ/ 1HXsTvgH+xTQswOwSSKYJ1cVHQhqK7ZbAiEAus8NxpTRnp5DiTMuyVmhVNPB+bVH Lhnm4N/QDk5rek0= -----END CERTIFICATE----- ` WeakPrivateKey = ` -----BEGIN PRIVATE KEY----- MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgN8HEXiXhvByrJ1zK SFT6Y2l2KqDWwWzKf+t4CyWrNKehRANCAAS9nWSkmPCxShxnp43F+PrOtbGV7sNf kbQ/kxzi9Ego0ZJdiXxkmv/C05QFddCW7Y0ZsJCLHGogQsYnWJBXUZOV -----END PRIVATE KEY----- ` ) type env map[string]string func (e *env) getEnv(env string) string { return (*e)[env] } type fileReader map[string][]byte func (f *fileReader) readFile(filename string) ([]byte, error) { if b, ok := (*f)[filename]; ok { return b, nil } return nil, errors.New("file not found") } func TestConfigs(t *testing.T) { tlsCert, err := CreateTLSConfig([]byte(WeakCertificate)) assert.NoError(t, err) tests := []struct { name string opts []GenericOption env env fileReader fileReader asserts func(t *testing.T, c *Config, grpcOption bool) }{ { name: "Test default configs", asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.Equal(t, "localhost:4317", c.Traces.Endpoint) } else { assert.Equal(t, "localhost:4318", c.Traces.Endpoint) } assert.Equal(t, NoCompression, c.Traces.Compression) assert.Equal(t, map[string]string(nil), c.Traces.Headers) assert.Equal(t, 10*time.Second, c.Traces.Timeout) }, }, // Endpoint Tests { name: "Test With Endpoint", opts: []GenericOption{ WithEndpoint("someendpoint"), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "someendpoint", c.Traces.Endpoint) }, }, { name: "Test Environment Endpoint", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "https://env.endpoint/prefix", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.False(t, c.Traces.Insecure) if grpcOption { assert.Equal(t, "env.endpoint/prefix", c.Traces.Endpoint) } else { assert.Equal(t, "env.endpoint", c.Traces.Endpoint) assert.Equal(t, "/prefix/v1/traces", c.Traces.URLPath) } }, }, { name: "Test Environment Signal Specific Endpoint", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "https://overrode.by.signal.specific/env/var", "OTEL_EXPORTER_OTLP_TRACES_ENDPOINT": "http://env.traces.endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.True(t, c.Traces.Insecure) assert.Equal(t, "env.traces.endpoint", c.Traces.Endpoint) if !grpcOption { assert.Equal(t, "/", c.Traces.URLPath) } }, }, { name: "Test Mixed Environment and With Endpoint", opts: []GenericOption{ WithEndpoint("traces_endpoint"), }, env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "env_endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "traces_endpoint", c.Traces.Endpoint) }, }, { name: "Test Environment Endpoint with HTTP scheme", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "http://env_endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) 
{ assert.Equal(t, "env_endpoint", c.Traces.Endpoint) assert.Equal(t, true, c.Traces.Insecure) }, }, { name: "Test Environment Endpoint with HTTP scheme and leading & trailingspaces", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": " http://env_endpoint ", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "env_endpoint", c.Traces.Endpoint) assert.Equal(t, true, c.Traces.Insecure) }, }, { name: "Test Environment Endpoint with HTTPS scheme", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "https://env_endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "env_endpoint", c.Traces.Endpoint) assert.Equal(t, false, c.Traces.Insecure) }, }, { name: "Test Environment Signal Specific Endpoint with uppercase scheme", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "HTTPS://overrode_by_signal_specific", "OTEL_EXPORTER_OTLP_TRACES_ENDPOINT": "HtTp://env_traces_endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "env_traces_endpoint", c.Traces.Endpoint) assert.Equal(t, true, c.Traces.Insecure) }, }, // Certificate tests { name: "Test Default Certificate", asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.NotNil(t, c.Traces.GRPCCredentials) } else { assert.Nil(t, c.Traces.TLSCfg) } }, }, { name: "Test With Certificate", opts: []GenericOption{ WithTLSClientConfig(tlsCert), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { // TODO: make sure gRPC's credentials actually works assert.NotNil(t, c.Traces.GRPCCredentials) } else { // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Traces.TLSCfg.RootCAs.Subjects()) } }, }, { name: "Test Environment Certificate", env: map[string]string{ "OTEL_EXPORTER_OTLP_CERTIFICATE": "cert_path", }, fileReader: fileReader{ "cert_path": []byte(WeakCertificate), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.NotNil(t, c.Traces.GRPCCredentials) } else { // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Traces.TLSCfg.RootCAs.Subjects()) } }, }, { name: "Test Environment Signal Specific Certificate", env: map[string]string{ "OTEL_EXPORTER_OTLP_CERTIFICATE": "overrode_by_signal_specific", "OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE": "cert_path", }, fileReader: fileReader{ "cert_path": []byte(WeakCertificate), "invalid_cert": []byte("invalid certificate file."), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.NotNil(t, c.Traces.GRPCCredentials) } else { // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Traces.TLSCfg.RootCAs.Subjects()) } }, }, { name: "Test Mixed Environment and With Certificate", opts: []GenericOption{}, env: map[string]string{ "OTEL_EXPORTER_OTLP_CERTIFICATE": "cert_path", }, fileReader: fileReader{ "cert_path": []byte(WeakCertificate), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.NotNil(t, c.Traces.GRPCCredentials) } else { // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. 
assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Traces.TLSCfg.RootCAs.Subjects()) } }, }, // Headers tests { name: "Test With Headers", opts: []GenericOption{ WithHeaders(map[string]string{"h1": "v1"}), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, map[string]string{"h1": "v1"}, c.Traces.Headers) }, }, { name: "Test Environment Headers", env: map[string]string{"OTEL_EXPORTER_OTLP_HEADERS": "h1=v1,h2=v2"}, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Traces.Headers) }, }, { name: "Test Environment Signal Specific Headers", env: map[string]string{ "OTEL_EXPORTER_OTLP_HEADERS": "overrode_by_signal_specific", "OTEL_EXPORTER_OTLP_TRACES_HEADERS": "h1=v1,h2=v2", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Traces.Headers) }, }, { name: "Test Mixed Environment and With Headers", env: map[string]string{"OTEL_EXPORTER_OTLP_HEADERS": "h1=v1,h2=v2"}, opts: []GenericOption{}, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Traces.Headers) }, }, // Compression Tests { name: "Test With Compression", opts: []GenericOption{ WithCompression(GzipCompression), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, GzipCompression, c.Traces.Compression) }, }, { name: "Test Environment Compression", env: map[string]string{ "OTEL_EXPORTER_OTLP_COMPRESSION": "gzip", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, GzipCompression, c.Traces.Compression) }, }, { name: "Test Environment Signal Specific Compression", env: map[string]string{ "OTEL_EXPORTER_OTLP_TRACES_COMPRESSION": "gzip", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, GzipCompression, c.Traces.Compression) }, }, { name: "Test Mixed Environment and With Compression", opts: []GenericOption{ WithCompression(NoCompression), }, env: map[string]string{ "OTEL_EXPORTER_OTLP_TRACES_COMPRESSION": "gzip", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, NoCompression, c.Traces.Compression) }, }, // Timeout Tests { name: "Test With Timeout", opts: []GenericOption{ WithTimeout(time.Duration(5 * time.Second)), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, 5*time.Second, c.Traces.Timeout) }, }, { name: "Test Environment Timeout", env: map[string]string{ "OTEL_EXPORTER_OTLP_TIMEOUT": "15000", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, c.Traces.Timeout, 15*time.Second) }, }, { name: "Test Environment Signal Specific Timeout", env: map[string]string{ "OTEL_EXPORTER_OTLP_TIMEOUT": "15000", "OTEL_EXPORTER_OTLP_TRACES_TIMEOUT": "27000", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, c.Traces.Timeout, 27*time.Second) }, }, { name: "Test Mixed Environment and With Timeout", env: map[string]string{ "OTEL_EXPORTER_OTLP_TIMEOUT": "15000", "OTEL_EXPORTER_OTLP_TRACES_TIMEOUT": "27000", }, opts: []GenericOption{ WithTimeout(5 * time.Second), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, c.Traces.Timeout, 5*time.Second) }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { origEOR := DefaultEnvOptionsReader DefaultEnvOptionsReader = envconfig.EnvOptionsReader{ GetEnv: tt.env.getEnv, ReadFile: tt.fileReader.readFile, Namespace: "OTEL_EXPORTER_OTLP", } t.Cleanup(func() { 
DefaultEnvOptionsReader = origEOR }) // Tests Generic options as HTTP Options cfg := NewHTTPConfig(asHTTPOptions(tt.opts)...) tt.asserts(t, &cfg, false) // Tests Generic options as gRPC Options cfg = NewGRPCConfig(asGRPCOptions(tt.opts)...) tt.asserts(t, &cfg, true) }) } } func asHTTPOptions(opts []GenericOption) []HTTPOption { converted := make([]HTTPOption, len(opts)) for i, o := range opts { converted[i] = NewHTTPOption(o.ApplyHTTPOption) } return converted } func asGRPCOptions(opts []GenericOption) []GRPCOption { converted := make([]GRPCOption, len(opts)) for i, o := range opts { converted[i] = NewGRPCOption(o.ApplyGRPCOption) } return converted } func TestCleanPath(t *testing.T) { type args struct { urlPath string defaultPath string } tests := []struct { name string args args want string }{ { name: "clean empty path", args: args{ urlPath: "", defaultPath: "DefaultPath", }, want: "DefaultPath", }, { name: "clean metrics path", args: args{ urlPath: "/prefix/v1/metrics", defaultPath: "DefaultMetricsPath", }, want: "/prefix/v1/metrics", }, { name: "clean traces path", args: args{ urlPath: "https://env_endpoint", defaultPath: "DefaultTracesPath", }, want: "/https:/env_endpoint", }, { name: "spaces trimmed", args: args{ urlPath: " /dir", }, want: "/dir", }, { name: "clean path empty", args: args{ urlPath: "dir/..", defaultPath: "DefaultTracesPath", }, want: "DefaultTracesPath", }, { name: "make absolute", args: args{ urlPath: "dir/a", }, want: "/dir/a", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := cleanPath(tt.args.urlPath, tt.args.defaultPath); got != tt.want { t.Errorf("CleanPath() = %v, want %v", got, tt.want) } }) } } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go000066400000000000000000000034741452547353200336770ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" const ( // DefaultCollectorGRPCPort is the default gRPC port of the collector. DefaultCollectorGRPCPort uint16 = 4317 // DefaultCollectorHTTPPort is the default HTTP port of the collector. DefaultCollectorHTTPPort uint16 = 4318 // DefaultCollectorHost is the host address the Exporter will attempt // connect to if no collector address is provided. DefaultCollectorHost string = "localhost" ) // Compression describes the compression used for payloads sent to the // collector. type Compression int const ( // NoCompression tells the driver to send payloads without // compression. NoCompression Compression = iota // GzipCompression tells the driver to send payloads after // compressing them with gzip. GzipCompression ) // Marshaler describes the kind of message format sent to the collector. 
type Marshaler int const ( // MarshalProto tells the driver to send using the protobuf binary format. MarshalProto Marshaler = iota // MarshalJSON tells the driver to send using json format. MarshalJSON ) opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go000066400000000000000000000023271452547353200321000ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" import ( "crypto/tls" "crypto/x509" "errors" ) // CreateTLSConfig creates a tls.Config from a raw certificate bytes // to verify a server certificate. func CreateTLSConfig(certBytes []byte) (*tls.Config, error) { cp := x509.NewCertPool() if ok := cp.AppendCertsFromPEM(certBytes); !ok { return nil, errors.New("failed to append certificate to the cert pool") } return &tls.Config{ RootCAs: cp, }, nil } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/internal/otlptracetest/000077500000000000000000000000001452547353200314745ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/internal/otlptracetest/client.go000066400000000000000000000076331452547353200333120ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlptracetest/client.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
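// Illustrative sketch (an editorial addition; the CA path is hypothetical):
// CreateTLSConfig from otlpconfig/tls.go above turns a raw PEM bundle into a
// *tls.Config that can be combined with WithTLSClientConfig.
//
//	pem, err := os.ReadFile("/etc/otel/ca.pem")
//	if err != nil {
//		// handle the read error
//	}
//	tlsCfg, err := otlpconfig.CreateTLSConfig(pem)
//	if err != nil {
//		// handle the parse error
//	}
//	_ = otlpconfig.WithTLSClientConfig(tlsCfg)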
package otlptracetest // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlptracetest" import ( "context" "errors" "sync" "testing" "time" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" ) func RunExporterShutdownTest(t *testing.T, factory func() otlptrace.Client) { t.Run("testClientStopHonorsTimeout", func(t *testing.T) { testClientStopHonorsTimeout(t, factory()) }) t.Run("testClientStopHonorsCancel", func(t *testing.T) { testClientStopHonorsCancel(t, factory()) }) t.Run("testClientStopNoError", func(t *testing.T) { testClientStopNoError(t, factory()) }) t.Run("testClientStopManyTimes", func(t *testing.T) { testClientStopManyTimes(t, factory()) }) } func initializeExporter(t *testing.T, client otlptrace.Client) *otlptrace.Exporter { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) defer cancel() e, err := otlptrace.New(ctx, client) if err != nil { t.Fatalf("failed to create exporter") } return e } func testClientStopHonorsTimeout(t *testing.T, client otlptrace.Client) { t.Cleanup(func() { // The test is looking for a failed shut down. Call Stop a second time // with an un-expired context to give the client a second chance at // cleaning up. There is not guarantee from the Client interface this // will succeed, therefore, no need to check the error (just give it a // best try). _ = client.Stop(context.Background()) }) e := initializeExporter(t, client) ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond) defer cancel() <-ctx.Done() if err := e.Shutdown(ctx); !errors.Is(err, context.DeadlineExceeded) { t.Errorf("expected context DeadlineExceeded error, got %v", err) } } func testClientStopHonorsCancel(t *testing.T, client otlptrace.Client) { t.Cleanup(func() { // The test is looking for a failed shut down. Call Stop a second time // with an un-expired context to give the client a second chance at // cleaning up. There is not guarantee from the Client interface this // will succeed, therefore, no need to check the error (just give it a // best try). _ = client.Stop(context.Background()) }) e := initializeExporter(t, client) ctx, cancel := context.WithCancel(context.Background()) cancel() if err := e.Shutdown(ctx); !errors.Is(err, context.Canceled) { t.Errorf("expected context canceled error, got %v", err) } } func testClientStopNoError(t *testing.T, client otlptrace.Client) { e := initializeExporter(t, client) ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) defer cancel() if err := e.Shutdown(ctx); err != nil { t.Errorf("shutdown errored: expected nil, got %v", err) } } func testClientStopManyTimes(t *testing.T, client otlptrace.Client) { e := initializeExporter(t, client) ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) defer cancel() ch := make(chan struct{}) wg := sync.WaitGroup{} const num int = 20 wg.Add(num) errs := make([]error, num) for i := 0; i < num; i++ { go func(idx int) { defer wg.Done() <-ch errs[idx] = e.Shutdown(ctx) }(i) } close(ch) wg.Wait() for _, err := range errs { if err != nil { t.Errorf("failed to shutdown exporter: %v", err) return } } } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/internal/otlptracetest/collector.go000066400000000000000000000061661452547353200340220ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. 
// source: internal/shared/otlp/otlptrace/otlptracetest/collector.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlptracetest // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlptracetest" import ( "sort" collectortracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" commonpb "go.opentelemetry.io/proto/otlp/common/v1" resourcepb "go.opentelemetry.io/proto/otlp/resource/v1" tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) // TracesCollector mocks a collector for the end-to-end testing. type TracesCollector interface { Stop() error GetResourceSpans() []*tracepb.ResourceSpans } // SpansStorage stores the spans. Mock collectors can use it to // store spans they have received. type SpansStorage struct { rsm map[string]*tracepb.ResourceSpans spanCount int } // NewSpansStorage creates a new spans storage. func NewSpansStorage() SpansStorage { return SpansStorage{ rsm: make(map[string]*tracepb.ResourceSpans), } } // AddSpans adds spans to the spans storage. func (s *SpansStorage) AddSpans(request *collectortracepb.ExportTraceServiceRequest) { for _, rs := range request.GetResourceSpans() { rstr := resourceString(rs.Resource) if existingRs, ok := s.rsm[rstr]; !ok { s.rsm[rstr] = rs // TODO (rghetia): Add support for library Info. if len(rs.ScopeSpans) == 0 { rs.ScopeSpans = []*tracepb.ScopeSpans{ { Spans: []*tracepb.Span{}, }, } } s.spanCount += len(rs.ScopeSpans[0].Spans) } else { if len(rs.ScopeSpans) > 0 { newSpans := rs.ScopeSpans[0].GetSpans() existingRs.ScopeSpans[0].Spans = append(existingRs.ScopeSpans[0].Spans, newSpans...) s.spanCount += len(newSpans) } } } } // GetSpans returns the stored spans. func (s *SpansStorage) GetSpans() []*tracepb.Span { spans := make([]*tracepb.Span, 0, s.spanCount) for _, rs := range s.rsm { spans = append(spans, rs.ScopeSpans[0].Spans...) } return spans } // GetResourceSpans returns the stored resource spans. func (s *SpansStorage) GetResourceSpans() []*tracepb.ResourceSpans { rss := make([]*tracepb.ResourceSpans, 0, len(s.rsm)) for _, rs := range s.rsm { rss = append(rss, rs) } return rss } func resourceString(res *resourcepb.Resource) string { sAttrs := sortedAttributes(res.GetAttributes()) rstr := "" for _, attr := range sAttrs { rstr = rstr + attr.String() } return rstr } func sortedAttributes(attrs []*commonpb.KeyValue) []*commonpb.KeyValue { sort.Slice(attrs[:], func(i, j int) bool { return attrs[i].Key < attrs[j].Key }) return attrs } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/internal/otlptracetest/data.go000066400000000000000000000047601452547353200327430ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlptracetest/data.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlptracetest // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlptracetest" import ( "time" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" tracesdk "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/sdk/trace/tracetest" "go.opentelemetry.io/otel/trace" ) // SingleReadOnlySpan returns a one-element slice with a read-only span. It // may be useful for testing driver's trace export. func SingleReadOnlySpan() []tracesdk.ReadOnlySpan { return tracetest.SpanStubs{ { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{2, 3, 4, 5, 6, 7, 8, 9, 2, 3, 4, 5, 6, 7, 8, 9}, SpanID: trace.SpanID{3, 4, 5, 6, 7, 8, 9, 0}, TraceFlags: trace.FlagsSampled, }), Parent: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{2, 3, 4, 5, 6, 7, 8, 9, 2, 3, 4, 5, 6, 7, 8, 9}, SpanID: trace.SpanID{1, 2, 3, 4, 5, 6, 7, 8}, TraceFlags: trace.FlagsSampled, }), SpanKind: trace.SpanKindInternal, Name: "foo", StartTime: time.Date(2020, time.December, 8, 20, 23, 0, 0, time.UTC), EndTime: time.Date(2020, time.December, 0, 20, 24, 0, 0, time.UTC), Attributes: []attribute.KeyValue{}, Events: []tracesdk.Event{}, Links: []tracesdk.Link{}, Status: tracesdk.Status{Code: codes.Ok}, DroppedAttributes: 0, DroppedEvents: 0, DroppedLinks: 0, ChildSpanCount: 0, Resource: resource.NewSchemaless(attribute.String("a", "b")), InstrumentationLibrary: instrumentation.Library{ Name: "bar", Version: "0.0.0", }, }, }.Snapshots() } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/internal/otlptracetest/otlptest.go000066400000000000000000000077741452547353200337200ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlptracetest // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlptracetest" import ( "context" "testing" "time" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" commonpb "go.opentelemetry.io/proto/otlp/common/v1" ) // RunEndToEndTest can be used by otlptrace.Client tests to validate // themselves. 
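//
// Illustrative sketch (a hypothetical caller; newMockCollector, newGRPCClient,
// and Endpoint are stand-ins, not functions defined in this module): an
// exporter package test builds its collector and exporter, then delegates the
// end-to-end assertions to this helper.
//
//	func TestClientEndToEnd(t *testing.T) {
//		coll := newMockCollector(t) // implements TracesCollector
//		exp, err := otlptrace.New(context.Background(), newGRPCClient(t, coll.Endpoint()))
//		if err != nil {
//			t.Fatal(err)
//		}
//		otlptracetest.RunEndToEndTest(context.Background(), t, exp, coll)
//	}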
func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlptrace.Exporter, tracesCollector TracesCollector) { pOpts := []sdktrace.TracerProviderOption{ sdktrace.WithSampler(sdktrace.AlwaysSample()), sdktrace.WithBatcher( exp, // add following two options to ensure flush sdktrace.WithBatchTimeout(5*time.Second), sdktrace.WithMaxExportBatchSize(10), ), } tp1 := sdktrace.NewTracerProvider(append(pOpts, sdktrace.WithResource(resource.NewSchemaless( attribute.String("rk1", "rv11)"), attribute.Int64("rk2", 5), )))...) tp2 := sdktrace.NewTracerProvider(append(pOpts, sdktrace.WithResource(resource.NewSchemaless( attribute.String("rk1", "rv12)"), attribute.Float64("rk3", 6.5), )))...) tr1 := tp1.Tracer("test-tracer1") tr2 := tp2.Tracer("test-tracer2") // Now create few spans m := 4 for i := 0; i < m; i++ { _, span := tr1.Start(ctx, "AlwaysSample") span.SetAttributes(attribute.Int64("i", int64(i))) span.End() _, span = tr2.Start(ctx, "AlwaysSample") span.SetAttributes(attribute.Int64("i", int64(i))) span.End() } func() { ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() if err := tp1.Shutdown(ctx); err != nil { t.Fatalf("failed to shut down a tracer provider 1: %v", err) } if err := tp2.Shutdown(ctx); err != nil { t.Fatalf("failed to shut down a tracer provider 2: %v", err) } }() // Wait >2 cycles. <-time.After(40 * time.Millisecond) // Now shutdown the exporter ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() if err := exp.Shutdown(ctx); err != nil { t.Fatalf("failed to stop the exporter: %v", err) } // Shutdown the collector too so that we can begin // verification checks of expected data back. if err := tracesCollector.Stop(); err != nil { t.Fatalf("failed to stop the mock collector: %v", err) } // Now verify that we only got two resources rss := tracesCollector.GetResourceSpans() if got, want := len(rss), 2; got != want { t.Fatalf("resource span count: got %d, want %d\n", got, want) } // Now verify spans and attributes for each resource span. for _, rs := range rss { if len(rs.ScopeSpans) == 0 { t.Fatalf("zero ScopeSpans") } if got, want := len(rs.ScopeSpans[0].Spans), m; got != want { t.Fatalf("span counts: got %d, want %d", got, want) } attrMap := map[int64]bool{} for _, s := range rs.ScopeSpans[0].Spans { if gotName, want := s.Name, "AlwaysSample"; gotName != want { t.Fatalf("span name: got %s, want %s", gotName, want) } attrMap[s.Attributes[0].Value.Value.(*commonpb.AnyValue_IntValue).IntValue] = true } if got, want := len(attrMap), m; got != want { t.Fatalf("span attribute unique values: got %d want %d", got, want) } for i := 0; i < m; i++ { _, ok := attrMap[int64(i)] if !ok { t.Fatalf("span with attribute %d missing", i) } } } } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go000066400000000000000000000042141452547353200321540ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/partialsuccess.go // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" import "fmt" // PartialSuccess represents the underlying error for all handling // OTLP partial success messages. Use `errors.Is(err, // PartialSuccess{})` to test whether an error passed to the OTel // error handler belongs to this category. type PartialSuccess struct { ErrorMessage string RejectedItems int64 RejectedKind string } var _ error = PartialSuccess{} // Error implements the error interface. func (ps PartialSuccess) Error() string { msg := ps.ErrorMessage if msg == "" { msg = "empty message" } return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind) } // Is supports the errors.Is() interface. func (ps PartialSuccess) Is(err error) bool { _, ok := err.(PartialSuccess) return ok } // TracePartialSuccessError returns an error describing a partial success // response for the trace signal. func TracePartialSuccessError(itemsRejected int64, errorMessage string) error { return PartialSuccess{ ErrorMessage: errorMessage, RejectedItems: itemsRejected, RejectedKind: "spans", } } // MetricPartialSuccessError returns an error describing a partial success // response for the metric signal. func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error { return PartialSuccess{ ErrorMessage: errorMessage, RejectedItems: itemsRejected, RejectedKind: "metric data points", } } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess_test.go000066400000000000000000000031221452547353200332100ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/partialsuccess_test.go // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
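// As a sketch of how the PartialSuccess category defined above is typically
// consumed (the handler body here is illustrative, not part of this package),
// an application can filter these errors in its OTel error handler:
//
//	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
//		if errors.Is(err, internal.PartialSuccess{}) {
//			log.Printf("collector rejected some telemetry: %v", err)
//		}
//	}))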
package internal import ( "errors" "strings" "testing" "github.com/stretchr/testify/require" ) func requireErrorString(t *testing.T, expect string, err error) { t.Helper() require.NotNil(t, err) require.Error(t, err) require.True(t, errors.Is(err, PartialSuccess{})) const pfx = "OTLP partial success: " msg := err.Error() require.True(t, strings.HasPrefix(msg, pfx)) require.Equal(t, expect, msg[len(pfx):]) } func TestPartialSuccessFormat(t *testing.T) { requireErrorString(t, "empty message (0 metric data points rejected)", MetricPartialSuccessError(0, "")) requireErrorString(t, "help help (0 metric data points rejected)", MetricPartialSuccessError(0, "help help")) requireErrorString(t, "what happened (10 metric data points rejected)", MetricPartialSuccessError(10, "what happened")) requireErrorString(t, "what happened (15 spans rejected)", TracePartialSuccessError(15, "what happened")) } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/000077500000000000000000000000001452547353200277445ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go000066400000000000000000000116001452547353200314360ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/retry/retry.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package retry provides request retry functionality that can perform // configurable exponential backoff for transient errors and honor any // explicit throttle responses received. package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" import ( "context" "fmt" "time" "github.com/cenkalti/backoff/v4" ) // DefaultConfig are the recommended defaults to use. var DefaultConfig = Config{ Enabled: true, InitialInterval: 5 * time.Second, MaxInterval: 30 * time.Second, MaxElapsedTime: time.Minute, } // Config defines configuration for retrying batches in case of export failure // using an exponential backoff. type Config struct { // Enabled indicates whether to not retry sending batches in case of // export failure. Enabled bool // InitialInterval the time to wait after the first failure before // retrying. InitialInterval time.Duration // MaxInterval is the upper bound on backoff interval. Once this value is // reached the delay between consecutive retries will always be // `MaxInterval`. MaxInterval time.Duration // MaxElapsedTime is the maximum amount of time (including retries) spent // trying to send a request/batch. Once this value is reached, the data // is discarded. MaxElapsedTime time.Duration } // RequestFunc wraps a request with retry logic. type RequestFunc func(context.Context, func(context.Context) error) error // EvaluateFunc returns if an error is retry-able and if an explicit throttle // duration should be honored that was included in the error. 
// // The function must return true if the error argument is retry-able, // otherwise it must return false for the first return parameter. // // The function must return a non-zero time.Duration if the error contains // explicit throttle duration that should be honored, otherwise it must return // a zero valued time.Duration. type EvaluateFunc func(error) (bool, time.Duration) // RequestFunc returns a RequestFunc using the evaluate function to determine // if requests can be retried and based on the exponential backoff // configuration of c. func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { if !c.Enabled { return func(ctx context.Context, fn func(context.Context) error) error { return fn(ctx) } } return func(ctx context.Context, fn func(context.Context) error) error { // Do not use NewExponentialBackOff since it calls Reset and the code here // must call Reset after changing the InitialInterval (this saves an // unnecessary call to Now). b := &backoff.ExponentialBackOff{ InitialInterval: c.InitialInterval, RandomizationFactor: backoff.DefaultRandomizationFactor, Multiplier: backoff.DefaultMultiplier, MaxInterval: c.MaxInterval, MaxElapsedTime: c.MaxElapsedTime, Stop: backoff.Stop, Clock: backoff.SystemClock, } b.Reset() for { err := fn(ctx) if err == nil { return nil } retryable, throttle := evaluate(err) if !retryable { return err } bOff := b.NextBackOff() if bOff == backoff.Stop { return fmt.Errorf("max retry time elapsed: %w", err) } // Wait for the greater of the backoff or throttle delay. var delay time.Duration if bOff > throttle { delay = bOff } else { elapsed := b.GetElapsedTime() if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime { return fmt.Errorf("max retry time would elapse: %w", err) } delay = throttle } if ctxErr := waitFunc(ctx, delay); ctxErr != nil { return fmt.Errorf("%w: %s", ctxErr, err) } } } } // Allow override for testing. var waitFunc = wait // wait takes the caller's context, and the amount of time to wait. It will // return nil if the timer fires before or at the same time as the context's // deadline. This indicates that the call can be retried. func wait(ctx context.Context, delay time.Duration) error { timer := time.NewTimer(delay) defer timer.Stop() select { case <-ctx.Done(): // Handle the case where the timer and context deadline end // simultaneously by prioritizing the timer expiration nil value // response. select { case <-timer.C: default: return ctx.Err() } case <-timer.C: } return nil } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry_test.go000066400000000000000000000145671452547353200325140ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/retry/retry_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
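// A rough sketch, from a caller's perspective, of how the retry package above
// is wired by an exporter client (the evaluate and upload functions here are
// illustrative, not part of this package):
//
//	requestFunc := retry.Config{
//		Enabled:         true,
//		InitialInterval: 5 * time.Second,
//		MaxInterval:     30 * time.Second,
//		MaxElapsedTime:  time.Minute,
//	}.RequestFunc(evaluate)
//
//	err := requestFunc(ctx, func(ctx context.Context) error {
//		return upload(ctx, batch)
//	})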
package retry import ( "context" "errors" "math" "sync" "testing" "time" "github.com/cenkalti/backoff/v4" "github.com/stretchr/testify/assert" ) func TestWait(t *testing.T) { tests := []struct { ctx context.Context delay time.Duration expected error }{ { ctx: context.Background(), delay: time.Duration(0), }, { ctx: context.Background(), delay: time.Duration(1), }, { ctx: context.Background(), delay: time.Duration(-1), }, { ctx: func() context.Context { ctx, cancel := context.WithCancel(context.Background()) cancel() return ctx }(), // Ensure the timer and context do not end simultaneously. delay: 1 * time.Hour, expected: context.Canceled, }, } for _, test := range tests { err := wait(test.ctx, test.delay) if test.expected == nil { assert.NoError(t, err) } else { assert.ErrorIs(t, err, test.expected) } } } func TestNonRetryableError(t *testing.T) { ev := func(error) (bool, time.Duration) { return false, 0 } reqFunc := Config{ Enabled: true, InitialInterval: 1 * time.Nanosecond, MaxInterval: 1 * time.Nanosecond, // Never stop retrying. MaxElapsedTime: 0, }.RequestFunc(ev) ctx := context.Background() assert.NoError(t, reqFunc(ctx, func(context.Context) error { return nil })) assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error { return assert.AnError }), assert.AnError) } func TestThrottledRetry(t *testing.T) { // Ensure the throttle delay is used by making longer than backoff delay. throttleDelay, backoffDelay := time.Second, time.Nanosecond ev := func(error) (bool, time.Duration) { // Retry everything with a throttle delay. return true, throttleDelay } reqFunc := Config{ Enabled: true, InitialInterval: backoffDelay, MaxInterval: backoffDelay, // Never stop retrying. MaxElapsedTime: 0, }.RequestFunc(ev) origWait := waitFunc var done bool waitFunc = func(_ context.Context, delay time.Duration) error { assert.Equal(t, throttleDelay, delay, "retry not throttled") // Try twice to ensure call is attempted again after delay. if done { return assert.AnError } done = true return nil } defer func() { waitFunc = origWait }() ctx := context.Background() assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error { return errors.New("not this error") }), assert.AnError) } func TestBackoffRetry(t *testing.T) { ev := func(error) (bool, time.Duration) { return true, 0 } delay := time.Nanosecond reqFunc := Config{ Enabled: true, InitialInterval: delay, MaxInterval: delay, // Never stop retrying. MaxElapsedTime: 0, }.RequestFunc(ev) origWait := waitFunc var done bool waitFunc = func(_ context.Context, d time.Duration) error { delta := math.Ceil(float64(delay) * backoff.DefaultRandomizationFactor) assert.InDelta(t, delay, d, delta, "retry not backoffed") // Try twice to ensure call is attempted again after delay. if done { return assert.AnError } done = true return nil } t.Cleanup(func() { waitFunc = origWait }) ctx := context.Background() assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error { return errors.New("not this error") }), assert.AnError) } func TestBackoffRetryCanceledContext(t *testing.T) { ev := func(error) (bool, time.Duration) { return true, 0 } delay := time.Millisecond reqFunc := Config{ Enabled: true, InitialInterval: delay, MaxInterval: delay, // Never stop retrying. 
MaxElapsedTime: 10 * time.Millisecond, }.RequestFunc(ev) ctx, cancel := context.WithCancel(context.Background()) count := 0 cancel() err := reqFunc(ctx, func(context.Context) error { count++ return assert.AnError }) assert.ErrorIs(t, err, context.Canceled) assert.Contains(t, err.Error(), assert.AnError.Error()) assert.Equal(t, 1, count) } func TestThrottledRetryGreaterThanMaxElapsedTime(t *testing.T) { // Ensure the throttle delay is used by making longer than backoff delay. tDelay, bDelay := time.Hour, time.Nanosecond ev := func(error) (bool, time.Duration) { return true, tDelay } reqFunc := Config{ Enabled: true, InitialInterval: bDelay, MaxInterval: bDelay, MaxElapsedTime: tDelay - (time.Nanosecond), }.RequestFunc(ev) ctx := context.Background() assert.Contains(t, reqFunc(ctx, func(context.Context) error { return assert.AnError }).Error(), "max retry time would elapse: ") } func TestMaxElapsedTime(t *testing.T) { ev := func(error) (bool, time.Duration) { return true, 0 } delay := time.Nanosecond reqFunc := Config{ Enabled: true, // InitialInterval > MaxElapsedTime means immediate return. InitialInterval: 2 * delay, MaxElapsedTime: delay, }.RequestFunc(ev) ctx := context.Background() assert.Contains(t, reqFunc(ctx, func(context.Context) error { return assert.AnError }).Error(), "max retry time elapsed: ") } func TestRetryNotEnabled(t *testing.T) { ev := func(error) (bool, time.Duration) { t.Error("evaluated retry when not enabled") return false, 0 } reqFunc := Config{}.RequestFunc(ev) ctx := context.Background() assert.NoError(t, reqFunc(ctx, func(context.Context) error { return nil })) assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error { return assert.AnError }), assert.AnError) } func TestRetryConcurrentSafe(t *testing.T) { ev := func(error) (bool, time.Duration) { return true, 0 } reqFunc := Config{ Enabled: true, }.RequestFunc(ev) var wg sync.WaitGroup ctx := context.Background() for i := 1; i < 5; i++ { wg.Add(1) go func() { defer wg.Done() var done bool assert.NoError(t, reqFunc(ctx, func(context.Context) error { if !done { done = true return assert.AnError } return nil })) }() } wg.Wait() } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/mock_collector_test.go000066400000000000000000000115131452547353200313510ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package otlptracegrpc_test import ( "context" "fmt" "net" "sync" "testing" "github.com/stretchr/testify/require" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/metadata" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlptracetest" collectortracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) func makeMockCollector(t *testing.T, mockConfig *mockConfig) *mockCollector { return &mockCollector{ t: t, traceSvc: &mockTraceService{ storage: otlptracetest.NewSpansStorage(), errors: mockConfig.errors, partial: mockConfig.partial, }, stopped: make(chan struct{}), } } type mockTraceService struct { collectortracepb.UnimplementedTraceServiceServer errors []error partial *collectortracepb.ExportTracePartialSuccess requests int mu sync.RWMutex storage otlptracetest.SpansStorage headers metadata.MD exportBlock chan struct{} } func (mts *mockTraceService) getHeaders() metadata.MD { mts.mu.RLock() defer mts.mu.RUnlock() return mts.headers } func (mts *mockTraceService) getSpans() []*tracepb.Span { mts.mu.RLock() defer mts.mu.RUnlock() return mts.storage.GetSpans() } func (mts *mockTraceService) getResourceSpans() []*tracepb.ResourceSpans { mts.mu.RLock() defer mts.mu.RUnlock() return mts.storage.GetResourceSpans() } func (mts *mockTraceService) Export(ctx context.Context, exp *collectortracepb.ExportTraceServiceRequest) (*collectortracepb.ExportTraceServiceResponse, error) { mts.mu.Lock() defer func() { mts.requests++ mts.mu.Unlock() }() if mts.exportBlock != nil { // Do this with the lock held so the mockCollector.Stop does not // abandon cleaning up resources. <-mts.exportBlock } reply := &collectortracepb.ExportTraceServiceResponse{ PartialSuccess: mts.partial, } if mts.requests < len(mts.errors) { idx := mts.requests return reply, mts.errors[idx] } mts.headers, _ = metadata.FromIncomingContext(ctx) mts.storage.AddSpans(exp) return reply, nil } type mockCollector struct { t *testing.T traceSvc *mockTraceService endpoint string stopFunc func() stopOnce sync.Once stopped chan struct{} } type mockConfig struct { errors []error endpoint string partial *collectortracepb.ExportTracePartialSuccess } var _ collectortracepb.TraceServiceServer = (*mockTraceService)(nil) var errAlreadyStopped = fmt.Errorf("already stopped") func (mc *mockCollector) stop() error { err := errAlreadyStopped mc.stopOnce.Do(func() { err = nil if mc.stopFunc != nil { mc.stopFunc() } }) // Wait until gRPC server is down. <-mc.stopped // Getting the lock ensures the traceSvc is done flushing. mc.traceSvc.mu.Lock() defer mc.traceSvc.mu.Unlock() return err } func (mc *mockCollector) Stop() error { return mc.stop() } func (mc *mockCollector) getSpans() []*tracepb.Span { return mc.traceSvc.getSpans() } func (mc *mockCollector) getResourceSpans() []*tracepb.ResourceSpans { return mc.traceSvc.getResourceSpans() } func (mc *mockCollector) GetResourceSpans() []*tracepb.ResourceSpans { return mc.getResourceSpans() } func (mc *mockCollector) getHeaders() metadata.MD { return mc.traceSvc.getHeaders() } // runMockCollector is a helper function to create a mock Collector. 
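// The returned collector listens on an OS-assigned local port; tests read the
// address from mc.endpoint, export spans to it over gRPC, call mc.Stop() to
// flush, and then inspect what arrived via mc.getSpans() or
// mc.GetResourceSpans().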
func runMockCollector(t *testing.T) *mockCollector { t.Helper() return runMockCollectorAtEndpoint(t, "localhost:0") } func runMockCollectorAtEndpoint(t *testing.T, endpoint string) *mockCollector { t.Helper() return runMockCollectorWithConfig(t, &mockConfig{endpoint: endpoint}) } func runMockCollectorWithConfig(t *testing.T, mockConfig *mockConfig) *mockCollector { t.Helper() ln, err := net.Listen("tcp", mockConfig.endpoint) require.NoError(t, err, "net.Listen") srv := grpc.NewServer() mc := makeMockCollector(t, mockConfig) collectortracepb.RegisterTraceServiceServer(srv, mc.traceSvc) go func() { _ = srv.Serve(ln) close(mc.stopped) }() mc.endpoint = ln.Addr().String() mc.stopFunc = srv.Stop // Wait until gRPC server is up. conn, err := grpc.Dial(mc.endpoint, grpc.WithBlock(), grpc.WithTransportCredentials(insecure.NewCredentials())) require.NoError(t, err, "grpc.Dial") require.NoError(t, conn.Close(), "conn.Close") return mc } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracegrpc/options.go000066400000000000000000000153551452547353200270160ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" import ( "fmt" "time" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" ) // Option applies an option to the gRPC driver. type Option interface { applyGRPCOption(otlpconfig.Config) otlpconfig.Config } func asGRPCOptions(opts []Option) []otlpconfig.GRPCOption { converted := make([]otlpconfig.GRPCOption, len(opts)) for i, o := range opts { converted[i] = otlpconfig.NewGRPCOption(o.applyGRPCOption) } return converted } // RetryConfig defines configuration for retrying export of span batches that // failed to be received by the target endpoint. // // This configuration does not define any network retry strategy. That is // entirely handled by the gRPC ClientConn. type RetryConfig retry.Config type wrappedOption struct { otlpconfig.GRPCOption } func (w wrappedOption) applyGRPCOption(cfg otlpconfig.Config) otlpconfig.Config { return w.ApplyGRPCOption(cfg) } // WithInsecure disables client transport security for the exporter's gRPC // connection just like grpc.WithInsecure() // (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does. Note, by // default, client security is required unless WithInsecure is used. // // This option has no effect if WithGRPCConn is used. func WithInsecure() Option { return wrappedOption{otlpconfig.WithInsecure()} } // WithEndpoint sets the target endpoint the exporter will connect to. If // unset, localhost:4317 will be used as a default. // // This option has no effect if WithGRPCConn is used. 
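//
// A typical construction, as a sketch (the collector address is an assumed
// placeholder):
//
//	client := otlptracegrpc.NewClient(
//		otlptracegrpc.WithEndpoint("my-collector:4317"),
//		otlptracegrpc.WithInsecure(),
//	)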
func WithEndpoint(endpoint string) Option { return wrappedOption{otlpconfig.WithEndpoint(endpoint)} } // WithReconnectionPeriod set the minimum amount of time between connection // attempts to the target endpoint. // // This option has no effect if WithGRPCConn is used. func WithReconnectionPeriod(rp time.Duration) Option { return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { cfg.ReconnectionPeriod = rp return cfg })} } func compressorToCompression(compressor string) otlpconfig.Compression { if compressor == "gzip" { return otlpconfig.GzipCompression } otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor)) return otlpconfig.NoCompression } // WithCompressor sets the compressor for the gRPC client to use when sending // requests. Supported compressor values: "gzip". func WithCompressor(compressor string) Option { return wrappedOption{otlpconfig.WithCompression(compressorToCompression(compressor))} } // WithHeaders will send the provided headers with each gRPC requests. func WithHeaders(headers map[string]string) Option { return wrappedOption{otlpconfig.WithHeaders(headers)} } // WithTLSCredentials allows the connection to use TLS credentials when // talking to the server. It takes in grpc.TransportCredentials instead of say // a Certificate file or a tls.Certificate, because the retrieving of these // credentials can be done in many ways e.g. plain file, in code tls.Config or // by certificate rotation, so it is up to the caller to decide what to use. // // This option has no effect if WithGRPCConn is used. func WithTLSCredentials(creds credentials.TransportCredentials) Option { return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { cfg.Traces.GRPCCredentials = creds return cfg })} } // WithServiceConfig defines the default gRPC service config used. // // This option has no effect if WithGRPCConn is used. func WithServiceConfig(serviceConfig string) Option { return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { cfg.ServiceConfig = serviceConfig return cfg })} } // WithDialOption sets explicit grpc.DialOptions to use when making a // connection. The options here are appended to the internal grpc.DialOptions // used so they will take precedence over any other internal grpc.DialOptions // they might conflict with. // // This option has no effect if WithGRPCConn is used. func WithDialOption(opts ...grpc.DialOption) Option { return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { cfg.DialOptions = opts return cfg })} } // WithGRPCConn sets conn as the gRPC ClientConn used for all communication. // // This option takes precedence over any other option that relates to // establishing or persisting a gRPC connection to a target endpoint. Any // other option of those types passed will be ignored. // // It is the callers responsibility to close the passed conn. The client // Shutdown method will not close this connection. func WithGRPCConn(conn *grpc.ClientConn) Option { return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { cfg.GRPCConn = conn return cfg })} } // WithTimeout sets the max amount of time a client will attempt to export a // batch of spans. This takes precedence over any retry settings defined with // WithRetry, once this time limit has been reached the export is abandoned // and the batch of spans is dropped. 
// // If unset, the default timeout will be set to 10 seconds. func WithTimeout(duration time.Duration) Option { return wrappedOption{otlpconfig.WithTimeout(duration)} } // WithRetry sets the retry policy for transient retryable errors that may be // returned by the target endpoint when exporting a batch of spans. // // If the target endpoint responds with not only a retryable error, but // explicitly returns a backoff time in the response. That time will take // precedence over these settings. // // These settings do not define any network retry strategy. That is entirely // handled by the gRPC ClientConn. // // If unset, the default retry policy will be used. It will retry the export // 5 seconds after receiving a retryable error and increase exponentially // after each error for no more than a total time of 1 minute. func WithRetry(settings RetryConfig) Option { return wrappedOption{otlpconfig.WithRetry(retry.Config(settings))} } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/000077500000000000000000000000001452547353200250075ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/certificate_test.go000066400000000000000000000046741452547353200306720ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlptracehttp_test import ( "bytes" "crypto/ecdsa" "crypto/elliptic" "crypto/rand" "crypto/x509" "crypto/x509/pkix" "encoding/pem" "math/big" "net" "time" ) type pemCertificate struct { Certificate []byte PrivateKey []byte } // Based on https://golang.org/src/crypto/tls/generate_cert.go, // simplified and weakened. 
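// It produces a self-signed ECDSA P-256 certificate for "localhost" and the
// loopback addresses, valid for one hour, and returns both the certificate and
// the private key PEM-encoded. It is intentionally minimal and must not be
// used outside of these tests.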
func generateWeakCertificate() (*pemCertificate, error) { priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) if err != nil { return nil, err } keyUsage := x509.KeyUsageDigitalSignature notBefore := time.Now() notAfter := notBefore.Add(time.Hour) serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) if err != nil { return nil, err } template := x509.Certificate{ SerialNumber: serialNumber, Subject: pkix.Name{ Organization: []string{"otel-go"}, }, NotBefore: notBefore, NotAfter: notAfter, KeyUsage: keyUsage, ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, BasicConstraintsValid: true, DNSNames: []string{"localhost"}, IPAddresses: []net.IP{net.IPv6loopback, net.IPv4(127, 0, 0, 1)}, } derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv) if err != nil { return nil, err } certificateBuffer := new(bytes.Buffer) if err := pem.Encode(certificateBuffer, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil { return nil, err } privDERBytes, err := x509.MarshalPKCS8PrivateKey(priv) if err != nil { return nil, err } privBuffer := new(bytes.Buffer) if err := pem.Encode(privBuffer, &pem.Block{Type: "PRIVATE KEY", Bytes: privDERBytes}); err != nil { return nil, err } return &pemCertificate{ Certificate: certificateBuffer.Bytes(), PrivateKey: privBuffer.Bytes(), }, nil } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/client.go000066400000000000000000000214551452547353200266230ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" import ( "bytes" "compress/gzip" "context" "errors" "fmt" "io" "net" "net/http" "net/url" "strconv" "sync" "time" "google.golang.org/protobuf/proto" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry" coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) const contentTypeProto = "application/x-protobuf" var gzPool = sync.Pool{ New: func() interface{} { w := gzip.NewWriter(io.Discard) return w }, } // Keep it in sync with golang's DefaultTransport from net/http! We // have our own copy to avoid handling a situation where the // DefaultTransport is overwritten with some different implementation // of http.RoundTripper or it's modified by other package. 
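// The values below mirror net/http's defaults at the time of writing: a 30s
// dial timeout with keep-alives, HTTP/2 enabled, up to 100 idle connections,
// and a 90s idle-connection timeout.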
var ourTransport = &http.Transport{ Proxy: http.ProxyFromEnvironment, DialContext: (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, }).DialContext, ForceAttemptHTTP2: true, MaxIdleConns: 100, IdleConnTimeout: 90 * time.Second, TLSHandshakeTimeout: 10 * time.Second, ExpectContinueTimeout: 1 * time.Second, } type client struct { name string cfg otlpconfig.SignalConfig generalCfg otlpconfig.Config requestFunc retry.RequestFunc client *http.Client stopCh chan struct{} stopOnce sync.Once } var _ otlptrace.Client = (*client)(nil) // NewClient creates a new HTTP trace client. func NewClient(opts ...Option) otlptrace.Client { cfg := otlpconfig.NewHTTPConfig(asHTTPOptions(opts)...) httpClient := &http.Client{ Transport: ourTransport, Timeout: cfg.Traces.Timeout, } if cfg.Traces.TLSCfg != nil { transport := ourTransport.Clone() transport.TLSClientConfig = cfg.Traces.TLSCfg httpClient.Transport = transport } stopCh := make(chan struct{}) return &client{ name: "traces", cfg: cfg.Traces, generalCfg: cfg, requestFunc: cfg.RetryConfig.RequestFunc(evaluate), stopCh: stopCh, client: httpClient, } } // Start does nothing in a HTTP client. func (d *client) Start(ctx context.Context) error { // nothing to do select { case <-ctx.Done(): return ctx.Err() default: } return nil } // Stop shuts down the client and interrupt any in-flight request. func (d *client) Stop(ctx context.Context) error { d.stopOnce.Do(func() { close(d.stopCh) }) select { case <-ctx.Done(): return ctx.Err() default: } return nil } // UploadTraces sends a batch of spans to the collector. func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error { pbRequest := &coltracepb.ExportTraceServiceRequest{ ResourceSpans: protoSpans, } rawRequest, err := proto.Marshal(pbRequest) if err != nil { return err } ctx, cancel := d.contextWithStop(ctx) defer cancel() request, err := d.newRequest(rawRequest) if err != nil { return err } return d.requestFunc(ctx, func(ctx context.Context) error { select { case <-ctx.Done(): return ctx.Err() default: } request.reset(ctx) resp, err := d.client.Do(request.Request) var urlErr *url.Error if errors.As(err, &urlErr) && urlErr.Temporary() { return newResponseError(http.Header{}) } if err != nil { return err } if resp != nil && resp.Body != nil { defer func() { if err := resp.Body.Close(); err != nil { otel.Handle(err) } }() } switch sc := resp.StatusCode; { case sc >= 200 && sc <= 299: // Success, do not retry. // Read the partial success message, if any. var respData bytes.Buffer if _, err := io.Copy(&respData, resp.Body); err != nil { return err } if respData.Len() == 0 { return nil } if resp.Header.Get("Content-Type") == "application/x-protobuf" { var respProto coltracepb.ExportTraceServiceResponse if err := proto.Unmarshal(respData.Bytes(), &respProto); err != nil { return err } if respProto.PartialSuccess != nil { msg := respProto.PartialSuccess.GetErrorMessage() n := respProto.PartialSuccess.GetRejectedSpans() if n != 0 || msg != "" { err := internal.TracePartialSuccessError(n, msg) otel.Handle(err) } } } return nil case sc == http.StatusTooManyRequests, sc == http.StatusBadGateway, sc == http.StatusServiceUnavailable, sc == http.StatusGatewayTimeout: // Retry-able failures. Drain the body to reuse the connection. 
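		// Discarding the remaining body (rather than only closing it) lets the
		// Transport reuse the keep-alive connection for the retry attempt;
		// newResponseError below also captures any Retry-After hint from the
		// response headers.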
if _, err := io.Copy(io.Discard, resp.Body); err != nil { otel.Handle(err) } return newResponseError(resp.Header) default: return fmt.Errorf("failed to send to %s: %s", request.URL, resp.Status) } }) } func (d *client) newRequest(body []byte) (request, error) { u := url.URL{Scheme: d.getScheme(), Host: d.cfg.Endpoint, Path: d.cfg.URLPath} r, err := http.NewRequest(http.MethodPost, u.String(), nil) if err != nil { return request{Request: r}, err } userAgent := "OTel OTLP Exporter Go/" + otlptrace.Version() r.Header.Set("User-Agent", userAgent) for k, v := range d.cfg.Headers { r.Header.Set(k, v) } r.Header.Set("Content-Type", contentTypeProto) req := request{Request: r} switch Compression(d.cfg.Compression) { case NoCompression: r.ContentLength = (int64)(len(body)) req.bodyReader = bodyReader(body) case GzipCompression: // Ensure the content length is not used. r.ContentLength = -1 r.Header.Set("Content-Encoding", "gzip") gz := gzPool.Get().(*gzip.Writer) defer gzPool.Put(gz) var b bytes.Buffer gz.Reset(&b) if _, err := gz.Write(body); err != nil { return req, err } // Close needs to be called to ensure body if fully written. if err := gz.Close(); err != nil { return req, err } req.bodyReader = bodyReader(b.Bytes()) } return req, nil } // MarshalLog is the marshaling function used by the logging system to represent this Client. func (d *client) MarshalLog() interface{} { return struct { Type string Endpoint string Insecure bool }{ Type: "otlphttphttp", Endpoint: d.cfg.Endpoint, Insecure: d.cfg.Insecure, } } // bodyReader returns a closure returning a new reader for buf. func bodyReader(buf []byte) func() io.ReadCloser { return func() io.ReadCloser { return io.NopCloser(bytes.NewReader(buf)) } } // request wraps an http.Request with a resettable body reader. type request struct { *http.Request // bodyReader allows the same body to be used for multiple requests. bodyReader func() io.ReadCloser } // reset reinitializes the request Body and uses ctx for the request. func (r *request) reset(ctx context.Context) { r.Body = r.bodyReader() r.Request = r.Request.WithContext(ctx) } // retryableError represents a request failure that can be retried. type retryableError struct { throttle int64 } // newResponseError returns a retryableError and will extract any explicit // throttle delay contained in headers. func newResponseError(header http.Header) error { var rErr retryableError if s, ok := header["Retry-After"]; ok { if t, err := strconv.ParseInt(s[0], 10, 64); err == nil { rErr.throttle = t } } return rErr } func (e retryableError) Error() string { return "retry-able request failure" } // evaluate returns if err is retry-able. If it is and it includes an explicit // throttling delay, that delay is also returned. func evaluate(err error) (bool, time.Duration) { if err == nil { return false, 0 } rErr, ok := err.(retryableError) if !ok { return false, 0 } return true, time.Duration(rErr.throttle) } func (d *client) getScheme() string { if d.cfg.Insecure { return "http" } return "https" } func (d *client) contextWithStop(ctx context.Context) (context.Context, context.CancelFunc) { // Unify the parent context Done signal with the client's stop // channel. ctx, cancel := context.WithCancel(ctx) go func(ctx context.Context, cancel context.CancelFunc) { select { case <-ctx.Done(): // Nothing to do, either cancelled or deadline // happened. 
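		// Stop was called on the client: cancel the derived context so any
		// in-flight export request is interrupted promptly.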
case <-d.stopCh: cancel() } }(ctx, cancel) return ctx, cancel } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/client_test.go000066400000000000000000000301271452547353200276560ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlptracehttp_test import ( "context" "errors" "fmt" "net/http" "strings" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlptracetest" coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" ) const ( relOtherTracesPath = "post/traces/here" otherTracesPath = "/post/traces/here" ) var ( testHeaders = map[string]string{ "Otel-Go-Key-1": "somevalue", "Otel-Go-Key-2": "someothervalue", } customUserAgentHeader = map[string]string{ "user-agent": "custome-user-agent", } ) func TestEndToEnd(t *testing.T) { tests := []struct { name string opts []otlptracehttp.Option mcCfg mockCollectorConfig tls bool }{ { name: "no extra options", opts: nil, }, { name: "with gzip compression", opts: []otlptracehttp.Option{ otlptracehttp.WithCompression(otlptracehttp.GzipCompression), }, }, { name: "retry", opts: []otlptracehttp.Option{ otlptracehttp.WithRetry(otlptracehttp.RetryConfig{ Enabled: true, InitialInterval: time.Nanosecond, MaxInterval: time.Nanosecond, // Do not stop trying. MaxElapsedTime: 0, }), }, mcCfg: mockCollectorConfig{ InjectHTTPStatus: []int{503, 429}, }, }, { name: "retry with gzip compression", opts: []otlptracehttp.Option{ otlptracehttp.WithCompression(otlptracehttp.GzipCompression), otlptracehttp.WithRetry(otlptracehttp.RetryConfig{ Enabled: true, InitialInterval: time.Nanosecond, MaxInterval: time.Nanosecond, // Do not stop trying. MaxElapsedTime: 0, }), }, mcCfg: mockCollectorConfig{ InjectHTTPStatus: []int{503, 502}, }, }, { name: "retry with throttle", opts: []otlptracehttp.Option{ otlptracehttp.WithRetry(otlptracehttp.RetryConfig{ Enabled: true, InitialInterval: time.Nanosecond, MaxInterval: time.Nanosecond, // Do not stop trying. 
MaxElapsedTime: 0, }), }, mcCfg: mockCollectorConfig{ InjectHTTPStatus: []int{504}, InjectResponseHeader: []map[string]string{ {"Retry-After": "10"}, }, }, }, { name: "with empty paths (forced to defaults)", opts: []otlptracehttp.Option{ otlptracehttp.WithURLPath(""), }, }, { name: "with relative paths", opts: []otlptracehttp.Option{ otlptracehttp.WithURLPath(relOtherTracesPath), }, mcCfg: mockCollectorConfig{ TracesURLPath: otherTracesPath, }, }, { name: "with TLS", opts: nil, mcCfg: mockCollectorConfig{ WithTLS: true, }, tls: true, }, { name: "with extra headers", opts: []otlptracehttp.Option{ otlptracehttp.WithHeaders(testHeaders), }, mcCfg: mockCollectorConfig{ ExpectedHeaders: testHeaders, }, }, { name: "with custom user agent", opts: []otlptracehttp.Option{ otlptracehttp.WithHeaders(customUserAgentHeader), }, mcCfg: mockCollectorConfig{ ExpectedHeaders: customUserAgentHeader, }, }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mc := runMockCollector(t, tc.mcCfg) defer mc.MustStop(t) allOpts := []otlptracehttp.Option{ otlptracehttp.WithEndpoint(mc.Endpoint()), } if tc.tls { tlsConfig := mc.ClientTLSConfig() require.NotNil(t, tlsConfig) allOpts = append(allOpts, otlptracehttp.WithTLSClientConfig(tlsConfig)) } else { allOpts = append(allOpts, otlptracehttp.WithInsecure()) } allOpts = append(allOpts, tc.opts...) client := otlptracehttp.NewClient(allOpts...) ctx := context.Background() exporter, err := otlptrace.New(ctx, client) if assert.NoError(t, err) { defer func() { assert.NoError(t, exporter.Shutdown(ctx)) }() otlptracetest.RunEndToEndTest(ctx, t, exporter, mc) } }) } } func TestExporterShutdown(t *testing.T) { mc := runMockCollector(t, mockCollectorConfig{}) defer func() { _ = mc.Stop() }() <-time.After(5 * time.Millisecond) otlptracetest.RunExporterShutdownTest(t, func() otlptrace.Client { return otlptracehttp.NewClient( otlptracehttp.WithInsecure(), otlptracehttp.WithEndpoint(mc.endpoint), ) }) } func TestTimeout(t *testing.T) { delay := make(chan struct{}) mcCfg := mockCollectorConfig{Delay: delay} mc := runMockCollector(t, mcCfg) defer mc.MustStop(t) defer func() { close(delay) }() client := otlptracehttp.NewClient( otlptracehttp.WithEndpoint(mc.Endpoint()), otlptracehttp.WithInsecure(), otlptracehttp.WithTimeout(time.Nanosecond), otlptracehttp.WithRetry(otlptracehttp.RetryConfig{Enabled: false}), ) ctx := context.Background() exporter, err := otlptrace.New(ctx, client) require.NoError(t, err) defer func() { assert.NoError(t, exporter.Shutdown(ctx)) }() err = exporter.ExportSpans(ctx, otlptracetest.SingleReadOnlySpan()) assert.ErrorContains(t, err, "retry-able request failure") } func TestNoRetry(t *testing.T) { mc := runMockCollector(t, mockCollectorConfig{ InjectHTTPStatus: []int{http.StatusBadRequest}, }) defer mc.MustStop(t) driver := otlptracehttp.NewClient( otlptracehttp.WithEndpoint(mc.Endpoint()), otlptracehttp.WithInsecure(), otlptracehttp.WithRetry(otlptracehttp.RetryConfig{ Enabled: true, InitialInterval: 1 * time.Nanosecond, MaxInterval: 1 * time.Nanosecond, // Never stop retry of retry-able status. 
MaxElapsedTime: 0, }), ) ctx := context.Background() exporter, err := otlptrace.New(ctx, driver) require.NoError(t, err) defer func() { assert.NoError(t, exporter.Shutdown(ctx)) }() err = exporter.ExportSpans(ctx, otlptracetest.SingleReadOnlySpan()) assert.Error(t, err) unwrapped := errors.Unwrap(err) assert.Equal(t, fmt.Sprintf("failed to send to http://%s/v1/traces: 400 Bad Request", mc.endpoint), unwrapped.Error()) assert.True(t, strings.HasPrefix(err.Error(), "traces export: ")) assert.Empty(t, mc.GetSpans()) } func TestEmptyData(t *testing.T) { mcCfg := mockCollectorConfig{} mc := runMockCollector(t, mcCfg) defer mc.MustStop(t) driver := otlptracehttp.NewClient( otlptracehttp.WithEndpoint(mc.Endpoint()), otlptracehttp.WithInsecure(), ) ctx := context.Background() exporter, err := otlptrace.New(ctx, driver) require.NoError(t, err) defer func() { assert.NoError(t, exporter.Shutdown(ctx)) }() assert.NoError(t, err) err = exporter.ExportSpans(ctx, nil) assert.NoError(t, err) assert.Empty(t, mc.GetSpans()) } func TestCancelledContext(t *testing.T) { mcCfg := mockCollectorConfig{} mc := runMockCollector(t, mcCfg) defer mc.MustStop(t) driver := otlptracehttp.NewClient( otlptracehttp.WithEndpoint(mc.Endpoint()), otlptracehttp.WithInsecure(), ) ctx, cancel := context.WithCancel(context.Background()) exporter, err := otlptrace.New(ctx, driver) require.NoError(t, err) defer func() { assert.NoError(t, exporter.Shutdown(context.Background())) }() cancel() err = exporter.ExportSpans(ctx, otlptracetest.SingleReadOnlySpan()) assert.Error(t, err) assert.Empty(t, mc.GetSpans()) } func TestDeadlineContext(t *testing.T) { statuses := make([]int, 0, 5) for i := 0; i < cap(statuses); i++ { statuses = append(statuses, http.StatusTooManyRequests) } mcCfg := mockCollectorConfig{ InjectHTTPStatus: statuses, } mc := runMockCollector(t, mcCfg) defer mc.MustStop(t) driver := otlptracehttp.NewClient( otlptracehttp.WithEndpoint(mc.Endpoint()), otlptracehttp.WithInsecure(), otlptracehttp.WithRetry(otlptracehttp.RetryConfig{ Enabled: true, InitialInterval: 1 * time.Hour, MaxInterval: 1 * time.Hour, // Never stop retry of retry-able status. MaxElapsedTime: 0, }), ) ctx := context.Background() exporter, err := otlptrace.New(ctx, driver) require.NoError(t, err) defer func() { assert.NoError(t, exporter.Shutdown(context.Background())) }() ctx, cancel := context.WithTimeout(ctx, time.Second) defer cancel() err = exporter.ExportSpans(ctx, otlptracetest.SingleReadOnlySpan()) assert.Error(t, err) assert.Empty(t, mc.GetSpans()) } func TestStopWhileExportingConcurrentSafe(t *testing.T) { statuses := make([]int, 0, 5) for i := 0; i < cap(statuses); i++ { statuses = append(statuses, http.StatusTooManyRequests) } mcCfg := mockCollectorConfig{ InjectHTTPStatus: statuses, } mc := runMockCollector(t, mcCfg) defer mc.MustStop(t) driver := otlptracehttp.NewClient( otlptracehttp.WithEndpoint(mc.Endpoint()), otlptracehttp.WithInsecure(), otlptracehttp.WithRetry(otlptracehttp.RetryConfig{ Enabled: true, InitialInterval: 1 * time.Hour, MaxInterval: 1 * time.Hour, // Never stop retry of retry-able status. 
MaxElapsedTime: 0, }), ) ctx := context.Background() exporter, err := otlptrace.New(ctx, driver) require.NoError(t, err) defer func() { assert.NoError(t, exporter.Shutdown(ctx)) }() doneCh := make(chan struct{}) go func() { err := exporter.ExportSpans(ctx, otlptracetest.SingleReadOnlySpan()) assert.Error(t, err) assert.Empty(t, mc.GetSpans()) close(doneCh) }() <-time.After(time.Second) err = exporter.Shutdown(ctx) assert.NoError(t, err) <-doneCh } func TestPartialSuccess(t *testing.T) { mcCfg := mockCollectorConfig{ Partial: &coltracepb.ExportTracePartialSuccess{ RejectedSpans: 2, ErrorMessage: "partially successful", }, } mc := runMockCollector(t, mcCfg) defer mc.MustStop(t) driver := otlptracehttp.NewClient( otlptracehttp.WithEndpoint(mc.Endpoint()), otlptracehttp.WithInsecure(), ) ctx := context.Background() exporter, err := otlptrace.New(ctx, driver) require.NoError(t, err) defer func() { assert.NoError(t, exporter.Shutdown(context.Background())) }() errs := []error{} otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) { errs = append(errs, err) })) err = exporter.ExportSpans(ctx, otlptracetest.SingleReadOnlySpan()) assert.NoError(t, err) require.Equal(t, 1, len(errs)) require.Contains(t, errs[0].Error(), "partially successful") require.Contains(t, errs[0].Error(), "2 spans rejected") } func TestOtherHTTPSuccess(t *testing.T) { for code := 201; code <= 299; code++ { t.Run(fmt.Sprintf("status_%d", code), func(t *testing.T) { mcCfg := mockCollectorConfig{ InjectHTTPStatus: []int{code}, } mc := runMockCollector(t, mcCfg) defer mc.MustStop(t) driver := otlptracehttp.NewClient( otlptracehttp.WithEndpoint(mc.Endpoint()), otlptracehttp.WithInsecure(), ) ctx := context.Background() exporter, err := otlptrace.New(ctx, driver) require.NoError(t, err) defer func() { assert.NoError(t, exporter.Shutdown(context.Background())) }() errs := []error{} otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) { errs = append(errs, err) })) err = exporter.ExportSpans(ctx, otlptracetest.SingleReadOnlySpan()) assert.NoError(t, err) assert.Equal(t, 0, len(errs)) }) } } func TestCollectorRespondingNonProtobufContent(t *testing.T) { mcCfg := mockCollectorConfig{ InjectContentType: "application/octet-stream", } mc := runMockCollector(t, mcCfg) defer mc.MustStop(t) driver := otlptracehttp.NewClient( otlptracehttp.WithEndpoint(mc.Endpoint()), otlptracehttp.WithInsecure(), ) ctx := context.Background() exporter, err := otlptrace.New(ctx, driver) require.NoError(t, err) defer func() { assert.NoError(t, exporter.Shutdown(context.Background())) }() err = exporter.ExportSpans(ctx, otlptracetest.SingleReadOnlySpan()) assert.NoError(t, err) assert.Len(t, mc.GetSpans(), 1) } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/doc.go000066400000000000000000000100731452547353200261040ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* Package otlptracehttp provides an OTLP span exporter using HTTP with protobuf payloads. 
By default the telemetry is sent to https://localhost:4318/v1/traces. Exporter should be created using [New]. The environment variables described below can be used for configuration.

OTEL_EXPORTER_OTLP_ENDPOINT (default: "https://localhost:4318") - target base URL ("/v1/traces" is appended) to which the exporter sends telemetry. The value must contain a scheme ("http" or "https") and host. The value may additionally contain a port and a path. The value should not contain a query string or fragment. The configuration can be overridden by OTEL_EXPORTER_OTLP_TRACES_ENDPOINT environment variable and by [WithEndpoint], [WithInsecure] options.

OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4318/v1/traces") - target URL to which the exporter sends telemetry. The value must contain a scheme ("http" or "https") and host. The value may additionally contain a port and a path. The value should not contain a query string or fragment. The configuration can be overridden by [WithEndpoint], [WithInsecure], [WithURLPath] options.

OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS (default: none) - key-value pairs used as headers associated with HTTP requests. The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format], except that additional semi-colon delimited metadata is not supported. Example value: "key1=value1,key2=value2". OTEL_EXPORTER_OTLP_TRACES_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS. The configuration can be overridden by [WithHeaders] option.

OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT (default: "10000") - maximum time in milliseconds the OTLP exporter waits for each batch export. OTEL_EXPORTER_OTLP_TRACES_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT. The configuration can be overridden by [WithTimeout] option.

OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION (default: none) - the compression strategy the exporter uses to compress the HTTP body. Supported value: "gzip". OTEL_EXPORTER_OTLP_TRACES_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION. The configuration can be overridden by [WithCompression] option.

OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE (default: none) - the filepath to the trusted certificate to use when verifying a server's TLS credentials. OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE. The configuration can be overridden by [WithTLSClientConfig] option.

OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE (default: none) - the filepath to the client certificate/chain trust for the client's private key to use in mTLS communication in PEM format. OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE. The configuration can be overridden by [WithTLSClientConfig] option.

OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY (default: none) - the filepath to the client's private key to use in mTLS communication in PEM format. OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY. The configuration can be overridden by [WithTLSClientConfig] option.
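As a sketch, the same settings can also be applied in code through exporter
options (the endpoint value below is only a placeholder):

	exp, err := otlptracehttp.New(ctx,
		otlptracehttp.WithEndpoint("collector.example.com:4318"),
		otlptracehttp.WithCompression(otlptracehttp.GzipCompression),
		otlptracehttp.WithTimeout(10*time.Second),
	)

Option values take precedence over the corresponding environment variables.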
[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content */ package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/example_test.go000066400000000000000000000022521452547353200300310ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlptracehttp_test import ( "context" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" "go.opentelemetry.io/otel/sdk/trace" ) func Example() { ctx := context.Background() exp, err := otlptracehttp.New(ctx) if err != nil { panic(err) } tracerProvider := trace.NewTracerProvider(trace.WithBatcher(exp)) defer func() { if err := tracerProvider.Shutdown(ctx); err != nil { panic(err) } }() otel.SetTracerProvider(tracerProvider) // From here, the tracerProvider can be used by instrumentation to collect // telemetry. } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/exporter.go000066400000000000000000000021431452547353200272060ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" import ( "context" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" ) // New constructs a new Exporter and starts it. func New(ctx context.Context, opts ...Option) (*otlptrace.Exporter, error) { return otlptrace.New(ctx, NewClient(opts...)) } // NewUnstarted constructs a new Exporter and does not start it. 
func NewUnstarted(opts ...Option) *otlptrace.Exporter { return otlptrace.NewUnstarted(NewClient(opts...)) } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/go.mod000066400000000000000000000027021452547353200261160ustar00rootroot00000000000000module go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp go 1.20 require ( github.com/cenkalti/backoff/v4 v4.2.1 github.com/stretchr/testify v1.8.4 go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 go.opentelemetry.io/otel/sdk v1.21.0 go.opentelemetry.io/otel/trace v1.21.0 go.opentelemetry.io/proto/otlp v1.0.0 google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.31.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/kr/text v0.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect golang.org/x/net v0.17.0 // indirect golang.org/x/sys v0.14.0 // indirect golang.org/x/text v0.13.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) replace go.opentelemetry.io/otel/exporters/otlp/otlptrace => ../ replace go.opentelemetry.io/otel => ../../../.. replace go.opentelemetry.io/otel/sdk => ../../../../sdk replace go.opentelemetry.io/otel/trace => ../../../../trace replace go.opentelemetry.io/otel/metric => ../../../../metric opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/go.sum000066400000000000000000000106051452547353200261440ustar00rootroot00000000000000github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/text v0.2.0 
h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/internal/000077500000000000000000000000001452547353200266235ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/000077500000000000000000000000001452547353200306015ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go000066400000000000000000000131701452547353200331100ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. 
// source: internal/shared/otlp/envconfig/envconfig.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig" import ( "crypto/tls" "crypto/x509" "errors" "fmt" "net/url" "strconv" "strings" "time" "go.opentelemetry.io/otel/internal/global" ) // ConfigFn is the generic function used to set a config. type ConfigFn func(*EnvOptionsReader) // EnvOptionsReader reads the required environment variables. type EnvOptionsReader struct { GetEnv func(string) string ReadFile func(string) ([]byte, error) Namespace string } // Apply runs every ConfigFn. func (e *EnvOptionsReader) Apply(opts ...ConfigFn) { for _, o := range opts { o(e) } } // GetEnvValue gets an OTLP environment variable value of the specified key // using the GetEnv function. // This function prepends the OTLP specified namespace to all key lookups. func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) { v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key))) return v, v != "" } // WithString retrieves the specified config and passes it to ConfigFn as a string. func WithString(n string, fn func(string)) func(e *EnvOptionsReader) { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { fn(v) } } } // WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn. func WithBool(n string, fn func(bool)) ConfigFn { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { b := strings.ToLower(v) == "true" fn(b) } } } // WithDuration retrieves the specified config and passes it to ConfigFn as a duration. func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { d, err := strconv.Atoi(v) if err != nil { global.Error(err, "parse duration", "input", v) return } fn(time.Duration(d) * time.Millisecond) } } } // WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers. func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { fn(stringToHeader(v)) } } } // WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL. func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { u, err := url.Parse(v) if err != nil { global.Error(err, "parse url", "input", v) return } fn(u) } } } // WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn. 
func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { b, err := e.ReadFile(v) if err != nil { global.Error(err, "read tls ca cert file", "file", v) return } c, err := createCertPool(b) if err != nil { global.Error(err, "create tls cert pool") return } fn(c) } } } // WithClientCert returns a ConfigFn that reads the environment variable nc and nk as filepaths to a client certificate and key pair. If they exists, they are parsed as a crypto/tls.Certificate and it is passed to fn. func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn { return func(e *EnvOptionsReader) { vc, okc := e.GetEnvValue(nc) vk, okk := e.GetEnvValue(nk) if !okc || !okk { return } cert, err := e.ReadFile(vc) if err != nil { global.Error(err, "read tls client cert", "file", vc) return } key, err := e.ReadFile(vk) if err != nil { global.Error(err, "read tls client key", "file", vk) return } crt, err := tls.X509KeyPair(cert, key) if err != nil { global.Error(err, "create tls client key pair") return } fn(crt) } } func keyWithNamespace(ns, key string) string { if ns == "" { return key } return fmt.Sprintf("%s_%s", ns, key) } func stringToHeader(value string) map[string]string { headersPairs := strings.Split(value, ",") headers := make(map[string]string) for _, header := range headersPairs { n, v, found := strings.Cut(header, "=") if !found { global.Error(errors.New("missing '="), "parse headers", "input", header) continue } name, err := url.PathUnescape(n) if err != nil { global.Error(err, "escape header key", "key", n) continue } trimmedName := strings.TrimSpace(name) value, err := url.PathUnescape(v) if err != nil { global.Error(err, "escape header value", "value", v) continue } trimmedValue := strings.TrimSpace(value) headers[trimmedName] = trimmedValue } return headers } func createCertPool(certBytes []byte) (*x509.CertPool, error) { cp := x509.NewCertPool() if ok := cp.AppendCertsFromPEM(certBytes); !ok { return nil, errors.New("failed to append certificate to the cert pool") } return cp, nil } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig_test.go000066400000000000000000000260371452547353200341550ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/envconfig/envconfig_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
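// A minimal sketch (the callbacks are illustrative) of how EnvOptionsReader and
// the With* helpers above compose: the reader namespaces and resolves the
// environment variables, then forwards each parsed value to its callback.
//
//	reader := EnvOptionsReader{
//		GetEnv:    os.Getenv,
//		ReadFile:  os.ReadFile,
//		Namespace: "OTEL_EXPORTER_OTLP",
//	}
//	reader.Apply(
//		WithString("ENDPOINT", func(v string) { /* e.g. store the endpoint */ }),
//		WithDuration("TIMEOUT", func(d time.Duration) { /* e.g. store the timeout */ }),
//	)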
package envconfig import ( "crypto/tls" "crypto/x509" "errors" "net/url" "testing" "time" "github.com/stretchr/testify/assert" ) const WeakKey = ` -----BEGIN EC PRIVATE KEY----- MHcCAQEEIEbrSPmnlSOXvVzxCyv+VR3a0HDeUTvOcqrdssZ2k4gFoAoGCCqGSM49 AwEHoUQDQgAEDMTfv75J315C3K9faptS9iythKOMEeV/Eep73nWX531YAkmmwBSB 2dXRD/brsgLnfG57WEpxZuY7dPRbxu33BA== -----END EC PRIVATE KEY----- ` const WeakCertificate = ` -----BEGIN CERTIFICATE----- MIIBjjCCATWgAwIBAgIUKQSMC66MUw+kPp954ZYOcyKAQDswCgYIKoZIzj0EAwIw EjEQMA4GA1UECgwHb3RlbC1nbzAeFw0yMjEwMTkwMDA5MTlaFw0yMzEwMTkwMDA5 MTlaMBIxEDAOBgNVBAoMB290ZWwtZ28wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNC AAQMxN+/vknfXkLcr19qm1L2LK2Eo4wR5X8R6nvedZfnfVgCSabAFIHZ1dEP9uuy Aud8bntYSnFm5jt09FvG7fcEo2kwZzAdBgNVHQ4EFgQUicGuhnTTkYLZwofXMNLK SHFeCWgwHwYDVR0jBBgwFoAUicGuhnTTkYLZwofXMNLKSHFeCWgwDwYDVR0TAQH/ BAUwAwEB/zAUBgNVHREEDTALgglsb2NhbGhvc3QwCgYIKoZIzj0EAwIDRwAwRAIg Lfma8FnnxeSOi6223AsFfYwsNZ2RderNsQrS0PjEHb0CIBkrWacqARUAu7uT4cGu jVcIxYQqhId5L8p/mAv2PWZS -----END CERTIFICATE----- ` type testOption struct { TestString string TestBool bool TestDuration time.Duration TestHeaders map[string]string TestURL *url.URL TestTLS *tls.Config } func TestEnvConfig(t *testing.T) { parsedURL, err := url.Parse("https://example.com") assert.NoError(t, err) options := []testOption{} for _, testcase := range []struct { name string reader EnvOptionsReader configs []ConfigFn expectedOptions []testOption }{ { name: "with no namespace and a matching key", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithString("HELLO", func(v string) { options = append(options, testOption{TestString: v}) }), }, expectedOptions: []testOption{ { TestString: "world", }, }, }, { name: "with no namespace and a non-matching key", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithString("HOLA", func(v string) { options = append(options, testOption{TestString: v}) }), }, expectedOptions: []testOption{}, }, { name: "with a namespace and a matching key", reader: EnvOptionsReader{ Namespace: "MY_NAMESPACE", GetEnv: func(n string) string { if n == "MY_NAMESPACE_HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithString("HELLO", func(v string) { options = append(options, testOption{TestString: v}) }), }, expectedOptions: []testOption{ { TestString: "world", }, }, }, { name: "with no namespace and a non-matching key", reader: EnvOptionsReader{ Namespace: "MY_NAMESPACE", GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithString("HELLO", func(v string) { options = append(options, testOption{TestString: v}) }), }, expectedOptions: []testOption{}, }, { name: "with a bool config", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "true" } else if n == "WORLD" { return "false" } return "" }, }, configs: []ConfigFn{ WithBool("HELLO", func(b bool) { options = append(options, testOption{TestBool: b}) }), WithBool("WORLD", func(b bool) { options = append(options, testOption{TestBool: b}) }), }, expectedOptions: []testOption{ { TestBool: true, }, { TestBool: false, }, }, }, { name: "with an invalid bool config", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithBool("HELLO", func(b bool) { options = append(options, testOption{TestBool: b}) }), }, expectedOptions: []testOption{ { TestBool: false, 
}, }, }, { name: "with a duration config", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "60" } return "" }, }, configs: []ConfigFn{ WithDuration("HELLO", func(v time.Duration) { options = append(options, testOption{TestDuration: v}) }), }, expectedOptions: []testOption{ { TestDuration: 60_000_000, // 60 milliseconds }, }, }, { name: "with an invalid duration config", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithDuration("HELLO", func(v time.Duration) { options = append(options, testOption{TestDuration: v}) }), }, expectedOptions: []testOption{}, }, { name: "with headers", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "userId=42,userName=alice" } return "" }, }, configs: []ConfigFn{ WithHeaders("HELLO", func(v map[string]string) { options = append(options, testOption{TestHeaders: v}) }), }, expectedOptions: []testOption{ { TestHeaders: map[string]string{ "userId": "42", "userName": "alice", }, }, }, }, { name: "with invalid headers", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithHeaders("HELLO", func(v map[string]string) { options = append(options, testOption{TestHeaders: v}) }), }, expectedOptions: []testOption{ { TestHeaders: map[string]string{}, }, }, }, { name: "with URL", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "https://example.com" } return "" }, }, configs: []ConfigFn{ WithURL("HELLO", func(v *url.URL) { options = append(options, testOption{TestURL: v}) }), }, expectedOptions: []testOption{ { TestURL: parsedURL, }, }, }, { name: "with invalid URL", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "i nvalid://url" } return "" }, }, configs: []ConfigFn{ WithURL("HELLO", func(v *url.URL) { options = append(options, testOption{TestURL: v}) }), }, expectedOptions: []testOption{}, }, } { t.Run(testcase.name, func(t *testing.T) { testcase.reader.Apply(testcase.configs...) assert.Equal(t, testcase.expectedOptions, options) options = []testOption{} }) } } func TestWithTLSConfig(t *testing.T) { pool, err := createCertPool([]byte(WeakCertificate)) assert.NoError(t, err) reader := EnvOptionsReader{ GetEnv: func(n string) string { if n == "CERTIFICATE" { return "/path/cert.pem" } return "" }, ReadFile: func(p string) ([]byte, error) { if p == "/path/cert.pem" { return []byte(WeakCertificate), nil } return []byte{}, nil }, } var option testOption reader.Apply( WithCertPool("CERTIFICATE", func(cp *x509.CertPool) { option = testOption{TestTLS: &tls.Config{RootCAs: cp}} }), ) // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. 
assert.Equal(t, pool.Subjects(), option.TestTLS.RootCAs.Subjects()) } func TestWithClientCert(t *testing.T) { cert, err := tls.X509KeyPair([]byte(WeakCertificate), []byte(WeakKey)) assert.NoError(t, err) reader := EnvOptionsReader{ GetEnv: func(n string) string { switch n { case "CLIENT_CERTIFICATE": return "/path/tls.crt" case "CLIENT_KEY": return "/path/tls.key" } return "" }, ReadFile: func(n string) ([]byte, error) { switch n { case "/path/tls.crt": return []byte(WeakCertificate), nil case "/path/tls.key": return []byte(WeakKey), nil } return []byte{}, nil }, } var option testOption reader.Apply( WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { option = testOption{TestTLS: &tls.Config{Certificates: []tls.Certificate{c}}} }), ) assert.Equal(t, cert, option.TestTLS.Certificates[0]) reader.ReadFile = func(s string) ([]byte, error) { return nil, errors.New("oops") } option.TestTLS = nil reader.Apply( WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { option = testOption{TestTLS: &tls.Config{Certificates: []tls.Certificate{c}}} }), ) assert.Nil(t, option.TestTLS) reader.GetEnv = func(s string) string { return "" } option.TestTLS = nil reader.Apply( WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { option = testOption{TestTLS: &tls.Config{Certificates: []tls.Certificate{c}}} }), ) assert.Nil(t, option.TestTLS) } func TestStringToHeader(t *testing.T) { tests := []struct { name string value string want map[string]string }{ { name: "simple test", value: "userId=alice", want: map[string]string{"userId": "alice"}, }, { name: "simple test with spaces", value: " userId = alice ", want: map[string]string{"userId": "alice"}, }, { name: "simple header conforms to RFC 3986 spec", value: " userId = alice+test ", want: map[string]string{"userId": "alice+test"}, }, { name: "multiple headers encoded", value: "userId=alice,serverNode=DF%3A28,isProduction=false", want: map[string]string{ "userId": "alice", "serverNode": "DF:28", "isProduction": "false", }, }, { name: "multiple headers encoded per RFC 3986 spec", value: "userId=alice+test,serverNode=DF%3A28,isProduction=false,namespace=localhost/test", want: map[string]string{ "userId": "alice+test", "serverNode": "DF:28", "isProduction": "false", "namespace": "localhost/test", }, }, { name: "invalid headers format", value: "userId:alice", want: map[string]string{}, }, { name: "invalid key", value: "%XX=missing,userId=alice", want: map[string]string{ "userId": "alice", }, }, { name: "invalid value", value: "missing=%XX,userId=alice", want: map[string]string{ "userId": "alice", }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { assert.Equal(t, tt.want, stringToHeader(tt.value)) }) } } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/internal/gen.go000066400000000000000000000060471452547353200277320ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal" //go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig\"}" --out=otlpconfig/envconfig.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry\"}" --out=otlpconfig/options.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig\"}" --out=otlpconfig/options_test.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl "--data={}" --out=otlpconfig/optiontypes.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl "--data={}" --out=otlpconfig/tls.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/client.go.tmpl "--data={}" --out=otlptracetest/client.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/collector.go.tmpl "--data={}" --out=otlptracetest/collector.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/data.go.tmpl "--data={}" --out=otlptracetest/data.go //go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl "--data={}" --out=otlptracetest/otlptest.go opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/000077500000000000000000000000001452547353200307675ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/envconfig.go000066400000000000000000000124301452547353200332740ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig" import ( "crypto/tls" "crypto/x509" "net/url" "os" "path" "strings" "time" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig" ) // DefaultEnvOptionsReader is the default environments reader. var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{ GetEnv: os.Getenv, ReadFile: os.ReadFile, Namespace: "OTEL_EXPORTER_OTLP", } // ApplyGRPCEnvConfigs applies the env configurations for gRPC. func ApplyGRPCEnvConfigs(cfg Config) Config { opts := getOptionsFromEnv() for _, opt := range opts { cfg = opt.ApplyGRPCOption(cfg) } return cfg } // ApplyHTTPEnvConfigs applies the env configurations for HTTP. func ApplyHTTPEnvConfigs(cfg Config) Config { opts := getOptionsFromEnv() for _, opt := range opts { cfg = opt.ApplyHTTPOption(cfg) } return cfg } func getOptionsFromEnv() []GenericOption { opts := []GenericOption{} tlsConf := &tls.Config{} DefaultEnvOptionsReader.Apply( envconfig.WithURL("ENDPOINT", func(u *url.URL) { opts = append(opts, withEndpointScheme(u)) opts = append(opts, newSplitOption(func(cfg Config) Config { cfg.Traces.Endpoint = u.Host // For OTLP/HTTP endpoint URLs without a per-signal // configuration, the passed endpoint is used as a base URL // and the signals are sent to these paths relative to that. cfg.Traces.URLPath = path.Join(u.Path, DefaultTracesPath) return cfg }, withEndpointForGRPC(u))) }), envconfig.WithURL("TRACES_ENDPOINT", func(u *url.URL) { opts = append(opts, withEndpointScheme(u)) opts = append(opts, newSplitOption(func(cfg Config) Config { cfg.Traces.Endpoint = u.Host // For endpoint URLs for OTLP/HTTP per-signal variables, the // URL MUST be used as-is without any modification. The only // exception is that if an URL contains no path part, the root // path / MUST be used. 
path := u.Path if path == "" { path = "/" } cfg.Traces.URLPath = path return cfg }, withEndpointForGRPC(u))) }), envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), envconfig.WithHeaders("TRACES_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), WithEnvCompression("TRACES_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), envconfig.WithDuration("TRACES_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), ) return opts } func withEndpointScheme(u *url.URL) GenericOption { switch strings.ToLower(u.Scheme) { case "http", "unix": return WithInsecure() default: return WithSecure() } } func withEndpointForGRPC(u *url.URL) func(cfg Config) Config { return func(cfg Config) Config { // For OTLP/gRPC endpoints, this is the target to which the // exporter is going to send telemetry. cfg.Traces.Endpoint = path.Join(u.Host, u.Path) return cfg } } // WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression. func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) { return func(e *envconfig.EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { cp := NoCompression if v == "gzip" { cp = GzipCompression } fn(cp) } } } // revive:disable-next-line:flag-parameter func withInsecure(b bool) GenericOption { if b { return WithInsecure() } return WithSecure() } func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) { return func(e *envconfig.EnvOptionsReader) { if c.RootCAs != nil || len(c.Certificates) > 0 { fn(c) } } } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go000066400000000000000000000211501452547353200330100ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig" import ( "crypto/tls" "fmt" "path" "strings" "time" "google.golang.org/grpc" "google.golang.org/grpc/backoff" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/encoding/gzip" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry" ) const ( // DefaultTracesPath is a default URL path for endpoint that // receives spans. DefaultTracesPath string = "/v1/traces" // DefaultTimeout is a default max waiting time for the backend to process // each span batch. DefaultTimeout time.Duration = 10 * time.Second ) type ( SignalConfig struct { Endpoint string Insecure bool TLSCfg *tls.Config Headers map[string]string Compression Compression Timeout time.Duration URLPath string // gRPC configurations GRPCCredentials credentials.TransportCredentials } Config struct { // Signal specific configurations Traces SignalConfig RetryConfig retry.Config // gRPC configurations ReconnectionPeriod time.Duration ServiceConfig string DialOptions []grpc.DialOption GRPCConn *grpc.ClientConn } ) // NewHTTPConfig returns a new Config with all settings applied from opts and // any unset setting using the default HTTP config values. func NewHTTPConfig(opts ...HTTPOption) Config { cfg := Config{ Traces: SignalConfig{ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort), URLPath: DefaultTracesPath, Compression: NoCompression, Timeout: DefaultTimeout, }, RetryConfig: retry.DefaultConfig, } cfg = ApplyHTTPEnvConfigs(cfg) for _, opt := range opts { cfg = opt.ApplyHTTPOption(cfg) } cfg.Traces.URLPath = cleanPath(cfg.Traces.URLPath, DefaultTracesPath) return cfg } // cleanPath returns a path with all spaces trimmed and all redundancies // removed. If urlPath is empty or cleaning it results in an empty string, // defaultPath is returned instead. func cleanPath(urlPath string, defaultPath string) string { tmp := path.Clean(strings.TrimSpace(urlPath)) if tmp == "." { return defaultPath } if !path.IsAbs(tmp) { tmp = fmt.Sprintf("/%s", tmp) } return tmp } // NewGRPCConfig returns a new Config with all settings applied from opts and // any unset setting using the default gRPC config values. func NewGRPCConfig(opts ...GRPCOption) Config { userAgent := "OTel OTLP Exporter Go/" + otlptrace.Version() cfg := Config{ Traces: SignalConfig{ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort), URLPath: DefaultTracesPath, Compression: NoCompression, Timeout: DefaultTimeout, }, RetryConfig: retry.DefaultConfig, DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)}, } cfg = ApplyGRPCEnvConfigs(cfg) for _, opt := range opts { cfg = opt.ApplyGRPCOption(cfg) } if cfg.ServiceConfig != "" { cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig)) } // Priroritize GRPCCredentials over Insecure (passing both is an error). if cfg.Traces.GRPCCredentials != nil { cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials)) } else if cfg.Traces.Insecure { cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials())) } else { // Default to using the host's root CA. 
creds := credentials.NewTLS(nil) cfg.Traces.GRPCCredentials = creds cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds)) } if cfg.Traces.Compression == GzipCompression { cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name))) } if cfg.ReconnectionPeriod != 0 { p := grpc.ConnectParams{ Backoff: backoff.DefaultConfig, MinConnectTimeout: cfg.ReconnectionPeriod, } cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p)) } return cfg } type ( // GenericOption applies an option to the HTTP or gRPC driver. GenericOption interface { ApplyHTTPOption(Config) Config ApplyGRPCOption(Config) Config // A private method to prevent users implementing the // interface and so future additions to it will not // violate compatibility. private() } // HTTPOption applies an option to the HTTP driver. HTTPOption interface { ApplyHTTPOption(Config) Config // A private method to prevent users implementing the // interface and so future additions to it will not // violate compatibility. private() } // GRPCOption applies an option to the gRPC driver. GRPCOption interface { ApplyGRPCOption(Config) Config // A private method to prevent users implementing the // interface and so future additions to it will not // violate compatibility. private() } ) // genericOption is an option that applies the same logic // for both gRPC and HTTP. type genericOption struct { fn func(Config) Config } func (g *genericOption) ApplyGRPCOption(cfg Config) Config { return g.fn(cfg) } func (g *genericOption) ApplyHTTPOption(cfg Config) Config { return g.fn(cfg) } func (genericOption) private() {} func newGenericOption(fn func(cfg Config) Config) GenericOption { return &genericOption{fn: fn} } // splitOption is an option that applies different logics // for gRPC and HTTP. type splitOption struct { httpFn func(Config) Config grpcFn func(Config) Config } func (g *splitOption) ApplyGRPCOption(cfg Config) Config { return g.grpcFn(cfg) } func (g *splitOption) ApplyHTTPOption(cfg Config) Config { return g.httpFn(cfg) } func (splitOption) private() {} func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption { return &splitOption{httpFn: httpFn, grpcFn: grpcFn} } // httpOption is an option that is only applied to the HTTP driver. type httpOption struct { fn func(Config) Config } func (h *httpOption) ApplyHTTPOption(cfg Config) Config { return h.fn(cfg) } func (httpOption) private() {} func NewHTTPOption(fn func(cfg Config) Config) HTTPOption { return &httpOption{fn: fn} } // grpcOption is an option that is only applied to the gRPC driver. 
type grpcOption struct { fn func(Config) Config } func (h *grpcOption) ApplyGRPCOption(cfg Config) Config { return h.fn(cfg) } func (grpcOption) private() {} func NewGRPCOption(fn func(cfg Config) Config) GRPCOption { return &grpcOption{fn: fn} } // Generic Options func WithEndpoint(endpoint string) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Traces.Endpoint = endpoint return cfg }) } func WithCompression(compression Compression) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Traces.Compression = compression return cfg }) } func WithURLPath(urlPath string) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Traces.URLPath = urlPath return cfg }) } func WithRetry(rc retry.Config) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.RetryConfig = rc return cfg }) } func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption { return newSplitOption(func(cfg Config) Config { cfg.Traces.TLSCfg = tlsCfg.Clone() return cfg }, func(cfg Config) Config { cfg.Traces.GRPCCredentials = credentials.NewTLS(tlsCfg) return cfg }) } func WithInsecure() GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Traces.Insecure = true return cfg }) } func WithSecure() GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Traces.Insecure = false return cfg }) } func WithHeaders(headers map[string]string) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Traces.Headers = headers return cfg }) } func WithTimeout(duration time.Duration) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Traces.Timeout = duration return cfg }) } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options_test.go000066400000000000000000000337501452547353200340600ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlpconfig/options_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
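// A minimal sketch (endpoint values are illustrative) of the option pattern
// defined in options.go: a GenericOption's method set covers both HTTPOption
// and GRPCOption, so the same constructor, e.g. WithEndpoint, can configure
// either transport.
//
//	httpCfg := NewHTTPConfig(WithEndpoint("collector.example.com:4318"))
//	grpcCfg := NewGRPCConfig(WithEndpoint("collector.example.com:4317"))
//	_, _ = httpCfg, grpcCfg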
package otlpconfig import ( "errors" "testing" "time" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig" ) const ( WeakCertificate = ` -----BEGIN CERTIFICATE----- MIIBhzCCASygAwIBAgIRANHpHgAWeTnLZpTSxCKs0ggwCgYIKoZIzj0EAwIwEjEQ MA4GA1UEChMHb3RlbC1nbzAeFw0yMTA0MDExMzU5MDNaFw0yMTA0MDExNDU5MDNa MBIxEDAOBgNVBAoTB290ZWwtZ28wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAS9 nWSkmPCxShxnp43F+PrOtbGV7sNfkbQ/kxzi9Ego0ZJdiXxkmv/C05QFddCW7Y0Z sJCLHGogQsYnWJBXUZOVo2MwYTAOBgNVHQ8BAf8EBAMCB4AwEwYDVR0lBAwwCgYI KwYBBQUHAwEwDAYDVR0TAQH/BAIwADAsBgNVHREEJTAjgglsb2NhbGhvc3SHEAAA AAAAAAAAAAAAAAAAAAGHBH8AAAEwCgYIKoZIzj0EAwIDSQAwRgIhANwZVVKvfvQ/ 1HXsTvgH+xTQswOwSSKYJ1cVHQhqK7ZbAiEAus8NxpTRnp5DiTMuyVmhVNPB+bVH Lhnm4N/QDk5rek0= -----END CERTIFICATE----- ` WeakPrivateKey = ` -----BEGIN PRIVATE KEY----- MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgN8HEXiXhvByrJ1zK SFT6Y2l2KqDWwWzKf+t4CyWrNKehRANCAAS9nWSkmPCxShxnp43F+PrOtbGV7sNf kbQ/kxzi9Ego0ZJdiXxkmv/C05QFddCW7Y0ZsJCLHGogQsYnWJBXUZOV -----END PRIVATE KEY----- ` ) type env map[string]string func (e *env) getEnv(env string) string { return (*e)[env] } type fileReader map[string][]byte func (f *fileReader) readFile(filename string) ([]byte, error) { if b, ok := (*f)[filename]; ok { return b, nil } return nil, errors.New("file not found") } func TestConfigs(t *testing.T) { tlsCert, err := CreateTLSConfig([]byte(WeakCertificate)) assert.NoError(t, err) tests := []struct { name string opts []GenericOption env env fileReader fileReader asserts func(t *testing.T, c *Config, grpcOption bool) }{ { name: "Test default configs", asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.Equal(t, "localhost:4317", c.Traces.Endpoint) } else { assert.Equal(t, "localhost:4318", c.Traces.Endpoint) } assert.Equal(t, NoCompression, c.Traces.Compression) assert.Equal(t, map[string]string(nil), c.Traces.Headers) assert.Equal(t, 10*time.Second, c.Traces.Timeout) }, }, // Endpoint Tests { name: "Test With Endpoint", opts: []GenericOption{ WithEndpoint("someendpoint"), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "someendpoint", c.Traces.Endpoint) }, }, { name: "Test Environment Endpoint", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "https://env.endpoint/prefix", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.False(t, c.Traces.Insecure) if grpcOption { assert.Equal(t, "env.endpoint/prefix", c.Traces.Endpoint) } else { assert.Equal(t, "env.endpoint", c.Traces.Endpoint) assert.Equal(t, "/prefix/v1/traces", c.Traces.URLPath) } }, }, { name: "Test Environment Signal Specific Endpoint", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "https://overrode.by.signal.specific/env/var", "OTEL_EXPORTER_OTLP_TRACES_ENDPOINT": "http://env.traces.endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.True(t, c.Traces.Insecure) assert.Equal(t, "env.traces.endpoint", c.Traces.Endpoint) if !grpcOption { assert.Equal(t, "/", c.Traces.URLPath) } }, }, { name: "Test Mixed Environment and With Endpoint", opts: []GenericOption{ WithEndpoint("traces_endpoint"), }, env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "env_endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "traces_endpoint", c.Traces.Endpoint) }, }, { name: "Test Environment Endpoint with HTTP scheme", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "http://env_endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) 
{ assert.Equal(t, "env_endpoint", c.Traces.Endpoint) assert.Equal(t, true, c.Traces.Insecure) }, }, { name: "Test Environment Endpoint with HTTP scheme and leading & trailingspaces", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": " http://env_endpoint ", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "env_endpoint", c.Traces.Endpoint) assert.Equal(t, true, c.Traces.Insecure) }, }, { name: "Test Environment Endpoint with HTTPS scheme", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "https://env_endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "env_endpoint", c.Traces.Endpoint) assert.Equal(t, false, c.Traces.Insecure) }, }, { name: "Test Environment Signal Specific Endpoint with uppercase scheme", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "HTTPS://overrode_by_signal_specific", "OTEL_EXPORTER_OTLP_TRACES_ENDPOINT": "HtTp://env_traces_endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "env_traces_endpoint", c.Traces.Endpoint) assert.Equal(t, true, c.Traces.Insecure) }, }, // Certificate tests { name: "Test Default Certificate", asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.NotNil(t, c.Traces.GRPCCredentials) } else { assert.Nil(t, c.Traces.TLSCfg) } }, }, { name: "Test With Certificate", opts: []GenericOption{ WithTLSClientConfig(tlsCert), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { // TODO: make sure gRPC's credentials actually works assert.NotNil(t, c.Traces.GRPCCredentials) } else { // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Traces.TLSCfg.RootCAs.Subjects()) } }, }, { name: "Test Environment Certificate", env: map[string]string{ "OTEL_EXPORTER_OTLP_CERTIFICATE": "cert_path", }, fileReader: fileReader{ "cert_path": []byte(WeakCertificate), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.NotNil(t, c.Traces.GRPCCredentials) } else { // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Traces.TLSCfg.RootCAs.Subjects()) } }, }, { name: "Test Environment Signal Specific Certificate", env: map[string]string{ "OTEL_EXPORTER_OTLP_CERTIFICATE": "overrode_by_signal_specific", "OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE": "cert_path", }, fileReader: fileReader{ "cert_path": []byte(WeakCertificate), "invalid_cert": []byte("invalid certificate file."), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.NotNil(t, c.Traces.GRPCCredentials) } else { // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Traces.TLSCfg.RootCAs.Subjects()) } }, }, { name: "Test Mixed Environment and With Certificate", opts: []GenericOption{}, env: map[string]string{ "OTEL_EXPORTER_OTLP_CERTIFICATE": "cert_path", }, fileReader: fileReader{ "cert_path": []byte(WeakCertificate), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.NotNil(t, c.Traces.GRPCCredentials) } else { // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. 
assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Traces.TLSCfg.RootCAs.Subjects()) } }, }, // Headers tests { name: "Test With Headers", opts: []GenericOption{ WithHeaders(map[string]string{"h1": "v1"}), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, map[string]string{"h1": "v1"}, c.Traces.Headers) }, }, { name: "Test Environment Headers", env: map[string]string{"OTEL_EXPORTER_OTLP_HEADERS": "h1=v1,h2=v2"}, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Traces.Headers) }, }, { name: "Test Environment Signal Specific Headers", env: map[string]string{ "OTEL_EXPORTER_OTLP_HEADERS": "overrode_by_signal_specific", "OTEL_EXPORTER_OTLP_TRACES_HEADERS": "h1=v1,h2=v2", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Traces.Headers) }, }, { name: "Test Mixed Environment and With Headers", env: map[string]string{"OTEL_EXPORTER_OTLP_HEADERS": "h1=v1,h2=v2"}, opts: []GenericOption{}, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Traces.Headers) }, }, // Compression Tests { name: "Test With Compression", opts: []GenericOption{ WithCompression(GzipCompression), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, GzipCompression, c.Traces.Compression) }, }, { name: "Test Environment Compression", env: map[string]string{ "OTEL_EXPORTER_OTLP_COMPRESSION": "gzip", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, GzipCompression, c.Traces.Compression) }, }, { name: "Test Environment Signal Specific Compression", env: map[string]string{ "OTEL_EXPORTER_OTLP_TRACES_COMPRESSION": "gzip", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, GzipCompression, c.Traces.Compression) }, }, { name: "Test Mixed Environment and With Compression", opts: []GenericOption{ WithCompression(NoCompression), }, env: map[string]string{ "OTEL_EXPORTER_OTLP_TRACES_COMPRESSION": "gzip", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, NoCompression, c.Traces.Compression) }, }, // Timeout Tests { name: "Test With Timeout", opts: []GenericOption{ WithTimeout(time.Duration(5 * time.Second)), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, 5*time.Second, c.Traces.Timeout) }, }, { name: "Test Environment Timeout", env: map[string]string{ "OTEL_EXPORTER_OTLP_TIMEOUT": "15000", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, c.Traces.Timeout, 15*time.Second) }, }, { name: "Test Environment Signal Specific Timeout", env: map[string]string{ "OTEL_EXPORTER_OTLP_TIMEOUT": "15000", "OTEL_EXPORTER_OTLP_TRACES_TIMEOUT": "27000", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, c.Traces.Timeout, 27*time.Second) }, }, { name: "Test Mixed Environment and With Timeout", env: map[string]string{ "OTEL_EXPORTER_OTLP_TIMEOUT": "15000", "OTEL_EXPORTER_OTLP_TRACES_TIMEOUT": "27000", }, opts: []GenericOption{ WithTimeout(5 * time.Second), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, c.Traces.Timeout, 5*time.Second) }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { origEOR := DefaultEnvOptionsReader DefaultEnvOptionsReader = envconfig.EnvOptionsReader{ GetEnv: tt.env.getEnv, ReadFile: tt.fileReader.readFile, Namespace: "OTEL_EXPORTER_OTLP", } t.Cleanup(func() { 
DefaultEnvOptionsReader = origEOR }) // Tests Generic options as HTTP Options cfg := NewHTTPConfig(asHTTPOptions(tt.opts)...) tt.asserts(t, &cfg, false) // Tests Generic options as gRPC Options cfg = NewGRPCConfig(asGRPCOptions(tt.opts)...) tt.asserts(t, &cfg, true) }) } } func asHTTPOptions(opts []GenericOption) []HTTPOption { converted := make([]HTTPOption, len(opts)) for i, o := range opts { converted[i] = NewHTTPOption(o.ApplyHTTPOption) } return converted } func asGRPCOptions(opts []GenericOption) []GRPCOption { converted := make([]GRPCOption, len(opts)) for i, o := range opts { converted[i] = NewGRPCOption(o.ApplyGRPCOption) } return converted } func TestCleanPath(t *testing.T) { type args struct { urlPath string defaultPath string } tests := []struct { name string args args want string }{ { name: "clean empty path", args: args{ urlPath: "", defaultPath: "DefaultPath", }, want: "DefaultPath", }, { name: "clean metrics path", args: args{ urlPath: "/prefix/v1/metrics", defaultPath: "DefaultMetricsPath", }, want: "/prefix/v1/metrics", }, { name: "clean traces path", args: args{ urlPath: "https://env_endpoint", defaultPath: "DefaultTracesPath", }, want: "/https:/env_endpoint", }, { name: "spaces trimmed", args: args{ urlPath: " /dir", }, want: "/dir", }, { name: "clean path empty", args: args{ urlPath: "dir/..", defaultPath: "DefaultTracesPath", }, want: "DefaultTracesPath", }, { name: "make absolute", args: args{ urlPath: "dir/a", }, want: "/dir/a", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := cleanPath(tt.args.urlPath, tt.args.defaultPath); got != tt.want { t.Errorf("CleanPath() = %v, want %v", got, tt.want) } }) } } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/optiontypes.go000066400000000000000000000034741452547353200337230ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig" const ( // DefaultCollectorGRPCPort is the default gRPC port of the collector. DefaultCollectorGRPCPort uint16 = 4317 // DefaultCollectorHTTPPort is the default HTTP port of the collector. DefaultCollectorHTTPPort uint16 = 4318 // DefaultCollectorHost is the host address the Exporter will attempt // connect to if no collector address is provided. DefaultCollectorHost string = "localhost" ) // Compression describes the compression used for payloads sent to the // collector. type Compression int const ( // NoCompression tells the driver to send payloads without // compression. NoCompression Compression = iota // GzipCompression tells the driver to send payloads after // compressing them with gzip. GzipCompression ) // Marshaler describes the kind of message format sent to the collector. 
type Marshaler int const ( // MarshalProto tells the driver to send using the protobuf binary format. MarshalProto Marshaler = iota // MarshalJSON tells the driver to send using json format. MarshalJSON ) opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/tls.go000066400000000000000000000023271452547353200321240ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig" import ( "crypto/tls" "crypto/x509" "errors" ) // CreateTLSConfig creates a tls.Config from a raw certificate bytes // to verify a server certificate. func CreateTLSConfig(certBytes []byte) (*tls.Config, error) { cp := x509.NewCertPool() if ok := cp.AppendCertsFromPEM(certBytes); !ok { return nil, errors.New("failed to append certificate to the cert pool") } return &tls.Config{ RootCAs: cp, }, nil } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/internal/otlptracetest/000077500000000000000000000000001452547353200315205ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/internal/otlptracetest/client.go000066400000000000000000000076331452547353200333360ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlptracetest/client.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package otlptracetest // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlptracetest" import ( "context" "errors" "sync" "testing" "time" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" ) func RunExporterShutdownTest(t *testing.T, factory func() otlptrace.Client) { t.Run("testClientStopHonorsTimeout", func(t *testing.T) { testClientStopHonorsTimeout(t, factory()) }) t.Run("testClientStopHonorsCancel", func(t *testing.T) { testClientStopHonorsCancel(t, factory()) }) t.Run("testClientStopNoError", func(t *testing.T) { testClientStopNoError(t, factory()) }) t.Run("testClientStopManyTimes", func(t *testing.T) { testClientStopManyTimes(t, factory()) }) } func initializeExporter(t *testing.T, client otlptrace.Client) *otlptrace.Exporter { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) defer cancel() e, err := otlptrace.New(ctx, client) if err != nil { t.Fatalf("failed to create exporter") } return e } func testClientStopHonorsTimeout(t *testing.T, client otlptrace.Client) { t.Cleanup(func() { // The test is looking for a failed shut down. Call Stop a second time // with an un-expired context to give the client a second chance at // cleaning up. There is not guarantee from the Client interface this // will succeed, therefore, no need to check the error (just give it a // best try). _ = client.Stop(context.Background()) }) e := initializeExporter(t, client) ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond) defer cancel() <-ctx.Done() if err := e.Shutdown(ctx); !errors.Is(err, context.DeadlineExceeded) { t.Errorf("expected context DeadlineExceeded error, got %v", err) } } func testClientStopHonorsCancel(t *testing.T, client otlptrace.Client) { t.Cleanup(func() { // The test is looking for a failed shut down. Call Stop a second time // with an un-expired context to give the client a second chance at // cleaning up. There is not guarantee from the Client interface this // will succeed, therefore, no need to check the error (just give it a // best try). _ = client.Stop(context.Background()) }) e := initializeExporter(t, client) ctx, cancel := context.WithCancel(context.Background()) cancel() if err := e.Shutdown(ctx); !errors.Is(err, context.Canceled) { t.Errorf("expected context canceled error, got %v", err) } } func testClientStopNoError(t *testing.T, client otlptrace.Client) { e := initializeExporter(t, client) ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) defer cancel() if err := e.Shutdown(ctx); err != nil { t.Errorf("shutdown errored: expected nil, got %v", err) } } func testClientStopManyTimes(t *testing.T, client otlptrace.Client) { e := initializeExporter(t, client) ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) defer cancel() ch := make(chan struct{}) wg := sync.WaitGroup{} const num int = 20 wg.Add(num) errs := make([]error, num) for i := 0; i < num; i++ { go func(idx int) { defer wg.Done() <-ch errs[idx] = e.Shutdown(ctx) }(i) } close(ch) wg.Wait() for _, err := range errs { if err != nil { t.Errorf("failed to shutdown exporter: %v", err) return } } } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/internal/otlptracetest/collector.go000066400000000000000000000061661452547353200340460ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. 
// source: internal/shared/otlp/otlptrace/otlptracetest/collector.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlptracetest // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlptracetest" import ( "sort" collectortracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" commonpb "go.opentelemetry.io/proto/otlp/common/v1" resourcepb "go.opentelemetry.io/proto/otlp/resource/v1" tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) // TracesCollector mocks a collector for the end-to-end testing. type TracesCollector interface { Stop() error GetResourceSpans() []*tracepb.ResourceSpans } // SpansStorage stores the spans. Mock collectors can use it to // store spans they have received. type SpansStorage struct { rsm map[string]*tracepb.ResourceSpans spanCount int } // NewSpansStorage creates a new spans storage. func NewSpansStorage() SpansStorage { return SpansStorage{ rsm: make(map[string]*tracepb.ResourceSpans), } } // AddSpans adds spans to the spans storage. func (s *SpansStorage) AddSpans(request *collectortracepb.ExportTraceServiceRequest) { for _, rs := range request.GetResourceSpans() { rstr := resourceString(rs.Resource) if existingRs, ok := s.rsm[rstr]; !ok { s.rsm[rstr] = rs // TODO (rghetia): Add support for library Info. if len(rs.ScopeSpans) == 0 { rs.ScopeSpans = []*tracepb.ScopeSpans{ { Spans: []*tracepb.Span{}, }, } } s.spanCount += len(rs.ScopeSpans[0].Spans) } else { if len(rs.ScopeSpans) > 0 { newSpans := rs.ScopeSpans[0].GetSpans() existingRs.ScopeSpans[0].Spans = append(existingRs.ScopeSpans[0].Spans, newSpans...) s.spanCount += len(newSpans) } } } } // GetSpans returns the stored spans. func (s *SpansStorage) GetSpans() []*tracepb.Span { spans := make([]*tracepb.Span, 0, s.spanCount) for _, rs := range s.rsm { spans = append(spans, rs.ScopeSpans[0].Spans...) } return spans } // GetResourceSpans returns the stored resource spans. func (s *SpansStorage) GetResourceSpans() []*tracepb.ResourceSpans { rss := make([]*tracepb.ResourceSpans, 0, len(s.rsm)) for _, rs := range s.rsm { rss = append(rss, rs) } return rss } func resourceString(res *resourcepb.Resource) string { sAttrs := sortedAttributes(res.GetAttributes()) rstr := "" for _, attr := range sAttrs { rstr = rstr + attr.String() } return rstr } func sortedAttributes(attrs []*commonpb.KeyValue) []*commonpb.KeyValue { sort.Slice(attrs[:], func(i, j int) bool { return attrs[i].Key < attrs[j].Key }) return attrs } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/internal/otlptracetest/data.go000066400000000000000000000047601452547353200327670ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlptracetest/data.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlptracetest // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlptracetest" import ( "time" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" tracesdk "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/sdk/trace/tracetest" "go.opentelemetry.io/otel/trace" ) // SingleReadOnlySpan returns a one-element slice with a read-only span. It // may be useful for testing driver's trace export. func SingleReadOnlySpan() []tracesdk.ReadOnlySpan { return tracetest.SpanStubs{ { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{2, 3, 4, 5, 6, 7, 8, 9, 2, 3, 4, 5, 6, 7, 8, 9}, SpanID: trace.SpanID{3, 4, 5, 6, 7, 8, 9, 0}, TraceFlags: trace.FlagsSampled, }), Parent: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{2, 3, 4, 5, 6, 7, 8, 9, 2, 3, 4, 5, 6, 7, 8, 9}, SpanID: trace.SpanID{1, 2, 3, 4, 5, 6, 7, 8}, TraceFlags: trace.FlagsSampled, }), SpanKind: trace.SpanKindInternal, Name: "foo", StartTime: time.Date(2020, time.December, 8, 20, 23, 0, 0, time.UTC), EndTime: time.Date(2020, time.December, 0, 20, 24, 0, 0, time.UTC), Attributes: []attribute.KeyValue{}, Events: []tracesdk.Event{}, Links: []tracesdk.Link{}, Status: tracesdk.Status{Code: codes.Ok}, DroppedAttributes: 0, DroppedEvents: 0, DroppedLinks: 0, ChildSpanCount: 0, Resource: resource.NewSchemaless(attribute.String("a", "b")), InstrumentationLibrary: instrumentation.Library{ Name: "bar", Version: "0.0.0", }, }, }.Snapshots() } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/internal/otlptracetest/otlptest.go000066400000000000000000000077741452547353200337440ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlptracetest // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlptracetest" import ( "context" "testing" "time" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" commonpb "go.opentelemetry.io/proto/otlp/common/v1" ) // RunEndToEndTest can be used by otlptrace.Client tests to validate // themselves. 
func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlptrace.Exporter, tracesCollector TracesCollector) { pOpts := []sdktrace.TracerProviderOption{ sdktrace.WithSampler(sdktrace.AlwaysSample()), sdktrace.WithBatcher( exp, // add following two options to ensure flush sdktrace.WithBatchTimeout(5*time.Second), sdktrace.WithMaxExportBatchSize(10), ), } tp1 := sdktrace.NewTracerProvider(append(pOpts, sdktrace.WithResource(resource.NewSchemaless( attribute.String("rk1", "rv11)"), attribute.Int64("rk2", 5), )))...) tp2 := sdktrace.NewTracerProvider(append(pOpts, sdktrace.WithResource(resource.NewSchemaless( attribute.String("rk1", "rv12)"), attribute.Float64("rk3", 6.5), )))...) tr1 := tp1.Tracer("test-tracer1") tr2 := tp2.Tracer("test-tracer2") // Now create few spans m := 4 for i := 0; i < m; i++ { _, span := tr1.Start(ctx, "AlwaysSample") span.SetAttributes(attribute.Int64("i", int64(i))) span.End() _, span = tr2.Start(ctx, "AlwaysSample") span.SetAttributes(attribute.Int64("i", int64(i))) span.End() } func() { ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() if err := tp1.Shutdown(ctx); err != nil { t.Fatalf("failed to shut down a tracer provider 1: %v", err) } if err := tp2.Shutdown(ctx); err != nil { t.Fatalf("failed to shut down a tracer provider 2: %v", err) } }() // Wait >2 cycles. <-time.After(40 * time.Millisecond) // Now shutdown the exporter ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() if err := exp.Shutdown(ctx); err != nil { t.Fatalf("failed to stop the exporter: %v", err) } // Shutdown the collector too so that we can begin // verification checks of expected data back. if err := tracesCollector.Stop(); err != nil { t.Fatalf("failed to stop the mock collector: %v", err) } // Now verify that we only got two resources rss := tracesCollector.GetResourceSpans() if got, want := len(rss), 2; got != want { t.Fatalf("resource span count: got %d, want %d\n", got, want) } // Now verify spans and attributes for each resource span. for _, rs := range rss { if len(rs.ScopeSpans) == 0 { t.Fatalf("zero ScopeSpans") } if got, want := len(rs.ScopeSpans[0].Spans), m; got != want { t.Fatalf("span counts: got %d, want %d", got, want) } attrMap := map[int64]bool{} for _, s := range rs.ScopeSpans[0].Spans { if gotName, want := s.Name, "AlwaysSample"; gotName != want { t.Fatalf("span name: got %s, want %s", gotName, want) } attrMap[s.Attributes[0].Value.Value.(*commonpb.AnyValue_IntValue).IntValue] = true } if got, want := len(attrMap), m; got != want { t.Fatalf("span attribute unique values: got %d want %d", got, want) } for i := 0; i < m; i++ { _, ok := attrMap[int64(i)] if !ok { t.Fatalf("span with attribute %d missing", i) } } } } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess.go000066400000000000000000000042141452547353200322000ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/partialsuccess.go // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal" import "fmt" // PartialSuccess represents the underlying error for all handling // OTLP partial success messages. Use `errors.Is(err, // PartialSuccess{})` to test whether an error passed to the OTel // error handler belongs to this category. type PartialSuccess struct { ErrorMessage string RejectedItems int64 RejectedKind string } var _ error = PartialSuccess{} // Error implements the error interface. func (ps PartialSuccess) Error() string { msg := ps.ErrorMessage if msg == "" { msg = "empty message" } return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind) } // Is supports the errors.Is() interface. func (ps PartialSuccess) Is(err error) bool { _, ok := err.(PartialSuccess) return ok } // TracePartialSuccessError returns an error describing a partial success // response for the trace signal. func TracePartialSuccessError(itemsRejected int64, errorMessage string) error { return PartialSuccess{ ErrorMessage: errorMessage, RejectedItems: itemsRejected, RejectedKind: "spans", } } // MetricPartialSuccessError returns an error describing a partial success // response for the metric signal. func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error { return PartialSuccess{ ErrorMessage: errorMessage, RejectedItems: itemsRejected, RejectedKind: "metric data points", } } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/internal/partialsuccess_test.go000066400000000000000000000031221452547353200332340ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/partialsuccess_test.go // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
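// The PartialSuccess error defined above is matched by category rather than by
// value. A minimal sketch of how code that can see this internal package might
// branch on it (the surrounding error-handler wiring is assumed and not shown
// in this file):
//
//	err := TracePartialSuccessError(5, "spans were dropped")
//	if errors.Is(err, PartialSuccess{}) {
//		// Inspect the rejected-item count, or log err.Error(), which renders as
//		// "OTLP partial success: spans were dropped (5 spans rejected)".
//	}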
package internal import ( "errors" "strings" "testing" "github.com/stretchr/testify/require" ) func requireErrorString(t *testing.T, expect string, err error) { t.Helper() require.NotNil(t, err) require.Error(t, err) require.True(t, errors.Is(err, PartialSuccess{})) const pfx = "OTLP partial success: " msg := err.Error() require.True(t, strings.HasPrefix(msg, pfx)) require.Equal(t, expect, msg[len(pfx):]) } func TestPartialSuccessFormat(t *testing.T) { requireErrorString(t, "empty message (0 metric data points rejected)", MetricPartialSuccessError(0, "")) requireErrorString(t, "help help (0 metric data points rejected)", MetricPartialSuccessError(0, "help help")) requireErrorString(t, "what happened (10 metric data points rejected)", MetricPartialSuccessError(10, "what happened")) requireErrorString(t, "what happened (15 spans rejected)", TracePartialSuccessError(15, "what happened")) } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/internal/retry/000077500000000000000000000000001452547353200277705ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry.go000066400000000000000000000116001452547353200314620ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/retry/retry.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package retry provides request retry functionality that can perform // configurable exponential backoff for transient errors and honor any // explicit throttle responses received. package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry" import ( "context" "fmt" "time" "github.com/cenkalti/backoff/v4" ) // DefaultConfig are the recommended defaults to use. var DefaultConfig = Config{ Enabled: true, InitialInterval: 5 * time.Second, MaxInterval: 30 * time.Second, MaxElapsedTime: time.Minute, } // Config defines configuration for retrying batches in case of export failure // using an exponential backoff. type Config struct { // Enabled indicates whether to not retry sending batches in case of // export failure. Enabled bool // InitialInterval the time to wait after the first failure before // retrying. InitialInterval time.Duration // MaxInterval is the upper bound on backoff interval. Once this value is // reached the delay between consecutive retries will always be // `MaxInterval`. MaxInterval time.Duration // MaxElapsedTime is the maximum amount of time (including retries) spent // trying to send a request/batch. Once this value is reached, the data // is discarded. MaxElapsedTime time.Duration } // RequestFunc wraps a request with retry logic. type RequestFunc func(context.Context, func(context.Context) error) error // EvaluateFunc returns if an error is retry-able and if an explicit throttle // duration should be honored that was included in the error. 
// // The function must return true if the error argument is retry-able, // otherwise it must return false for the first return parameter. // // The function must return a non-zero time.Duration if the error contains // explicit throttle duration that should be honored, otherwise it must return // a zero valued time.Duration. type EvaluateFunc func(error) (bool, time.Duration) // RequestFunc returns a RequestFunc using the evaluate function to determine // if requests can be retried and based on the exponential backoff // configuration of c. func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { if !c.Enabled { return func(ctx context.Context, fn func(context.Context) error) error { return fn(ctx) } } return func(ctx context.Context, fn func(context.Context) error) error { // Do not use NewExponentialBackOff since it calls Reset and the code here // must call Reset after changing the InitialInterval (this saves an // unnecessary call to Now). b := &backoff.ExponentialBackOff{ InitialInterval: c.InitialInterval, RandomizationFactor: backoff.DefaultRandomizationFactor, Multiplier: backoff.DefaultMultiplier, MaxInterval: c.MaxInterval, MaxElapsedTime: c.MaxElapsedTime, Stop: backoff.Stop, Clock: backoff.SystemClock, } b.Reset() for { err := fn(ctx) if err == nil { return nil } retryable, throttle := evaluate(err) if !retryable { return err } bOff := b.NextBackOff() if bOff == backoff.Stop { return fmt.Errorf("max retry time elapsed: %w", err) } // Wait for the greater of the backoff or throttle delay. var delay time.Duration if bOff > throttle { delay = bOff } else { elapsed := b.GetElapsedTime() if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime { return fmt.Errorf("max retry time would elapse: %w", err) } delay = throttle } if ctxErr := waitFunc(ctx, delay); ctxErr != nil { return fmt.Errorf("%w: %s", ctxErr, err) } } } } // Allow override for testing. var waitFunc = wait // wait takes the caller's context, and the amount of time to wait. It will // return nil if the timer fires before or at the same time as the context's // deadline. This indicates that the call can be retried. func wait(ctx context.Context, delay time.Duration) error { timer := time.NewTimer(delay) defer timer.Stop() select { case <-ctx.Done(): // Handle the case where the timer and context deadline end // simultaneously by prioritizing the timer expiration nil value // response. select { case <-timer.C: default: return ctx.Err() } case <-timer.C: } return nil } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/internal/retry/retry_test.go000066400000000000000000000145671452547353200325400ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/retry/retry_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package retry import ( "context" "errors" "math" "sync" "testing" "time" "github.com/cenkalti/backoff/v4" "github.com/stretchr/testify/assert" ) func TestWait(t *testing.T) { tests := []struct { ctx context.Context delay time.Duration expected error }{ { ctx: context.Background(), delay: time.Duration(0), }, { ctx: context.Background(), delay: time.Duration(1), }, { ctx: context.Background(), delay: time.Duration(-1), }, { ctx: func() context.Context { ctx, cancel := context.WithCancel(context.Background()) cancel() return ctx }(), // Ensure the timer and context do not end simultaneously. delay: 1 * time.Hour, expected: context.Canceled, }, } for _, test := range tests { err := wait(test.ctx, test.delay) if test.expected == nil { assert.NoError(t, err) } else { assert.ErrorIs(t, err, test.expected) } } } func TestNonRetryableError(t *testing.T) { ev := func(error) (bool, time.Duration) { return false, 0 } reqFunc := Config{ Enabled: true, InitialInterval: 1 * time.Nanosecond, MaxInterval: 1 * time.Nanosecond, // Never stop retrying. MaxElapsedTime: 0, }.RequestFunc(ev) ctx := context.Background() assert.NoError(t, reqFunc(ctx, func(context.Context) error { return nil })) assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error { return assert.AnError }), assert.AnError) } func TestThrottledRetry(t *testing.T) { // Ensure the throttle delay is used by making longer than backoff delay. throttleDelay, backoffDelay := time.Second, time.Nanosecond ev := func(error) (bool, time.Duration) { // Retry everything with a throttle delay. return true, throttleDelay } reqFunc := Config{ Enabled: true, InitialInterval: backoffDelay, MaxInterval: backoffDelay, // Never stop retrying. MaxElapsedTime: 0, }.RequestFunc(ev) origWait := waitFunc var done bool waitFunc = func(_ context.Context, delay time.Duration) error { assert.Equal(t, throttleDelay, delay, "retry not throttled") // Try twice to ensure call is attempted again after delay. if done { return assert.AnError } done = true return nil } defer func() { waitFunc = origWait }() ctx := context.Background() assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error { return errors.New("not this error") }), assert.AnError) } func TestBackoffRetry(t *testing.T) { ev := func(error) (bool, time.Duration) { return true, 0 } delay := time.Nanosecond reqFunc := Config{ Enabled: true, InitialInterval: delay, MaxInterval: delay, // Never stop retrying. MaxElapsedTime: 0, }.RequestFunc(ev) origWait := waitFunc var done bool waitFunc = func(_ context.Context, d time.Duration) error { delta := math.Ceil(float64(delay) * backoff.DefaultRandomizationFactor) assert.InDelta(t, delay, d, delta, "retry not backoffed") // Try twice to ensure call is attempted again after delay. if done { return assert.AnError } done = true return nil } t.Cleanup(func() { waitFunc = origWait }) ctx := context.Background() assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error { return errors.New("not this error") }), assert.AnError) } func TestBackoffRetryCanceledContext(t *testing.T) { ev := func(error) (bool, time.Duration) { return true, 0 } delay := time.Millisecond reqFunc := Config{ Enabled: true, InitialInterval: delay, MaxInterval: delay, // Never stop retrying. 
MaxElapsedTime: 10 * time.Millisecond, }.RequestFunc(ev) ctx, cancel := context.WithCancel(context.Background()) count := 0 cancel() err := reqFunc(ctx, func(context.Context) error { count++ return assert.AnError }) assert.ErrorIs(t, err, context.Canceled) assert.Contains(t, err.Error(), assert.AnError.Error()) assert.Equal(t, 1, count) } func TestThrottledRetryGreaterThanMaxElapsedTime(t *testing.T) { // Ensure the throttle delay is used by making longer than backoff delay. tDelay, bDelay := time.Hour, time.Nanosecond ev := func(error) (bool, time.Duration) { return true, tDelay } reqFunc := Config{ Enabled: true, InitialInterval: bDelay, MaxInterval: bDelay, MaxElapsedTime: tDelay - (time.Nanosecond), }.RequestFunc(ev) ctx := context.Background() assert.Contains(t, reqFunc(ctx, func(context.Context) error { return assert.AnError }).Error(), "max retry time would elapse: ") } func TestMaxElapsedTime(t *testing.T) { ev := func(error) (bool, time.Duration) { return true, 0 } delay := time.Nanosecond reqFunc := Config{ Enabled: true, // InitialInterval > MaxElapsedTime means immediate return. InitialInterval: 2 * delay, MaxElapsedTime: delay, }.RequestFunc(ev) ctx := context.Background() assert.Contains(t, reqFunc(ctx, func(context.Context) error { return assert.AnError }).Error(), "max retry time elapsed: ") } func TestRetryNotEnabled(t *testing.T) { ev := func(error) (bool, time.Duration) { t.Error("evaluated retry when not enabled") return false, 0 } reqFunc := Config{}.RequestFunc(ev) ctx := context.Background() assert.NoError(t, reqFunc(ctx, func(context.Context) error { return nil })) assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error { return assert.AnError }), assert.AnError) } func TestRetryConcurrentSafe(t *testing.T) { ev := func(error) (bool, time.Duration) { return true, 0 } reqFunc := Config{ Enabled: true, }.RequestFunc(ev) var wg sync.WaitGroup ctx := context.Background() for i := 1; i < 5; i++ { wg.Add(1) go func() { defer wg.Done() var done bool assert.NoError(t, reqFunc(ctx, func(context.Context) error { if !done { done = true return assert.AnError } return nil })) }() } wg.Wait() } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/mock_collector_test.go000066400000000000000000000157251452547353200314060ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
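// The retry package exercised by the tests above is driven entirely through
// Config.RequestFunc. A minimal sketch of the wiring, using the same default
// intervals as DefaultConfig; the evaluate function and doExport are
// illustrative placeholders, not part of the upstream source:
//
//	requestFunc := retry.Config{
//		Enabled:         true,
//		InitialInterval: 5 * time.Second,
//		MaxInterval:     30 * time.Second,
//		MaxElapsedTime:  time.Minute,
//	}.RequestFunc(func(err error) (bool, time.Duration) {
//		// Treat every error as retryable, with no explicit throttle delay.
//		return true, 0
//	})
//
//	err := requestFunc(ctx, func(ctx context.Context) error {
//		return doExport(ctx) // hypothetical single export attempt
//	})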
package otlptracehttp_test import ( "bytes" "compress/gzip" "context" "crypto/tls" "fmt" "io" "net" "net/http" "sync" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlptracetest" collectortracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) type mockCollector struct { endpoint string server *http.Server spanLock sync.Mutex spansStorage otlptracetest.SpansStorage injectHTTPStatus []int injectResponseHeader []map[string]string injectContentType string partial *collectortracepb.ExportTracePartialSuccess delay <-chan struct{} clientTLSConfig *tls.Config expectedHeaders map[string]string } func (c *mockCollector) Stop() error { return c.server.Shutdown(context.Background()) } func (c *mockCollector) MustStop(t *testing.T) { assert.NoError(t, c.server.Shutdown(context.Background())) } func (c *mockCollector) GetSpans() []*tracepb.Span { c.spanLock.Lock() defer c.spanLock.Unlock() return c.spansStorage.GetSpans() } func (c *mockCollector) GetResourceSpans() []*tracepb.ResourceSpans { c.spanLock.Lock() defer c.spanLock.Unlock() return c.spansStorage.GetResourceSpans() } func (c *mockCollector) Endpoint() string { return c.endpoint } func (c *mockCollector) ClientTLSConfig() *tls.Config { return c.clientTLSConfig } func (c *mockCollector) serveTraces(w http.ResponseWriter, r *http.Request) { if c.delay != nil { select { case <-c.delay: case <-r.Context().Done(): return } } if !c.checkHeaders(r) { w.WriteHeader(http.StatusBadRequest) return } response := collectortracepb.ExportTraceServiceResponse{ PartialSuccess: c.partial, } rawResponse, err := proto.Marshal(&response) if err != nil { w.WriteHeader(http.StatusInternalServerError) return } h := c.getInjectResponseHeader() if injectedStatus := c.getInjectHTTPStatus(); injectedStatus != 0 { writeReply(w, rawResponse, injectedStatus, c.injectContentType, h) return } rawRequest, err := readRequest(r) if err != nil { w.WriteHeader(http.StatusInternalServerError) return } request, err := unmarshalTraceRequest(rawRequest, r.Header.Get("content-type")) if err != nil { w.WriteHeader(http.StatusBadRequest) return } writeReply(w, rawResponse, 0, c.injectContentType, h) c.spanLock.Lock() defer c.spanLock.Unlock() c.spansStorage.AddSpans(request) } func unmarshalTraceRequest(rawRequest []byte, contentType string) (*collectortracepb.ExportTraceServiceRequest, error) { request := &collectortracepb.ExportTraceServiceRequest{} if contentType != "application/x-protobuf" { return request, fmt.Errorf("invalid content-type: %s, only application/x-protobuf is supported", contentType) } err := proto.Unmarshal(rawRequest, request) return request, err } func (c *mockCollector) checkHeaders(r *http.Request) bool { for k, v := range c.expectedHeaders { got := r.Header.Get(k) if got != v { return false } } return true } func (c *mockCollector) getInjectHTTPStatus() int { if len(c.injectHTTPStatus) == 0 { return 0 } status := c.injectHTTPStatus[0] c.injectHTTPStatus = c.injectHTTPStatus[1:] if len(c.injectHTTPStatus) == 0 { c.injectHTTPStatus = nil } return status } func (c *mockCollector) getInjectResponseHeader() (h map[string]string) { if len(c.injectResponseHeader) == 0 { return } h, c.injectResponseHeader = c.injectResponseHeader[0], c.injectResponseHeader[1:] if 
len(c.injectResponseHeader) == 0 { c.injectResponseHeader = nil } return } func readRequest(r *http.Request) ([]byte, error) { if r.Header.Get("Content-Encoding") == "gzip" { return readGzipBody(r.Body) } return io.ReadAll(r.Body) } func readGzipBody(body io.Reader) ([]byte, error) { rawRequest := bytes.Buffer{} gunzipper, err := gzip.NewReader(body) if err != nil { return nil, err } defer gunzipper.Close() _, err = io.Copy(&rawRequest, gunzipper) if err != nil { return nil, err } return rawRequest.Bytes(), nil } func writeReply(w http.ResponseWriter, rawResponse []byte, s int, ct string, h map[string]string) { status := http.StatusOK if s != 0 { status = s } contentType := "application/x-protobuf" if ct != "" { contentType = ct } w.Header().Set("Content-Type", contentType) for k, v := range h { w.Header().Add(k, v) } w.WriteHeader(status) _, _ = w.Write(rawResponse) } type mockCollectorConfig struct { TracesURLPath string Port int InjectHTTPStatus []int InjectContentType string InjectResponseHeader []map[string]string Partial *collectortracepb.ExportTracePartialSuccess Delay <-chan struct{} WithTLS bool ExpectedHeaders map[string]string } func (c *mockCollectorConfig) fillInDefaults() { if c.TracesURLPath == "" { c.TracesURLPath = otlpconfig.DefaultTracesPath } } func runMockCollector(t *testing.T, cfg mockCollectorConfig) *mockCollector { cfg.fillInDefaults() ln, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", cfg.Port)) require.NoError(t, err) _, portStr, err := net.SplitHostPort(ln.Addr().String()) require.NoError(t, err) m := &mockCollector{ endpoint: fmt.Sprintf("localhost:%s", portStr), spansStorage: otlptracetest.NewSpansStorage(), injectHTTPStatus: cfg.InjectHTTPStatus, injectResponseHeader: cfg.InjectResponseHeader, injectContentType: cfg.InjectContentType, partial: cfg.Partial, delay: cfg.Delay, expectedHeaders: cfg.ExpectedHeaders, } mux := http.NewServeMux() mux.Handle(cfg.TracesURLPath, http.HandlerFunc(m.serveTraces)) server := &http.Server{ Handler: mux, ReadTimeout: 10 * time.Second, WriteTimeout: 10 * time.Second, } if cfg.WithTLS { pem, err := generateWeakCertificate() require.NoError(t, err) tlsCertificate, err := tls.X509KeyPair(pem.Certificate, pem.PrivateKey) require.NoError(t, err) server.TLSConfig = &tls.Config{ Certificates: []tls.Certificate{tlsCertificate}, } m.clientTLSConfig = &tls.Config{ InsecureSkipVerify: true, } } go func() { if cfg.WithTLS { _ = server.ServeTLS(ln, "", "") } else { _ = server.Serve(ln) } }() m.server = server return m } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/otlptracehttp/options.go000066400000000000000000000103071452547353200270320ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
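// The mock collector defined above is what this package's client tests point
// the HTTP exporter at. A minimal sketch of a test using it (the exporter
// construction is assumed; only identifiers shown in the file above are used):
//
//	mc := runMockCollector(t, mockCollectorConfig{
//		ExpectedHeaders:  map[string]string{"header1": "value1"},
//		InjectHTTPStatus: []int{503}, // first request is rejected, later ones get 200
//	})
//	defer mc.MustStop(t)
//
//	// Point the exporter under test at mc.Endpoint(), export some spans, then
//	// assert on mc.GetSpans() / mc.GetResourceSpans().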
package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" import ( "crypto/tls" "time" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal/retry" ) // Compression describes the compression used for payloads sent to the // collector. type Compression otlpconfig.Compression const ( // NoCompression tells the driver to send payloads without // compression. NoCompression = Compression(otlpconfig.NoCompression) // GzipCompression tells the driver to send payloads after // compressing them with gzip. GzipCompression = Compression(otlpconfig.GzipCompression) ) // Option applies an option to the HTTP client. type Option interface { applyHTTPOption(otlpconfig.Config) otlpconfig.Config } func asHTTPOptions(opts []Option) []otlpconfig.HTTPOption { converted := make([]otlpconfig.HTTPOption, len(opts)) for i, o := range opts { converted[i] = otlpconfig.NewHTTPOption(o.applyHTTPOption) } return converted } // RetryConfig defines configuration for retrying batches in case of export // failure using an exponential backoff. type RetryConfig retry.Config type wrappedOption struct { otlpconfig.HTTPOption } func (w wrappedOption) applyHTTPOption(cfg otlpconfig.Config) otlpconfig.Config { return w.ApplyHTTPOption(cfg) } // WithEndpoint allows one to set the address of the collector // endpoint that the driver will use to send spans. If // unset, it will instead try to use // the default endpoint (localhost:4318). Note that the endpoint // must not contain any URL path. func WithEndpoint(endpoint string) Option { return wrappedOption{otlpconfig.WithEndpoint(endpoint)} } // WithCompression tells the driver to compress the sent data. func WithCompression(compression Compression) Option { return wrappedOption{otlpconfig.WithCompression(otlpconfig.Compression(compression))} } // WithURLPath allows one to override the default URL path used // for sending traces. If unset, default ("/v1/traces") will be used. func WithURLPath(urlPath string) Option { return wrappedOption{otlpconfig.WithURLPath(urlPath)} } // WithTLSClientConfig can be used to set up a custom TLS // configuration for the client used to send payloads to the // collector. Use it if you want to use a custom certificate. func WithTLSClientConfig(tlsCfg *tls.Config) Option { return wrappedOption{otlpconfig.WithTLSClientConfig(tlsCfg)} } // WithInsecure tells the driver to connect to the collector using the // HTTP scheme, instead of HTTPS. func WithInsecure() Option { return wrappedOption{otlpconfig.WithInsecure()} } // WithHeaders allows one to tell the driver to send additional HTTP // headers with the payloads. Specifying headers like Content-Length, // Content-Encoding and Content-Type may result in a broken driver. func WithHeaders(headers map[string]string) Option { return wrappedOption{otlpconfig.WithHeaders(headers)} } // WithTimeout tells the driver the max waiting time for the backend to process // each spans batch. If unset, the default will be 10 seconds. func WithTimeout(duration time.Duration) Option { return wrappedOption{otlpconfig.WithTimeout(duration)} } // WithRetry configures the retry policy for transient errors that may occurs // when exporting traces. An exponential back-off algorithm is used to ensure // endpoints are not overwhelmed with retries. If unset, the default retry // policy will retry after 5 seconds and increase exponentially after each // error for a total of 1 minute. 
func WithRetry(rc RetryConfig) Option { return wrappedOption{otlpconfig.WithRetry(retry.Config(rc))} } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/version.go000066400000000000000000000014551452547353200241330ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. func Version() string { return "1.21.0" } opentelemetry-go-1.21.0/exporters/otlp/otlptrace/version_test.go000066400000000000000000000021631452547353200251670ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlptrace_test import ( "regexp" "testing" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" ) // regex taken from https://github.com/Masterminds/semver/tree/v3.1.1 var versionRegex = regexp.MustCompile(`^v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?$`) func TestVersionSemver(t *testing.T) { v := otlptrace.Version() assert.NotNil(t, versionRegex.FindStringSubmatch(v), "version is not semver: %s", v) } opentelemetry-go-1.21.0/exporters/prometheus/000077500000000000000000000000001452547353200213325ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/prometheus/benchmark_test.go000066400000000000000000000032511452547353200246530ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
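// The options defined in otlptracehttp/options.go above compose into a single
// exporter constructor call. A minimal sketch follows; it assumes the
// package's New constructor (not reproduced in this excerpt) and a collector
// reachable at the placeholder address:
//
//	exp, err := otlptracehttp.New(ctx,
//		otlptracehttp.WithEndpoint("collector.example.com:4318"),
//		otlptracehttp.WithURLPath("/v1/traces"),
//		otlptracehttp.WithCompression(otlptracehttp.GzipCompression),
//		otlptracehttp.WithHeaders(map[string]string{"Authorization": "Bearer <token>"}),
//		otlptracehttp.WithTimeout(10*time.Second),
//	)
//	if err != nil {
//		// handle the error
//	}
//	_ = exp // typically passed to sdktrace.WithBatcher when building a TracerProvider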
package prometheus import ( "context" "fmt" "testing" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/sdk/metric" ) func benchmarkCollect(b *testing.B, n int) { ctx := context.Background() registry := prometheus.NewRegistry() exporter, err := New(WithRegisterer(registry)) require.NoError(b, err) provider := metric.NewMeterProvider(metric.WithReader(exporter)) meter := provider.Meter("testmeter") for i := 0; i < n; i++ { counter, err := meter.Float64Counter(fmt.Sprintf("foo_%d", i)) require.NoError(b, err) counter.Add(ctx, float64(i)) } b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { _, err := registry.Gather() require.NoError(b, err) } } func BenchmarkCollect1(b *testing.B) { benchmarkCollect(b, 1) } func BenchmarkCollect10(b *testing.B) { benchmarkCollect(b, 10) } func BenchmarkCollect100(b *testing.B) { benchmarkCollect(b, 100) } func BenchmarkCollect1000(b *testing.B) { benchmarkCollect(b, 1000) } func BenchmarkCollect10000(b *testing.B) { benchmarkCollect(b, 10000) } opentelemetry-go-1.21.0/exporters/prometheus/config.go000066400000000000000000000115161452547353200231320ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus" import ( "strings" "github.com/prometheus/client_golang/prometheus" "go.opentelemetry.io/otel/sdk/metric" ) // config contains options for the exporter. type config struct { registerer prometheus.Registerer disableTargetInfo bool withoutUnits bool withoutCounterSuffixes bool readerOpts []metric.ManualReaderOption disableScopeInfo bool namespace string } // newConfig creates a validated config configured with options. func newConfig(opts ...Option) config { cfg := config{} for _, opt := range opts { cfg = opt.apply(cfg) } if cfg.registerer == nil { cfg.registerer = prometheus.DefaultRegisterer } return cfg } // Option sets exporter option values. type Option interface { apply(config) config } type optionFunc func(config) config func (fn optionFunc) apply(cfg config) config { return fn(cfg) } // WithRegisterer configures which prometheus Registerer the Exporter will // register with. If no registerer is used the prometheus DefaultRegisterer is // used. func WithRegisterer(reg prometheus.Registerer) Option { return optionFunc(func(cfg config) config { cfg.registerer = reg return cfg }) } // WithAggregationSelector configure the Aggregation Selector the exporter will // use. If no AggregationSelector is provided the DefaultAggregationSelector is // used. func WithAggregationSelector(agg metric.AggregationSelector) Option { return optionFunc(func(cfg config) config { cfg.readerOpts = append(cfg.readerOpts, metric.WithAggregationSelector(agg)) return cfg }) } // WithProducer configure the metric Producer the exporter will use as a source // of external metric data. 
func WithProducer(producer metric.Producer) Option { return optionFunc(func(cfg config) config { cfg.readerOpts = append(cfg.readerOpts, metric.WithProducer(producer)) return cfg }) } // WithoutTargetInfo configures the Exporter to not export the resource target_info metric. // If not specified, the Exporter will create a target_info metric containing // the metrics' resource.Resource attributes. func WithoutTargetInfo() Option { return optionFunc(func(cfg config) config { cfg.disableTargetInfo = true return cfg }) } // WithoutUnits disables exporter's addition of unit suffixes to metric names, // and will also prevent unit comments from being added in OpenMetrics once // unit comments are supported. // // By default, metric names include a unit suffix to follow Prometheus naming // conventions. For example, the counter metric request.duration, with unit // milliseconds would become request_duration_milliseconds_total. // With this option set, the name would instead be request_duration_total. func WithoutUnits() Option { return optionFunc(func(cfg config) config { cfg.withoutUnits = true return cfg }) } // WithoutCounterSuffixes disables exporter's addition _total suffixes on counters. // // By default, metric names include a _total suffix to follow Prometheus naming // conventions. For example, the counter metric happy.people would become // happy_people_total. With this option set, the name would instead be // happy_people. func WithoutCounterSuffixes() Option { return optionFunc(func(cfg config) config { cfg.withoutCounterSuffixes = true return cfg }) } // WithoutScopeInfo configures the Exporter to not export the otel_scope_info metric. // If not specified, the Exporter will create a otel_scope_info metric containing // the metrics' Instrumentation Scope, and also add labels about Instrumentation Scope to all metric points. func WithoutScopeInfo() Option { return optionFunc(func(cfg config) config { cfg.disableScopeInfo = true return cfg }) } // WithNamespace configures the Exporter to prefix metric with the given namespace. // Metadata metrics such as target_info and otel_scope_info are not prefixed since these // have special behavior based on their name. func WithNamespace(ns string) Option { return optionFunc(func(cfg config) config { ns = sanitizeName(ns) if !strings.HasSuffix(ns, "_") { // namespace and metric names should be separated with an underscore, // adds a trailing underscore if there is not one already. ns = ns + "_" } cfg.namespace = ns return cfg }) } opentelemetry-go-1.21.0/exporters/prometheus/config_test.go000066400000000000000000000077151452547353200241770ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
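// The options above feed the exporter constructor defined later in this
// package (see exporter.go). A minimal sketch mirroring the pattern used by
// benchmark_test.go; the namespace value is illustrative:
//
//	registry := prometheus.NewRegistry()
//	exporter, err := New(
//		WithRegisterer(registry),
//		WithNamespace("myapp"),
//	)
//	if err != nil {
//		// handle the error
//	}
//	provider := metric.NewMeterProvider(metric.WithReader(exporter))
//	// Expose registry over HTTP and create instruments from provider.Meter(...).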
package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus" import ( "context" "testing" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) func TestNewConfig(t *testing.T) { registry := prometheus.NewRegistry() aggregationSelector := func(metric.InstrumentKind) metric.Aggregation { return nil } producer := &noopProducer{} testCases := []struct { name string options []Option wantConfig config }{ { name: "Default", options: nil, wantConfig: config{ registerer: prometheus.DefaultRegisterer, }, }, { name: "WithRegisterer", options: []Option{ WithRegisterer(registry), }, wantConfig: config{ registerer: registry, }, }, { name: "WithAggregationSelector", options: []Option{ WithAggregationSelector(aggregationSelector), }, wantConfig: config{ registerer: prometheus.DefaultRegisterer, readerOpts: []metric.ManualReaderOption{metric.WithAggregationSelector(aggregationSelector)}, }, }, { name: "WithProducer", options: []Option{ WithProducer(producer), }, wantConfig: config{ registerer: prometheus.DefaultRegisterer, readerOpts: []metric.ManualReaderOption{metric.WithProducer(producer)}, }, }, { name: "With Multiple Options", options: []Option{ WithRegisterer(registry), WithAggregationSelector(aggregationSelector), WithProducer(producer), }, wantConfig: config{ registerer: registry, readerOpts: []metric.ManualReaderOption{ metric.WithAggregationSelector(aggregationSelector), metric.WithProducer(producer), }, }, }, { name: "nil options do nothing", options: []Option{ WithRegisterer(nil), }, wantConfig: config{ registerer: prometheus.DefaultRegisterer, }, }, { name: "without target_info metric", options: []Option{ WithoutTargetInfo(), }, wantConfig: config{ registerer: prometheus.DefaultRegisterer, disableTargetInfo: true, }, }, { name: "unit suffixes disabled", options: []Option{ WithoutUnits(), }, wantConfig: config{ registerer: prometheus.DefaultRegisterer, withoutUnits: true, }, }, { name: "with namespace", options: []Option{ WithNamespace("test"), }, wantConfig: config{ registerer: prometheus.DefaultRegisterer, namespace: "test_", }, }, { name: "with namespace with trailing underscore", options: []Option{ WithNamespace("test_"), }, wantConfig: config{ registerer: prometheus.DefaultRegisterer, namespace: "test_", }, }, { name: "with unsanitized namespace", options: []Option{ WithNamespace("test/"), }, wantConfig: config{ registerer: prometheus.DefaultRegisterer, namespace: "test_", }, }, } for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { cfg := newConfig(tt.options...) // only check the length of readerOpts, since they are not comparable assert.Equal(t, len(tt.wantConfig.readerOpts), len(cfg.readerOpts)) cfg.readerOpts = nil tt.wantConfig.readerOpts = nil assert.Equal(t, tt.wantConfig, cfg) }) } } type noopProducer struct{} func (*noopProducer) Produce(ctx context.Context) ([]metricdata.ScopeMetrics, error) { return nil, nil } opentelemetry-go-1.21.0/exporters/prometheus/doc.go000066400000000000000000000015531452547353200224320ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package prometheus provides a Prometheus Exporter that converts // OTLP metrics into the Prometheus exposition format and implements // prometheus.Collector to provide a handler for these metrics. package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus" opentelemetry-go-1.21.0/exporters/prometheus/exporter.go000066400000000000000000000352521452547353200235400ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus" import ( "context" "errors" "fmt" "sort" "strings" "sync" "unicode" "unicode/utf8" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "google.golang.org/protobuf/proto" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/resource" ) const ( targetInfoMetricName = "target_info" targetInfoDescription = "Target metadata" scopeInfoMetricName = "otel_scope_info" scopeInfoDescription = "Instrumentation Scope metadata" ) var ( scopeInfoKeys = [2]string{"otel_scope_name", "otel_scope_version"} errScopeInvalid = errors.New("invalid scope") ) // Exporter is a Prometheus Exporter that embeds the OTel metric.Reader // interface for easy instantiation with a MeterProvider. type Exporter struct { metric.Reader } // MarshalLog returns logging data about the Exporter. func (e *Exporter) MarshalLog() interface{} { const t = "Prometheus exporter" if r, ok := e.Reader.(*metric.ManualReader); ok { under := r.MarshalLog() if data, ok := under.(struct { Type string Registered bool Shutdown bool }); ok { data.Type = t return data } } return struct{ Type string }{Type: t} } var _ metric.Reader = &Exporter{} // collector is used to implement prometheus.Collector. type collector struct { reader metric.Reader withoutUnits bool withoutCounterSuffixes bool disableScopeInfo bool namespace string mu sync.Mutex // mu protects all members below from the concurrent access. 
disableTargetInfo bool targetInfo prometheus.Metric scopeInfos map[instrumentation.Scope]prometheus.Metric scopeInfosInvalid map[instrumentation.Scope]struct{} metricFamilies map[string]*dto.MetricFamily } // prometheus counters MUST have a _total suffix by default: // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/compatibility/prometheus_and_openmetrics.md const counterSuffix = "_total" // New returns a Prometheus Exporter. func New(opts ...Option) (*Exporter, error) { cfg := newConfig(opts...) // this assumes that the default temporality selector will always return cumulative. // we only support cumulative temporality, so building our own reader enforces this. // TODO (#3244): Enable some way to configure the reader, but not change temporality. reader := metric.NewManualReader(cfg.readerOpts...) collector := &collector{ reader: reader, disableTargetInfo: cfg.disableTargetInfo, withoutUnits: cfg.withoutUnits, withoutCounterSuffixes: cfg.withoutCounterSuffixes, disableScopeInfo: cfg.disableScopeInfo, scopeInfos: make(map[instrumentation.Scope]prometheus.Metric), scopeInfosInvalid: make(map[instrumentation.Scope]struct{}), metricFamilies: make(map[string]*dto.MetricFamily), namespace: cfg.namespace, } if err := cfg.registerer.Register(collector); err != nil { return nil, fmt.Errorf("cannot register the collector: %w", err) } e := &Exporter{ Reader: reader, } return e, nil } // Describe implements prometheus.Collector. func (c *collector) Describe(ch chan<- *prometheus.Desc) { // The Opentelemetry SDK doesn't have information on which will exist when the collector // is registered. By returning nothing we are an "unchecked" collector in Prometheus, // and assume responsibility for consistency of the metrics produced. // // See https://pkg.go.dev/github.com/prometheus/client_golang@v1.13.0/prometheus#hdr-Custom_Collectors_and_constant_Metrics } // Collect implements prometheus.Collector. // // This method is safe to call concurrently. func (c *collector) Collect(ch chan<- prometheus.Metric) { // TODO (#3047): Use a sync.Pool instead of allocating metrics every Collect. metrics := metricdata.ResourceMetrics{} err := c.reader.Collect(context.TODO(), &metrics) if err != nil { if errors.Is(err, metric.ErrReaderShutdown) { return } otel.Handle(err) if errors.Is(err, metric.ErrReaderNotRegistered) { return } } global.Debug("Prometheus exporter export", "Data", metrics) // Initialize (once) targetInfo and disableTargetInfo. func() { c.mu.Lock() defer c.mu.Unlock() if c.targetInfo == nil && !c.disableTargetInfo { targetInfo, err := createInfoMetric(targetInfoMetricName, targetInfoDescription, metrics.Resource) if err != nil { // If the target info metric is invalid, disable sending it. c.disableTargetInfo = true otel.Handle(err) return } c.targetInfo = targetInfo } }() if !c.disableTargetInfo { ch <- c.targetInfo } for _, scopeMetrics := range metrics.ScopeMetrics { var keys, values [2]string if !c.disableScopeInfo { scopeInfo, err := c.scopeInfo(scopeMetrics.Scope) if err == errScopeInvalid { // Do not report the same error multiple times. 
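// The offending scope was cached in c.scopeInfosInvalid when
// createScopeInfoMetric first failed, so subsequent Collect calls receive
// errScopeInvalid here and skip the scope without calling otel.Handle again.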
continue } if err != nil { otel.Handle(err) continue } ch <- scopeInfo keys = scopeInfoKeys values = [2]string{scopeMetrics.Scope.Name, scopeMetrics.Scope.Version} } for _, m := range scopeMetrics.Metrics { typ := c.metricType(m) if typ == nil { continue } name := c.getName(m, typ) drop, help := c.validateMetrics(name, m.Description, typ) if drop { continue } if help != "" { m.Description = help } switch v := m.Data.(type) { case metricdata.Histogram[int64]: addHistogramMetric(ch, v, m, keys, values, name) case metricdata.Histogram[float64]: addHistogramMetric(ch, v, m, keys, values, name) case metricdata.Sum[int64]: addSumMetric(ch, v, m, keys, values, name) case metricdata.Sum[float64]: addSumMetric(ch, v, m, keys, values, name) case metricdata.Gauge[int64]: addGaugeMetric(ch, v, m, keys, values, name) case metricdata.Gauge[float64]: addGaugeMetric(ch, v, m, keys, values, name) } } } } func addHistogramMetric[N int64 | float64](ch chan<- prometheus.Metric, histogram metricdata.Histogram[N], m metricdata.Metrics, ks, vs [2]string, name string) { // TODO(https://github.com/open-telemetry/opentelemetry-go/issues/3163): support exemplars for _, dp := range histogram.DataPoints { keys, values := getAttrs(dp.Attributes, ks, vs) desc := prometheus.NewDesc(name, m.Description, keys, nil) buckets := make(map[float64]uint64, len(dp.Bounds)) cumulativeCount := uint64(0) for i, bound := range dp.Bounds { cumulativeCount += dp.BucketCounts[i] buckets[bound] = cumulativeCount } m, err := prometheus.NewConstHistogram(desc, dp.Count, float64(dp.Sum), buckets, values...) if err != nil { otel.Handle(err) continue } ch <- m } } func addSumMetric[N int64 | float64](ch chan<- prometheus.Metric, sum metricdata.Sum[N], m metricdata.Metrics, ks, vs [2]string, name string) { valueType := prometheus.CounterValue if !sum.IsMonotonic { valueType = prometheus.GaugeValue } for _, dp := range sum.DataPoints { keys, values := getAttrs(dp.Attributes, ks, vs) desc := prometheus.NewDesc(name, m.Description, keys, nil) m, err := prometheus.NewConstMetric(desc, valueType, float64(dp.Value), values...) if err != nil { otel.Handle(err) continue } ch <- m } } func addGaugeMetric[N int64 | float64](ch chan<- prometheus.Metric, gauge metricdata.Gauge[N], m metricdata.Metrics, ks, vs [2]string, name string) { for _, dp := range gauge.DataPoints { keys, values := getAttrs(dp.Attributes, ks, vs) desc := prometheus.NewDesc(name, m.Description, keys, nil) m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(dp.Value), values...) if err != nil { otel.Handle(err) continue } ch <- m } } // getAttrs parses the attribute.Set to two lists of matching Prometheus-style // keys and values. It sanitizes invalid characters and handles duplicate keys // (due to sanitization) by sorting and concatenating the values following the spec. 
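// For example (illustrative, mirroring the sanitized-labels test in
// exporter_test.go): the attribute keys "C.D" and "C/D" both sanitize to the
// label "C_D", so their values are joined with ";" under that single label
// (e.g. "Y;Z"). When ks[0] is non-empty, the fixed
// otel_scope_name/otel_scope_version pair carried in ks and vs is appended
// after the attribute-derived labels.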
func getAttrs(attrs attribute.Set, ks, vs [2]string) ([]string, []string) { keysMap := make(map[string][]string) itr := attrs.Iter() for itr.Next() { kv := itr.Attribute() key := strings.Map(sanitizeRune, string(kv.Key)) if _, ok := keysMap[key]; !ok { keysMap[key] = []string{kv.Value.Emit()} } else { // if the sanitized key is a duplicate, append to the list of keys keysMap[key] = append(keysMap[key], kv.Value.Emit()) } } keys := make([]string, 0, attrs.Len()) values := make([]string, 0, attrs.Len()) for key, vals := range keysMap { keys = append(keys, key) sort.Slice(vals, func(i, j int) bool { return i < j }) values = append(values, strings.Join(vals, ";")) } if ks[0] != "" { keys = append(keys, ks[:]...) values = append(values, vs[:]...) } return keys, values } func createInfoMetric(name, description string, res *resource.Resource) (prometheus.Metric, error) { keys, values := getAttrs(*res.Set(), [2]string{}, [2]string{}) desc := prometheus.NewDesc(name, description, keys, nil) return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), values...) } func createScopeInfoMetric(scope instrumentation.Scope) (prometheus.Metric, error) { keys := scopeInfoKeys[:] desc := prometheus.NewDesc(scopeInfoMetricName, scopeInfoDescription, keys, nil) return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), scope.Name, scope.Version) } func sanitizeRune(r rune) rune { if unicode.IsLetter(r) || unicode.IsDigit(r) || r == ':' || r == '_' { return r } return '_' } var unitSuffixes = map[string]string{ // Time "d": "_days", "h": "_hours", "min": "_minutes", "s": "_seconds", "ms": "_milliseconds", "us": "_microseconds", "ns": "_nanoseconds", // Bytes "By": "_bytes", "KiBy": "_kibibytes", "MiBy": "_mebibytes", "GiBy": "_gibibytes", "TiBy": "_tibibytes", "KBy": "_kilobytes", "MBy": "_megabytes", "GBy": "_gigabytes", "TBy": "_terabytes", // SI "m": "_meters", "V": "_volts", "A": "_amperes", "J": "_joules", "W": "_watts", "g": "_grams", // Misc "Cel": "_celsius", "Hz": "_hertz", "1": "_ratio", "%": "_percent", } // getName returns the sanitized name, prefixed with the namespace and suffixed with unit. func (c *collector) getName(m metricdata.Metrics, typ *dto.MetricType) string { name := sanitizeName(m.Name) addCounterSuffix := !c.withoutCounterSuffixes && *typ == dto.MetricType_COUNTER if addCounterSuffix { // Remove the _total suffix here, as we will re-add the total suffix // later, and it needs to come after the unit suffix. name = strings.TrimSuffix(name, counterSuffix) } if c.namespace != "" { name = c.namespace + name } if suffix, ok := unitSuffixes[m.Unit]; ok && !c.withoutUnits && !strings.HasSuffix(name, suffix) { name += suffix } if addCounterSuffix { name += counterSuffix } return name } func sanitizeName(n string) string { // This algorithm is based on strings.Map from Go 1.19. const replacement = '_' valid := func(i int, r rune) bool { // Taken from // https://github.com/prometheus/common/blob/dfbc25bd00225c70aca0d94c3c4bb7744f28ace0/model/metric.go#L92-L102 if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || r == '_' || r == ':' || (r >= '0' && r <= '9' && i > 0) { return true } return false } // This output buffer b is initialized on demand, the first time a // character needs to be replaced. var b strings.Builder for i, c := range n { if valid(i, c) { continue } if i == 0 && c >= '0' && c <= '9' { // Prefix leading number with replacement character. 
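// For example, "1valid_23name" is exported as "_1valid_23name"; see the
// sanitize-name test cases in exporter_test.go.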
b.Grow(len(n) + 1) _ = b.WriteByte(byte(replacement)) break } b.Grow(len(n)) _, _ = b.WriteString(n[:i]) _ = b.WriteByte(byte(replacement)) width := utf8.RuneLen(c) n = n[i+width:] break } // Fast path for unchanged input. if b.Cap() == 0 { // b.Grow was not called above. return n } for _, c := range n { // Due to inlining, it is more performant to invoke WriteByte rather then // WriteRune. if valid(1, c) { // We are guaranteed to not be at the start. _ = b.WriteByte(byte(c)) } else { _ = b.WriteByte(byte(replacement)) } } return b.String() } func (c *collector) metricType(m metricdata.Metrics) *dto.MetricType { switch v := m.Data.(type) { case metricdata.Histogram[int64], metricdata.Histogram[float64]: return dto.MetricType_HISTOGRAM.Enum() case metricdata.Sum[float64]: if v.IsMonotonic { return dto.MetricType_COUNTER.Enum() } return dto.MetricType_GAUGE.Enum() case metricdata.Sum[int64]: if v.IsMonotonic { return dto.MetricType_COUNTER.Enum() } return dto.MetricType_GAUGE.Enum() case metricdata.Gauge[int64], metricdata.Gauge[float64]: return dto.MetricType_GAUGE.Enum() } return nil } func (c *collector) scopeInfo(scope instrumentation.Scope) (prometheus.Metric, error) { c.mu.Lock() defer c.mu.Unlock() scopeInfo, ok := c.scopeInfos[scope] if ok { return scopeInfo, nil } if _, ok := c.scopeInfosInvalid[scope]; ok { return nil, errScopeInvalid } scopeInfo, err := createScopeInfoMetric(scope) if err != nil { c.scopeInfosInvalid[scope] = struct{}{} return nil, fmt.Errorf("cannot create scope info metric: %w", err) } c.scopeInfos[scope] = scopeInfo return scopeInfo, nil } func (c *collector) validateMetrics(name, description string, metricType *dto.MetricType) (drop bool, help string) { c.mu.Lock() defer c.mu.Unlock() emf, exist := c.metricFamilies[name] if !exist { c.metricFamilies[name] = &dto.MetricFamily{ Name: proto.String(name), Help: proto.String(description), Type: metricType, } return false, "" } if emf.GetType() != *metricType { global.Error( errors.New("instrument type conflict"), "Using existing type definition.", "instrument", name, "existing", emf.GetType(), "dropped", *metricType, ) return true, "" } if emf.GetHelp() != description { global.Info( "Instrument description conflict, using existing", "instrument", name, "existing", emf.GetHelp(), "dropped", description, ) return false, emf.GetHelp() } return false, "" } opentelemetry-go-1.21.0/exporters/prometheus/exporter_test.go000066400000000000000000000672331452547353200246030ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
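// The wiring below is an illustrative sketch, not part of this test file: it
// shows how the exporter is typically hooked up. New registers a collector
// with the configured Prometheus registerer and returns an Exporter that acts
// as the SDK reader, so metrics recorded through the MeterProvider become
// visible to an HTTP handler serving that registry. The promhttp import
// (github.com/prometheus/client_golang/prometheus/promhttp) is assumed.
//
//	registry := prometheus.NewRegistry()
//	exporter, err := New(WithRegisterer(registry))
//	if err != nil {
//		// handle error
//	}
//	provider := metric.NewMeterProvider(metric.WithReader(exporter))
//	meter := provider.Meter("example")
//	counter, _ := meter.Int64Counter("requests")
//	counter.Add(context.Background(), 1)
//	http.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))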
package prometheus import ( "context" "errors" "io" "os" "sync" "testing" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" otelmetric "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/resource" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" ) func TestPrometheusExporter(t *testing.T) { testCases := []struct { name string emptyResource bool customResouceAttrs []attribute.KeyValue recordMetrics func(ctx context.Context, meter otelmetric.Meter) options []Option expectedFile string }{ { name: "counter", expectedFile: "testdata/counter.txt", recordMetrics: func(ctx context.Context, meter otelmetric.Meter) { opt := otelmetric.WithAttributes( attribute.Key("A").String("B"), attribute.Key("C").String("D"), attribute.Key("E").Bool(true), attribute.Key("F").Int(42), ) counter, err := meter.Float64Counter( "foo", otelmetric.WithDescription("a simple counter"), otelmetric.WithUnit("s"), ) require.NoError(t, err) counter.Add(ctx, 5, opt) counter.Add(ctx, 10.3, opt) counter.Add(ctx, 9, opt) attrs2 := attribute.NewSet( attribute.Key("A").String("D"), attribute.Key("C").String("B"), attribute.Key("E").Bool(true), attribute.Key("F").Int(42), ) counter.Add(ctx, 5, otelmetric.WithAttributeSet(attrs2)) }, }, { name: "counter that already has the unit suffix", expectedFile: "testdata/counter.txt", recordMetrics: func(ctx context.Context, meter otelmetric.Meter) { opt := otelmetric.WithAttributes( attribute.Key("A").String("B"), attribute.Key("C").String("D"), attribute.Key("E").Bool(true), attribute.Key("F").Int(42), ) counter, err := meter.Float64Counter( "foo.seconds", otelmetric.WithDescription("a simple counter"), otelmetric.WithUnit("s"), ) require.NoError(t, err) counter.Add(ctx, 5, opt) counter.Add(ctx, 10.3, opt) counter.Add(ctx, 9, opt) attrs2 := attribute.NewSet( attribute.Key("A").String("D"), attribute.Key("C").String("B"), attribute.Key("E").Bool(true), attribute.Key("F").Int(42), ) counter.Add(ctx, 5, otelmetric.WithAttributeSet(attrs2)) }, }, { name: "counter that already has a total suffix", expectedFile: "testdata/counter.txt", recordMetrics: func(ctx context.Context, meter otelmetric.Meter) { opt := otelmetric.WithAttributes( attribute.Key("A").String("B"), attribute.Key("C").String("D"), attribute.Key("E").Bool(true), attribute.Key("F").Int(42), ) counter, err := meter.Float64Counter( "foo.total", otelmetric.WithDescription("a simple counter"), otelmetric.WithUnit("s"), ) require.NoError(t, err) counter.Add(ctx, 5, opt) counter.Add(ctx, 10.3, opt) counter.Add(ctx, 9, opt) attrs2 := attribute.NewSet( attribute.Key("A").String("D"), attribute.Key("C").String("B"), attribute.Key("E").Bool(true), attribute.Key("F").Int(42), ) counter.Add(ctx, 5, otelmetric.WithAttributeSet(attrs2)) }, }, { name: "counter with suffixes disabled", expectedFile: "testdata/counter_disabled_suffix.txt", options: []Option{WithoutCounterSuffixes()}, recordMetrics: func(ctx context.Context, meter otelmetric.Meter) { opt := otelmetric.WithAttributes( attribute.Key("A").String("B"), attribute.Key("C").String("D"), attribute.Key("E").Bool(true), attribute.Key("F").Int(42), ) counter, err := meter.Float64Counter( "foo", otelmetric.WithDescription("a simple counter without a total suffix"), otelmetric.WithUnit("s"), ) require.NoError(t, err) counter.Add(ctx, 5, opt) 
counter.Add(ctx, 10.3, opt) counter.Add(ctx, 9, opt) attrs2 := attribute.NewSet( attribute.Key("A").String("D"), attribute.Key("C").String("B"), attribute.Key("E").Bool(true), attribute.Key("F").Int(42), ) counter.Add(ctx, 5, otelmetric.WithAttributeSet(attrs2)) }, }, { name: "gauge", expectedFile: "testdata/gauge.txt", recordMetrics: func(ctx context.Context, meter otelmetric.Meter) { opt := otelmetric.WithAttributes( attribute.Key("A").String("B"), attribute.Key("C").String("D"), ) gauge, err := meter.Float64UpDownCounter( "bar", otelmetric.WithDescription("a fun little gauge"), otelmetric.WithUnit("1"), ) require.NoError(t, err) gauge.Add(ctx, 1.0, opt) gauge.Add(ctx, -.25, opt) }, }, { name: "histogram", expectedFile: "testdata/histogram.txt", recordMetrics: func(ctx context.Context, meter otelmetric.Meter) { opt := otelmetric.WithAttributes( attribute.Key("A").String("B"), attribute.Key("C").String("D"), ) histogram, err := meter.Float64Histogram( "histogram_baz", otelmetric.WithDescription("a very nice histogram"), otelmetric.WithUnit("By"), ) require.NoError(t, err) histogram.Record(ctx, 23, opt) histogram.Record(ctx, 7, opt) histogram.Record(ctx, 101, opt) histogram.Record(ctx, 105, opt) }, }, { name: "sanitized attributes to labels", expectedFile: "testdata/sanitized_labels.txt", options: []Option{WithoutUnits()}, recordMetrics: func(ctx context.Context, meter otelmetric.Meter) { opt := otelmetric.WithAttributes( // exact match, value should be overwritten attribute.Key("A.B").String("X"), attribute.Key("A.B").String("Q"), // unintended match due to sanitization, values should be concatenated attribute.Key("C.D").String("Y"), attribute.Key("C/D").String("Z"), ) counter, err := meter.Float64Counter( "foo", otelmetric.WithDescription("a sanitary counter"), // This unit is not added to otelmetric.WithUnit("By"), ) require.NoError(t, err) counter.Add(ctx, 5, opt) counter.Add(ctx, 10.3, opt) counter.Add(ctx, 9, opt) }, }, { name: "invalid instruments are renamed", expectedFile: "testdata/sanitized_names.txt", recordMetrics: func(ctx context.Context, meter otelmetric.Meter) { opt := otelmetric.WithAttributes( attribute.Key("A").String("B"), attribute.Key("C").String("D"), ) // Valid. gauge, err := meter.Float64UpDownCounter("bar", otelmetric.WithDescription("a fun little gauge")) require.NoError(t, err) gauge.Add(ctx, 100, opt) gauge.Add(ctx, -25, opt) // Invalid, will be renamed. 
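// sanitizeName replaces the dots with underscores, so this instrument should
// show up as invalid_gauge_name in the gathered output compared against
// testdata/sanitized_names.txt.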
gauge, err = meter.Float64UpDownCounter("invalid.gauge.name", otelmetric.WithDescription("a gauge with an invalid name")) require.NoError(t, err) gauge.Add(ctx, 100, opt) counter, err := meter.Float64Counter("0invalid.counter.name", otelmetric.WithDescription("a counter with an invalid name")) require.ErrorIs(t, err, metric.ErrInstrumentName) counter.Add(ctx, 100, opt) histogram, err := meter.Float64Histogram("invalid.hist.name", otelmetric.WithDescription("a histogram with an invalid name")) require.NoError(t, err) histogram.Record(ctx, 23, opt) }, }, { name: "empty resource", emptyResource: true, expectedFile: "testdata/empty_resource.txt", recordMetrics: func(ctx context.Context, meter otelmetric.Meter) { opt := otelmetric.WithAttributes( attribute.Key("A").String("B"), attribute.Key("C").String("D"), attribute.Key("E").Bool(true), attribute.Key("F").Int(42), ) counter, err := meter.Float64Counter("foo", otelmetric.WithDescription("a simple counter")) require.NoError(t, err) counter.Add(ctx, 5, opt) counter.Add(ctx, 10.3, opt) counter.Add(ctx, 9, opt) }, }, { name: "custom resource", customResouceAttrs: []attribute.KeyValue{ attribute.Key("A").String("B"), attribute.Key("C").String("D"), }, expectedFile: "testdata/custom_resource.txt", recordMetrics: func(ctx context.Context, meter otelmetric.Meter) { opt := otelmetric.WithAttributes( attribute.Key("A").String("B"), attribute.Key("C").String("D"), attribute.Key("E").Bool(true), attribute.Key("F").Int(42), ) counter, err := meter.Float64Counter("foo", otelmetric.WithDescription("a simple counter")) require.NoError(t, err) counter.Add(ctx, 5, opt) counter.Add(ctx, 10.3, opt) counter.Add(ctx, 9, opt) }, }, { name: "without target_info", options: []Option{WithoutTargetInfo()}, expectedFile: "testdata/without_target_info.txt", recordMetrics: func(ctx context.Context, meter otelmetric.Meter) { opt := otelmetric.WithAttributes( attribute.Key("A").String("B"), attribute.Key("C").String("D"), attribute.Key("E").Bool(true), attribute.Key("F").Int(42), ) counter, err := meter.Float64Counter("foo", otelmetric.WithDescription("a simple counter")) require.NoError(t, err) counter.Add(ctx, 5, opt) counter.Add(ctx, 10.3, opt) counter.Add(ctx, 9, opt) }, }, { name: "without scope_info", options: []Option{WithoutScopeInfo()}, expectedFile: "testdata/without_scope_info.txt", recordMetrics: func(ctx context.Context, meter otelmetric.Meter) { opt := otelmetric.WithAttributes( attribute.Key("A").String("B"), attribute.Key("C").String("D"), ) gauge, err := meter.Int64UpDownCounter( "bar", otelmetric.WithDescription("a fun little gauge"), otelmetric.WithUnit("1"), ) require.NoError(t, err) gauge.Add(ctx, 2, opt) gauge.Add(ctx, -1, opt) }, }, { name: "without scope_info and target_info", options: []Option{WithoutScopeInfo(), WithoutTargetInfo()}, expectedFile: "testdata/without_scope_and_target_info.txt", recordMetrics: func(ctx context.Context, meter otelmetric.Meter) { opt := otelmetric.WithAttributes( attribute.Key("A").String("B"), attribute.Key("C").String("D"), ) counter, err := meter.Int64Counter( "bar", otelmetric.WithDescription("a fun little counter"), otelmetric.WithUnit("By"), ) require.NoError(t, err) counter.Add(ctx, 2, opt) counter.Add(ctx, 1, opt) }, }, { name: "with namespace", expectedFile: "testdata/with_namespace.txt", options: []Option{ WithNamespace("test"), }, recordMetrics: func(ctx context.Context, meter otelmetric.Meter) { opt := otelmetric.WithAttributes( attribute.Key("A").String("B"), attribute.Key("C").String("D"), 
attribute.Key("E").Bool(true), attribute.Key("F").Int(42), ) counter, err := meter.Float64Counter("foo", otelmetric.WithDescription("a simple counter")) require.NoError(t, err) counter.Add(ctx, 5, opt) counter.Add(ctx, 10.3, opt) counter.Add(ctx, 9, opt) }, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { ctx := context.Background() registry := prometheus.NewRegistry() exporter, err := New(append(tc.options, WithRegisterer(registry))...) require.NoError(t, err) var res *resource.Resource if tc.emptyResource { res = resource.Empty() } else { res, err = resource.New(ctx, // always specify service.name because the default depends on the running OS resource.WithAttributes(semconv.ServiceName("prometheus_test")), // Overwrite the semconv.TelemetrySDKVersionKey value so we don't need to update every version resource.WithAttributes(semconv.TelemetrySDKVersion("latest")), resource.WithAttributes(tc.customResouceAttrs...), ) require.NoError(t, err) res, err = resource.Merge(resource.Default(), res) require.NoError(t, err) } provider := metric.NewMeterProvider( metric.WithResource(res), metric.WithReader(exporter), metric.WithView(metric.NewView( metric.Instrument{Name: "histogram_*"}, metric.Stream{Aggregation: metric.AggregationExplicitBucketHistogram{ Boundaries: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 1000}, }}, )), ) meter := provider.Meter( "testmeter", otelmetric.WithInstrumentationVersion("v0.1.0"), ) tc.recordMetrics(ctx, meter) file, err := os.Open(tc.expectedFile) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, file.Close()) }) err = testutil.GatherAndCompare(registry, file) require.NoError(t, err) }) } } func TestSantitizeName(t *testing.T) { tests := []struct { input string want string }{ {"name€_with_4_width_rune", "name__with_4_width_rune"}, {"`", "_"}, { `! "#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWKYZ[]\^_abcdefghijklmnopqrstuvwkyz{|}~`, `________________0123456789:______ABCDEFGHIJKLMNOPQRSTUVWKYZ_____abcdefghijklmnopqrstuvwkyz____`, }, // Test cases taken from // https://github.com/prometheus/common/blob/dfbc25bd00225c70aca0d94c3c4bb7744f28ace0/model/metric_test.go#L85-L136 {"Avalid_23name", "Avalid_23name"}, {"_Avalid_23name", "_Avalid_23name"}, {"1valid_23name", "_1valid_23name"}, {"avalid_23name", "avalid_23name"}, {"Ava:lid_23name", "Ava:lid_23name"}, {"a lid_23name", "a_lid_23name"}, {":leading_colon", ":leading_colon"}, {"colon:in:the:middle", "colon:in:the:middle"}, {"", ""}, } for _, test := range tests { require.Equalf(t, test.want, sanitizeName(test.input), "input: %q", test.input) } } func TestMultiScopes(t *testing.T) { ctx := context.Background() registry := prometheus.NewRegistry() exporter, err := New(WithRegisterer(registry)) require.NoError(t, err) res, err := resource.New(ctx, // always specify service.name because the default depends on the running OS resource.WithAttributes(semconv.ServiceName("prometheus_test")), // Overwrite the semconv.TelemetrySDKVersionKey value so we don't need to update every version resource.WithAttributes(semconv.TelemetrySDKVersion("latest")), ) require.NoError(t, err) res, err = resource.Merge(resource.Default(), res) require.NoError(t, err) provider := metric.NewMeterProvider( metric.WithReader(exporter), metric.WithResource(res), ) fooCounter, err := provider.Meter("meterfoo", otelmetric.WithInstrumentationVersion("v0.1.0")). 
Int64Counter( "foo", otelmetric.WithUnit("s"), otelmetric.WithDescription("meter foo counter")) assert.NoError(t, err) fooCounter.Add(ctx, 100, otelmetric.WithAttributes(attribute.String("type", "foo"))) barCounter, err := provider.Meter("meterbar", otelmetric.WithInstrumentationVersion("v0.1.0")). Int64Counter( "bar", otelmetric.WithUnit("s"), otelmetric.WithDescription("meter bar counter")) assert.NoError(t, err) barCounter.Add(ctx, 200, otelmetric.WithAttributes(attribute.String("type", "bar"))) file, err := os.Open("testdata/multi_scopes.txt") require.NoError(t, err) t.Cleanup(func() { require.NoError(t, file.Close()) }) err = testutil.GatherAndCompare(registry, file) require.NoError(t, err) } func TestDuplicateMetrics(t *testing.T) { ab := attribute.NewSet(attribute.String("A", "B")) withAB := otelmetric.WithAttributeSet(ab) typeBar := attribute.NewSet(attribute.String("type", "bar")) withTypeBar := otelmetric.WithAttributeSet(typeBar) typeFoo := attribute.NewSet(attribute.String("type", "foo")) withTypeFoo := otelmetric.WithAttributeSet(typeFoo) testCases := []struct { name string customResouceAttrs []attribute.KeyValue recordMetrics func(ctx context.Context, meterA, meterB otelmetric.Meter) options []Option possibleExpectedFiles []string }{ { name: "no_conflict_two_counters", recordMetrics: func(ctx context.Context, meterA, meterB otelmetric.Meter) { fooA, err := meterA.Int64Counter("foo", otelmetric.WithUnit("By"), otelmetric.WithDescription("meter counter foo")) assert.NoError(t, err) fooA.Add(ctx, 100, withAB) fooB, err := meterB.Int64Counter("foo", otelmetric.WithUnit("By"), otelmetric.WithDescription("meter counter foo")) assert.NoError(t, err) fooB.Add(ctx, 100, withAB) }, possibleExpectedFiles: []string{"testdata/no_conflict_two_counters.txt"}, }, { name: "no_conflict_two_updowncounters", recordMetrics: func(ctx context.Context, meterA, meterB otelmetric.Meter) { fooA, err := meterA.Int64UpDownCounter("foo", otelmetric.WithUnit("By"), otelmetric.WithDescription("meter gauge foo")) assert.NoError(t, err) fooA.Add(ctx, 100, withAB) fooB, err := meterB.Int64UpDownCounter("foo", otelmetric.WithUnit("By"), otelmetric.WithDescription("meter gauge foo")) assert.NoError(t, err) fooB.Add(ctx, 100, withAB) }, possibleExpectedFiles: []string{"testdata/no_conflict_two_updowncounters.txt"}, }, { name: "no_conflict_two_histograms", recordMetrics: func(ctx context.Context, meterA, meterB otelmetric.Meter) { fooA, err := meterA.Int64Histogram("foo", otelmetric.WithUnit("By"), otelmetric.WithDescription("meter histogram foo")) assert.NoError(t, err) fooA.Record(ctx, 100, withAB) fooB, err := meterB.Int64Histogram("foo", otelmetric.WithUnit("By"), otelmetric.WithDescription("meter histogram foo")) assert.NoError(t, err) fooB.Record(ctx, 100, withAB) }, possibleExpectedFiles: []string{"testdata/no_conflict_two_histograms.txt"}, }, { name: "conflict_help_two_counters", recordMetrics: func(ctx context.Context, meterA, meterB otelmetric.Meter) { barA, err := meterA.Int64Counter("bar", otelmetric.WithUnit("By"), otelmetric.WithDescription("meter a bar")) assert.NoError(t, err) barA.Add(ctx, 100, withTypeBar) barB, err := meterB.Int64Counter("bar", otelmetric.WithUnit("By"), otelmetric.WithDescription("meter b bar")) assert.NoError(t, err) barB.Add(ctx, 100, withTypeBar) }, possibleExpectedFiles: []string{ "testdata/conflict_help_two_counters_1.txt", "testdata/conflict_help_two_counters_2.txt", }, }, { name: "conflict_help_two_updowncounters", recordMetrics: func(ctx context.Context, meterA, meterB 
otelmetric.Meter) { barA, err := meterA.Int64UpDownCounter("bar", otelmetric.WithUnit("By"), otelmetric.WithDescription("meter a bar")) assert.NoError(t, err) barA.Add(ctx, 100, withTypeBar) barB, err := meterB.Int64UpDownCounter("bar", otelmetric.WithUnit("By"), otelmetric.WithDescription("meter b bar")) assert.NoError(t, err) barB.Add(ctx, 100, withTypeBar) }, possibleExpectedFiles: []string{ "testdata/conflict_help_two_updowncounters_1.txt", "testdata/conflict_help_two_updowncounters_2.txt", }, }, { name: "conflict_help_two_histograms", recordMetrics: func(ctx context.Context, meterA, meterB otelmetric.Meter) { barA, err := meterA.Int64Histogram("bar", otelmetric.WithUnit("By"), otelmetric.WithDescription("meter a bar")) assert.NoError(t, err) barA.Record(ctx, 100, withAB) barB, err := meterB.Int64Histogram("bar", otelmetric.WithUnit("By"), otelmetric.WithDescription("meter b bar")) assert.NoError(t, err) barB.Record(ctx, 100, withAB) }, possibleExpectedFiles: []string{ "testdata/conflict_help_two_histograms_1.txt", "testdata/conflict_help_two_histograms_2.txt", }, }, { name: "conflict_unit_two_counters", recordMetrics: func(ctx context.Context, meterA, meterB otelmetric.Meter) { bazA, err := meterA.Int64Counter("bar", otelmetric.WithUnit("By"), otelmetric.WithDescription("meter bar")) assert.NoError(t, err) bazA.Add(ctx, 100, withTypeBar) bazB, err := meterB.Int64Counter("bar", otelmetric.WithUnit("s"), otelmetric.WithDescription("meter bar")) assert.NoError(t, err) bazB.Add(ctx, 100, withTypeBar) }, options: []Option{WithoutUnits()}, possibleExpectedFiles: []string{"testdata/conflict_unit_two_counters.txt"}, }, { name: "conflict_unit_two_updowncounters", recordMetrics: func(ctx context.Context, meterA, meterB otelmetric.Meter) { barA, err := meterA.Int64UpDownCounter("bar", otelmetric.WithUnit("By"), otelmetric.WithDescription("meter gauge bar")) assert.NoError(t, err) barA.Add(ctx, 100, withTypeBar) barB, err := meterB.Int64UpDownCounter("bar", otelmetric.WithUnit("s"), otelmetric.WithDescription("meter gauge bar")) assert.NoError(t, err) barB.Add(ctx, 100, withTypeBar) }, options: []Option{WithoutUnits()}, possibleExpectedFiles: []string{"testdata/conflict_unit_two_updowncounters.txt"}, }, { name: "conflict_unit_two_histograms", recordMetrics: func(ctx context.Context, meterA, meterB otelmetric.Meter) { barA, err := meterA.Int64Histogram("bar", otelmetric.WithUnit("By"), otelmetric.WithDescription("meter histogram bar")) assert.NoError(t, err) barA.Record(ctx, 100, withAB) barB, err := meterB.Int64Histogram("bar", otelmetric.WithUnit("s"), otelmetric.WithDescription("meter histogram bar")) assert.NoError(t, err) barB.Record(ctx, 100, withAB) }, options: []Option{WithoutUnits()}, possibleExpectedFiles: []string{"testdata/conflict_unit_two_histograms.txt"}, }, { name: "conflict_type_counter_and_updowncounter", recordMetrics: func(ctx context.Context, meterA, meterB otelmetric.Meter) { counter, err := meterA.Int64Counter("foo", otelmetric.WithUnit("By"), otelmetric.WithDescription("meter foo")) assert.NoError(t, err) counter.Add(ctx, 100, withTypeFoo) gauge, err := meterA.Int64UpDownCounter("foo_total", otelmetric.WithUnit("By"), otelmetric.WithDescription("meter foo")) assert.NoError(t, err) gauge.Add(ctx, 200, withTypeFoo) }, options: []Option{WithoutUnits()}, possibleExpectedFiles: []string{ "testdata/conflict_type_counter_and_updowncounter_1.txt", "testdata/conflict_type_counter_and_updowncounter_2.txt", }, }, { name: "conflict_type_histogram_and_updowncounter", recordMetrics: 
func(ctx context.Context, meterA, meterB otelmetric.Meter) { fooA, err := meterA.Int64UpDownCounter("foo", otelmetric.WithUnit("By"), otelmetric.WithDescription("meter gauge foo")) assert.NoError(t, err) fooA.Add(ctx, 100, withAB) fooHistogramA, err := meterA.Int64Histogram("foo", otelmetric.WithUnit("By"), otelmetric.WithDescription("meter histogram foo")) assert.NoError(t, err) fooHistogramA.Record(ctx, 100, withAB) }, possibleExpectedFiles: []string{ "testdata/conflict_type_histogram_and_updowncounter_1.txt", "testdata/conflict_type_histogram_and_updowncounter_2.txt", }, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { // initialize registry exporter ctx := context.Background() registry := prometheus.NewRegistry() exporter, err := New(append(tc.options, WithRegisterer(registry))...) require.NoError(t, err) // initialize resource res, err := resource.New(ctx, resource.WithAttributes(semconv.ServiceName("prometheus_test")), resource.WithAttributes(semconv.TelemetrySDKVersion("latest")), ) require.NoError(t, err) res, err = resource.Merge(resource.Default(), res) require.NoError(t, err) // initialize provider provider := metric.NewMeterProvider( metric.WithReader(exporter), metric.WithResource(res), ) // initialize two meter a, b meterA := provider.Meter("ma", otelmetric.WithInstrumentationVersion("v0.1.0")) meterB := provider.Meter("mb", otelmetric.WithInstrumentationVersion("v0.1.0")) tc.recordMetrics(ctx, meterA, meterB) match := false for _, filename := range tc.possibleExpectedFiles { file, ferr := os.Open(filename) require.NoError(t, ferr) t.Cleanup(func() { require.NoError(t, file.Close()) }) err = testutil.GatherAndCompare(registry, file) if err == nil { match = true break } } require.Truef(t, match, "expected export not produced: %v", err) }) } } func TestCollectorConcurrentSafe(t *testing.T) { // This tests makes sure that the implemented // https://pkg.go.dev/github.com/prometheus/client_golang/prometheus#Collector // is concurrent safe. ctx := context.Background() registry := prometheus.NewRegistry() exporter, err := New(WithRegisterer(registry)) require.NoError(t, err) provider := metric.NewMeterProvider(metric.WithReader(exporter)) meter := provider.Meter("testmeter") cnt, err := meter.Int64Counter("foo") require.NoError(t, err) cnt.Add(ctx, 100) var wg sync.WaitGroup concurrencyLevel := 10 for i := 0; i < concurrencyLevel; i++ { wg.Add(1) go func() { defer wg.Done() _, err := registry.Gather() // this calls collector.Collect assert.NoError(t, err) }() } wg.Wait() } func TestIncompatibleMeterName(t *testing.T) { defer func(orig otel.ErrorHandler) { otel.SetErrorHandler(orig) }(otel.GetErrorHandler()) errs := []error{} eh := otel.ErrorHandlerFunc(func(e error) { errs = append(errs, e) }) otel.SetErrorHandler(eh) // This test checks that Prometheus exporter ignores // when it encounters incompatible meter name. // Invalid label or metric name leads to error returned from // createScopeInfoMetric. 
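// After the first failure the scope is cached in the collector's
// scopeInfosInvalid map, which is why the handled-error count asserted below
// stays at 1 even after a second Gather and compare pass.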
invalidName := string([]byte{0xff, 0xfe, 0xfd}) ctx := context.Background() registry := prometheus.NewRegistry() exporter, err := New(WithRegisterer(registry)) require.NoError(t, err) provider := metric.NewMeterProvider( metric.WithResource(resource.Empty()), metric.WithReader(exporter)) meter := provider.Meter(invalidName) cnt, err := meter.Int64Counter("foo") require.NoError(t, err) cnt.Add(ctx, 100) file, err := os.Open("testdata/TestIncompatibleMeterName.txt") require.NoError(t, err) t.Cleanup(func() { require.NoError(t, file.Close()) }) err = testutil.GatherAndCompare(registry, file) require.NoError(t, err) assert.Equal(t, 1, len(errs)) // A second collect shouldn't trigger new errors _, err = file.Seek(0, io.SeekStart) assert.NoError(t, err) err = testutil.GatherAndCompare(registry, file) require.NoError(t, err) assert.Equal(t, 1, len(errs)) } func TestShutdownExporter(t *testing.T) { var handledError error eh := otel.ErrorHandlerFunc(func(e error) { handledError = errors.Join(handledError, e) }) otel.SetErrorHandler(eh) ctx := context.Background() registry := prometheus.NewRegistry() for i := 0; i < 3; i++ { exporter, err := New(WithRegisterer(registry)) require.NoError(t, err) provider := metric.NewMeterProvider( metric.WithResource(resource.Default()), metric.WithReader(exporter)) meter := provider.Meter("testmeter") cnt, err := meter.Int64Counter("foo") require.NoError(t, err) cnt.Add(ctx, 100) // verify that metrics added to a previously shutdown MeterProvider // do not conflict with metrics added in this loop. _, err = registry.Gather() require.NoError(t, err) // Shutdown should cause future prometheus Gather() calls to no longer // include metrics from this loop's MeterProvider. err = provider.Shutdown(ctx) require.NoError(t, err) } // ensure we aren't unnecessarily logging errors from the shutdown MeterProvider require.NoError(t, handledError) } opentelemetry-go-1.21.0/exporters/prometheus/go.mod000066400000000000000000000024711452547353200224440ustar00rootroot00000000000000module go.opentelemetry.io/otel/exporters/prometheus go 1.20 require ( github.com/prometheus/client_golang v1.17.0 github.com/prometheus/client_model v0.5.0 github.com/stretchr/testify v1.8.4 go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/metric v1.21.0 go.opentelemetry.io/otel/sdk v1.21.0 go.opentelemetry.io/otel/sdk/metric v1.21.0 google.golang.org/protobuf v1.31.0 ) require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/kr/text v0.2.0 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/common v0.44.0 // indirect github.com/prometheus/procfs v0.11.1 // indirect go.opentelemetry.io/otel/trace v1.21.0 // indirect golang.org/x/sys v0.14.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) replace go.opentelemetry.io/otel => ../.. 
replace go.opentelemetry.io/otel/sdk => ../../sdk replace go.opentelemetry.io/otel/sdk/metric => ../../sdk/metric replace go.opentelemetry.io/otel/trace => ../../trace replace go.opentelemetry.io/otel/metric => ../../metric opentelemetry-go-1.21.0/exporters/prometheus/go.sum000066400000000000000000000102111452547353200224600ustar00rootroot00000000000000github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= golang.org/x/sync 
v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= opentelemetry-go-1.21.0/exporters/prometheus/testdata/000077500000000000000000000000001452547353200231435ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/prometheus/testdata/TestIncompatibleMeterName.txt000077500000000000000000000001121452547353200307450ustar00rootroot00000000000000# HELP target_info Target metadata # TYPE target_info gauge target_info 1 opentelemetry-go-1.21.0/exporters/prometheus/testdata/conflict_help_two_counters_1.txt000066400000000000000000000012071452547353200315500ustar00rootroot00000000000000# HELP bar_bytes_total meter a bar # TYPE bar_bytes_total counter bar_bytes_total{otel_scope_name="ma",otel_scope_version="v0.1.0",type="bar"} 100 bar_bytes_total{otel_scope_name="mb",otel_scope_version="v0.1.0",type="bar"} 100 # HELP otel_scope_info Instrumentation Scope metadata # TYPE otel_scope_info gauge otel_scope_info{otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 otel_scope_info{otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 opentelemetry-go-1.21.0/exporters/prometheus/testdata/conflict_help_two_counters_2.txt000066400000000000000000000012071452547353200315510ustar00rootroot00000000000000# HELP bar_bytes_total meter b bar # TYPE bar_bytes_total counter bar_bytes_total{otel_scope_name="ma",otel_scope_version="v0.1.0",type="bar"} 100 bar_bytes_total{otel_scope_name="mb",otel_scope_version="v0.1.0",type="bar"} 100 # HELP otel_scope_info Instrumentation Scope metadata # TYPE otel_scope_info gauge otel_scope_info{otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 otel_scope_info{otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 opentelemetry-go-1.21.0/exporters/prometheus/testdata/conflict_help_two_histograms_1.txt000066400000000000000000000066011452547353200320710ustar00rootroot00000000000000# HELP bar_bytes meter a bar # TYPE bar_bytes histogram bar_bytes_bucket{A="B",le="0",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 bar_bytes_bucket{A="B",le="5",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 bar_bytes_bucket{A="B",le="10",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 
bar_bytes_bucket{A="B",le="25",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 bar_bytes_bucket{A="B",le="50",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 bar_bytes_bucket{A="B",le="75",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 bar_bytes_bucket{A="B",le="100",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="250",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="500",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="750",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="1000",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="2500",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="5000",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="7500",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="10000",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="+Inf",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bytes_sum{A="B",otel_scope_name="ma",otel_scope_version="v0.1.0"} 100 bar_bytes_count{A="B",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="0",otel_scope_name="mb",otel_scope_version="v0.1.0"} 0 bar_bytes_bucket{A="B",le="5",otel_scope_name="mb",otel_scope_version="v0.1.0"} 0 bar_bytes_bucket{A="B",le="10",otel_scope_name="mb",otel_scope_version="v0.1.0"} 0 bar_bytes_bucket{A="B",le="25",otel_scope_name="mb",otel_scope_version="v0.1.0"} 0 bar_bytes_bucket{A="B",le="50",otel_scope_name="mb",otel_scope_version="v0.1.0"} 0 bar_bytes_bucket{A="B",le="75",otel_scope_name="mb",otel_scope_version="v0.1.0"} 0 bar_bytes_bucket{A="B",le="100",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="250",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="500",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="750",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="1000",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="2500",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="5000",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="7500",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="10000",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="+Inf",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bytes_sum{A="B",otel_scope_name="mb",otel_scope_version="v0.1.0"} 100 bar_bytes_count{A="B",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 # HELP otel_scope_info Instrumentation Scope metadata # TYPE otel_scope_info gauge otel_scope_info{otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 otel_scope_info{otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 opentelemetry-go-1.21.0/exporters/prometheus/testdata/conflict_help_two_histograms_2.txt000066400000000000000000000066011452547353200320720ustar00rootroot00000000000000# HELP bar_bytes meter b bar # TYPE bar_bytes histogram bar_bytes_bucket{A="B",le="0",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 bar_bytes_bucket{A="B",le="5",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 
bar_bytes_bucket{A="B",le="10",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 bar_bytes_bucket{A="B",le="25",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 bar_bytes_bucket{A="B",le="50",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 bar_bytes_bucket{A="B",le="75",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 bar_bytes_bucket{A="B",le="100",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="250",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="500",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="750",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="1000",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="2500",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="5000",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="7500",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="10000",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="+Inf",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bytes_sum{A="B",otel_scope_name="ma",otel_scope_version="v0.1.0"} 100 bar_bytes_count{A="B",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="0",otel_scope_name="mb",otel_scope_version="v0.1.0"} 0 bar_bytes_bucket{A="B",le="5",otel_scope_name="mb",otel_scope_version="v0.1.0"} 0 bar_bytes_bucket{A="B",le="10",otel_scope_name="mb",otel_scope_version="v0.1.0"} 0 bar_bytes_bucket{A="B",le="25",otel_scope_name="mb",otel_scope_version="v0.1.0"} 0 bar_bytes_bucket{A="B",le="50",otel_scope_name="mb",otel_scope_version="v0.1.0"} 0 bar_bytes_bucket{A="B",le="75",otel_scope_name="mb",otel_scope_version="v0.1.0"} 0 bar_bytes_bucket{A="B",le="100",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="250",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="500",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="750",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="1000",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="2500",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="5000",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="7500",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="10000",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bytes_bucket{A="B",le="+Inf",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bytes_sum{A="B",otel_scope_name="mb",otel_scope_version="v0.1.0"} 100 bar_bytes_count{A="B",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 # HELP otel_scope_info Instrumentation Scope metadata # TYPE otel_scope_info gauge otel_scope_info{otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 otel_scope_info{otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 opentelemetry-go-1.21.0/exporters/prometheus/testdata/conflict_help_two_updowncounters_1.txt000066400000000000000000000011551452547353200330070ustar00rootroot00000000000000# HELP bar_bytes meter a bar # TYPE bar_bytes gauge bar_bytes{otel_scope_name="ma",otel_scope_version="v0.1.0",type="bar"} 100 
bar_bytes{otel_scope_name="mb",otel_scope_version="v0.1.0",type="bar"} 100 # HELP otel_scope_info Instrumentation Scope metadata # TYPE otel_scope_info gauge otel_scope_info{otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 otel_scope_info{otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 opentelemetry-go-1.21.0/exporters/prometheus/testdata/conflict_help_two_updowncounters_2.txt000066400000000000000000000011551452547353200330100ustar00rootroot00000000000000# HELP bar_bytes meter b bar # TYPE bar_bytes gauge bar_bytes{otel_scope_name="ma",otel_scope_version="v0.1.0",type="bar"} 100 bar_bytes{otel_scope_name="mb",otel_scope_version="v0.1.0",type="bar"} 100 # HELP otel_scope_info Instrumentation Scope metadata # TYPE otel_scope_info gauge otel_scope_info{otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 otel_scope_info{otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 opentelemetry-go-1.21.0/exporters/prometheus/testdata/conflict_type_counter_and_updowncounter_1.txt000066400000000000000000000007361452547353200343510ustar00rootroot00000000000000# HELP foo_total meter foo # TYPE foo_total counter foo_total{otel_scope_name="ma",otel_scope_version="v0.1.0",type="foo"} 100 # HELP otel_scope_info Instrumentation Scope metadata # TYPE otel_scope_info gauge otel_scope_info{otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 opentelemetry-go-1.21.0/exporters/prometheus/testdata/conflict_type_counter_and_updowncounter_2.txt000066400000000000000000000007341452547353200343500ustar00rootroot00000000000000# HELP foo_total meter foo # TYPE foo_total gauge foo_total{otel_scope_name="ma",otel_scope_version="v0.1.0",type="foo"} 100 # HELP otel_scope_info Instrumentation Scope metadata # TYPE otel_scope_info gauge otel_scope_info{otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 conflict_type_histogram_and_updowncounter_1.txt000066400000000000000000000007351452547353200346070ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/prometheus/testdata# HELP foo_bytes meter gauge foo # TYPE foo_bytes gauge foo_bytes{A="B",otel_scope_name="ma",otel_scope_version="v0.1.0"} 100 # HELP otel_scope_info Instrumentation Scope metadata # TYPE otel_scope_info gauge otel_scope_info{otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 conflict_type_histogram_and_updowncounter_2.txt000066400000000000000000000035621452547353200346110ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/prometheus/testdata# HELP foo_bytes meter histogram foo # TYPE foo_bytes histogram 
foo_bytes_bucket{A="B",le="0",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 foo_bytes_bucket{A="B",le="5",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 foo_bytes_bucket{A="B",le="10",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 foo_bytes_bucket{A="B",le="25",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 foo_bytes_bucket{A="B",le="50",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 foo_bytes_bucket{A="B",le="75",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 foo_bytes_bucket{A="B",le="100",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 foo_bytes_bucket{A="B",le="250",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 foo_bytes_bucket{A="B",le="500",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 foo_bytes_bucket{A="B",le="750",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 foo_bytes_bucket{A="B",le="1000",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 foo_bytes_bucket{A="B",le="2500",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 foo_bytes_bucket{A="B",le="5000",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 foo_bytes_bucket{A="B",le="7500",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 foo_bytes_bucket{A="B",le="10000",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 foo_bytes_bucket{A="B",le="+Inf",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 foo_bytes_sum{A="B",otel_scope_name="ma",otel_scope_version="v0.1.0"} 100 foo_bytes_count{A="B",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 # HELP otel_scope_info Instrumentation Scope metadata # TYPE otel_scope_info gauge otel_scope_info{otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 opentelemetry-go-1.21.0/exporters/prometheus/testdata/conflict_unit_two_counters.txt000066400000000000000000000011551452547353200313610ustar00rootroot00000000000000# HELP bar_total meter bar # TYPE bar_total counter bar_total{otel_scope_name="ma",otel_scope_version="v0.1.0",type="bar"} 100 bar_total{otel_scope_name="mb",otel_scope_version="v0.1.0",type="bar"} 100 # HELP otel_scope_info Instrumentation Scope metadata # TYPE otel_scope_info gauge otel_scope_info{otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 otel_scope_info{otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 opentelemetry-go-1.21.0/exporters/prometheus/testdata/conflict_unit_two_histograms.txt000066400000000000000000000062451452547353200317040ustar00rootroot00000000000000# HELP bar meter histogram bar # TYPE bar histogram bar_bucket{A="B",le="0",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 bar_bucket{A="B",le="5",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 bar_bucket{A="B",le="10",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 bar_bucket{A="B",le="25",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 bar_bucket{A="B",le="50",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 bar_bucket{A="B",le="75",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 bar_bucket{A="B",le="100",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bucket{A="B",le="250",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bucket{A="B",le="500",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 
bar_bucket{A="B",le="750",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bucket{A="B",le="1000",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bucket{A="B",le="2500",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bucket{A="B",le="5000",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bucket{A="B",le="7500",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bucket{A="B",le="10000",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bucket{A="B",le="+Inf",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_sum{A="B",otel_scope_name="ma",otel_scope_version="v0.1.0"} 100 bar_count{A="B",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 bar_bucket{A="B",le="0",otel_scope_name="mb",otel_scope_version="v0.1.0"} 0 bar_bucket{A="B",le="5",otel_scope_name="mb",otel_scope_version="v0.1.0"} 0 bar_bucket{A="B",le="10",otel_scope_name="mb",otel_scope_version="v0.1.0"} 0 bar_bucket{A="B",le="25",otel_scope_name="mb",otel_scope_version="v0.1.0"} 0 bar_bucket{A="B",le="50",otel_scope_name="mb",otel_scope_version="v0.1.0"} 0 bar_bucket{A="B",le="75",otel_scope_name="mb",otel_scope_version="v0.1.0"} 0 bar_bucket{A="B",le="100",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bucket{A="B",le="250",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bucket{A="B",le="500",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bucket{A="B",le="750",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bucket{A="B",le="1000",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bucket{A="B",le="2500",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bucket{A="B",le="5000",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bucket{A="B",le="7500",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bucket{A="B",le="10000",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_bucket{A="B",le="+Inf",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 bar_sum{A="B",otel_scope_name="mb",otel_scope_version="v0.1.0"} 100 bar_count{A="B",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 # HELP otel_scope_info Instrumentation Scope metadata # TYPE otel_scope_info gauge otel_scope_info{otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 otel_scope_info{otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 opentelemetry-go-1.21.0/exporters/prometheus/testdata/conflict_unit_two_updowncounters.txt000066400000000000000000000011311452547353200326100ustar00rootroot00000000000000# HELP bar meter gauge bar # TYPE bar gauge bar{otel_scope_name="ma",otel_scope_version="v0.1.0",type="bar"} 100 bar{otel_scope_name="mb",otel_scope_version="v0.1.0",type="bar"} 100 # HELP otel_scope_info Instrumentation Scope metadata # TYPE otel_scope_info gauge otel_scope_info{otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 otel_scope_info{otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 opentelemetry-go-1.21.0/exporters/prometheus/testdata/counter.txt000077500000000000000000000012061452547353200253650ustar00rootroot00000000000000# HELP foo_seconds_total a simple counter # TYPE foo_seconds_total counter 
foo_seconds_total{A="B",C="D",E="true",F="42",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 24.3 foo_seconds_total{A="D",C="B",E="true",F="42",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 5 # HELP otel_scope_info Instrumentation Scope metadata # TYPE otel_scope_info gauge otel_scope_info{otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 opentelemetry-go-1.21.0/exporters/prometheus/testdata/counter_disabled_suffix.txt000077500000000000000000000012051452547353200305770ustar00rootroot00000000000000# HELP foo_seconds a simple counter without a total suffix # TYPE foo_seconds counter foo_seconds{A="B",C="D",E="true",F="42",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 24.3 foo_seconds{A="D",C="B",E="true",F="42",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 5 # HELP otel_scope_info Instrumentation Scope metadata # TYPE otel_scope_info gauge otel_scope_info{otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 opentelemetry-go-1.21.0/exporters/prometheus/testdata/custom_resource.txt000077500000000000000000000010211452547353200271220ustar00rootroot00000000000000# HELP foo_total a simple counter # TYPE foo_total counter foo_total{A="B",C="D",E="true",F="42",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 24.3 # HELP otel_scope_info Instrumentation Scope metadata # TYPE otel_scope_info gauge otel_scope_info{otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge target_info{A="B",C="D",service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 opentelemetry-go-1.21.0/exporters/prometheus/testdata/empty_resource.txt000077500000000000000000000006071452547353200267570ustar00rootroot00000000000000# HELP foo_total a simple counter # TYPE foo_total counter foo_total{A="B",C="D",E="true",F="42",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 24.3 # HELP otel_scope_info Instrumentation Scope metadata # TYPE otel_scope_info gauge otel_scope_info{otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge target_info 1 opentelemetry-go-1.21.0/exporters/prometheus/testdata/gauge.txt000066400000000000000000000007641452547353200250030ustar00rootroot00000000000000# HELP bar_ratio a fun little gauge # TYPE bar_ratio gauge bar_ratio{A="B",C="D",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} .75 # HELP otel_scope_info Instrumentation Scope metadata # TYPE otel_scope_info gauge otel_scope_info{otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 opentelemetry-go-1.21.0/exporters/prometheus/testdata/histogram.txt000066400000000000000000000034211452547353200257010ustar00rootroot00000000000000# HELP histogram_baz_bytes a very nice histogram # TYPE histogram_baz_bytes histogram 
histogram_baz_bytes_bucket{A="B",C="D",le="0",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 0 histogram_baz_bytes_bucket{A="B",C="D",le="5",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 0 histogram_baz_bytes_bucket{A="B",C="D",le="10",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 1 histogram_baz_bytes_bucket{A="B",C="D",le="25",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 2 histogram_baz_bytes_bucket{A="B",C="D",le="50",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 2 histogram_baz_bytes_bucket{A="B",C="D",le="75",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 2 histogram_baz_bytes_bucket{A="B",C="D",le="100",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 2 histogram_baz_bytes_bucket{A="B",C="D",le="250",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 4 histogram_baz_bytes_bucket{A="B",C="D",le="500",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 4 histogram_baz_bytes_bucket{A="B",C="D",le="1000",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 4 histogram_baz_bytes_bucket{A="B",C="D",le="+Inf",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 4 histogram_baz_bytes_sum{A="B",C="D",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 236 histogram_baz_bytes_count{A="B",C="D",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 4 # HELP otel_scope_info Instrumentation Scope metadata # TYPE otel_scope_info gauge otel_scope_info{otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 opentelemetry-go-1.21.0/exporters/prometheus/testdata/multi_scopes.txt000066400000000000000000000013711452547353200264140ustar00rootroot00000000000000# HELP bar_seconds_total meter bar counter # TYPE bar_seconds_total counter bar_seconds_total{otel_scope_name="meterbar",otel_scope_version="v0.1.0",type="bar"} 200 # HELP foo_seconds_total meter foo counter # TYPE foo_seconds_total counter foo_seconds_total{otel_scope_name="meterfoo",otel_scope_version="v0.1.0",type="foo"} 100 # HELP otel_scope_info Instrumentation Scope metadata # TYPE otel_scope_info gauge otel_scope_info{otel_scope_name="meterfoo",otel_scope_version="v0.1.0"} 1 otel_scope_info{otel_scope_name="meterbar",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 opentelemetry-go-1.21.0/exporters/prometheus/testdata/no_conflict_two_counters.txt000066400000000000000000000012031452547353200310100ustar00rootroot00000000000000# HELP foo_bytes_total meter counter foo # TYPE foo_bytes_total counter foo_bytes_total{A="B",otel_scope_name="ma",otel_scope_version="v0.1.0"} 100 foo_bytes_total{A="B",otel_scope_name="mb",otel_scope_version="v0.1.0"} 100 # HELP otel_scope_info Instrumentation Scope metadata # TYPE otel_scope_info gauge otel_scope_info{otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 otel_scope_info{otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 
opentelemetry-go-1.21.0/exporters/prometheus/testdata/no_conflict_two_histograms.txt000066400000000000000000000066111452547353200313360ustar00rootroot00000000000000# HELP foo_bytes meter histogram foo # TYPE foo_bytes histogram foo_bytes_bucket{A="B",le="0",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 foo_bytes_bucket{A="B",le="5",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 foo_bytes_bucket{A="B",le="10",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 foo_bytes_bucket{A="B",le="25",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 foo_bytes_bucket{A="B",le="50",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 foo_bytes_bucket{A="B",le="75",otel_scope_name="ma",otel_scope_version="v0.1.0"} 0 foo_bytes_bucket{A="B",le="100",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 foo_bytes_bucket{A="B",le="250",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 foo_bytes_bucket{A="B",le="500",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 foo_bytes_bucket{A="B",le="750",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 foo_bytes_bucket{A="B",le="1000",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 foo_bytes_bucket{A="B",le="2500",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 foo_bytes_bucket{A="B",le="5000",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 foo_bytes_bucket{A="B",le="7500",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 foo_bytes_bucket{A="B",le="10000",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 foo_bytes_bucket{A="B",le="+Inf",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 foo_bytes_sum{A="B",otel_scope_name="ma",otel_scope_version="v0.1.0"} 100 foo_bytes_count{A="B",otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 foo_bytes_bucket{A="B",le="0",otel_scope_name="mb",otel_scope_version="v0.1.0"} 0 foo_bytes_bucket{A="B",le="5",otel_scope_name="mb",otel_scope_version="v0.1.0"} 0 foo_bytes_bucket{A="B",le="10",otel_scope_name="mb",otel_scope_version="v0.1.0"} 0 foo_bytes_bucket{A="B",le="25",otel_scope_name="mb",otel_scope_version="v0.1.0"} 0 foo_bytes_bucket{A="B",le="50",otel_scope_name="mb",otel_scope_version="v0.1.0"} 0 foo_bytes_bucket{A="B",le="75",otel_scope_name="mb",otel_scope_version="v0.1.0"} 0 foo_bytes_bucket{A="B",le="100",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 foo_bytes_bucket{A="B",le="250",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 foo_bytes_bucket{A="B",le="500",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 foo_bytes_bucket{A="B",le="750",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 foo_bytes_bucket{A="B",le="1000",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 foo_bytes_bucket{A="B",le="2500",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 foo_bytes_bucket{A="B",le="5000",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 foo_bytes_bucket{A="B",le="7500",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 foo_bytes_bucket{A="B",le="10000",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 foo_bytes_bucket{A="B",le="+Inf",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 foo_bytes_sum{A="B",otel_scope_name="mb",otel_scope_version="v0.1.0"} 100 foo_bytes_count{A="B",otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 # HELP otel_scope_info Instrumentation Scope metadata # TYPE otel_scope_info gauge otel_scope_info{otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 otel_scope_info{otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge 
target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 opentelemetry-go-1.21.0/exporters/prometheus/testdata/no_conflict_two_updowncounters.txt000066400000000000000000000011471452547353200322540ustar00rootroot00000000000000# HELP foo_bytes meter gauge foo # TYPE foo_bytes gauge foo_bytes{A="B",otel_scope_name="ma",otel_scope_version="v0.1.0"} 100 foo_bytes{A="B",otel_scope_name="mb",otel_scope_version="v0.1.0"} 100 # HELP otel_scope_info Instrumentation Scope metadata # TYPE otel_scope_info gauge otel_scope_info{otel_scope_name="ma",otel_scope_version="v0.1.0"} 1 otel_scope_info{otel_scope_name="mb",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 opentelemetry-go-1.21.0/exporters/prometheus/testdata/sanitized_labels.txt000077500000000000000000000007761452547353200272350ustar00rootroot00000000000000# HELP foo_total a sanitary counter # TYPE foo_total counter foo_total{A_B="Q",C_D="Y;Z",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 24.3 # HELP otel_scope_info Instrumentation Scope metadata # TYPE otel_scope_info gauge otel_scope_info{otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 opentelemetry-go-1.21.0/exporters/prometheus/testdata/sanitized_names.txt000066400000000000000000000054221452547353200270640ustar00rootroot00000000000000# HELP bar a fun little gauge # TYPE bar gauge bar{A="B",C="D",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 75 # HELP _0invalid_counter_name_total a counter with an invalid name # TYPE _0invalid_counter_name_total counter _0invalid_counter_name_total{A="B",C="D",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 100 # HELP invalid_gauge_name a gauge with an invalid name # TYPE invalid_gauge_name gauge invalid_gauge_name{A="B",C="D",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 100 # HELP invalid_hist_name a histogram with an invalid name # TYPE invalid_hist_name histogram invalid_hist_name_bucket{A="B",C="D",le="0",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 0 invalid_hist_name_bucket{A="B",C="D",le="5",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 0 invalid_hist_name_bucket{A="B",C="D",le="10",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 0 invalid_hist_name_bucket{A="B",C="D",le="25",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 1 invalid_hist_name_bucket{A="B",C="D",le="50",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 1 invalid_hist_name_bucket{A="B",C="D",le="75",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 1 invalid_hist_name_bucket{A="B",C="D",le="100",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 1 invalid_hist_name_bucket{A="B",C="D",le="250",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 1 invalid_hist_name_bucket{A="B",C="D",le="500",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 1 invalid_hist_name_bucket{A="B",C="D",le="750",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 1 invalid_hist_name_bucket{A="B",C="D",le="1000",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 1 
invalid_hist_name_bucket{A="B",C="D",le="2500",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 1 invalid_hist_name_bucket{A="B",C="D",le="5000",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 1 invalid_hist_name_bucket{A="B",C="D",le="7500",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 1 invalid_hist_name_bucket{A="B",C="D",le="10000",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 1 invalid_hist_name_bucket{A="B",C="D",le="+Inf",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 1 invalid_hist_name_sum{A="B",C="D",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 23 invalid_hist_name_count{A="B",C="D",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 1 # HELP otel_scope_info Instrumentation Scope metadata # TYPE otel_scope_info gauge otel_scope_info{otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 opentelemetry-go-1.21.0/exporters/prometheus/testdata/with_namespace.txt000077500000000000000000000010241452547353200266730ustar00rootroot00000000000000# HELP test_foo_total a simple counter # TYPE test_foo_total counter test_foo_total{A="B",C="D",E="true",F="42",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 24.3 # HELP otel_scope_info Instrumentation Scope metadata # TYPE otel_scope_info gauge otel_scope_info{otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 1 # HELP target_info Target metadata # TYPE target_info gauge target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 opentelemetry-go-1.21.0/exporters/prometheus/testdata/without_scope_and_target_info.txt000066400000000000000000000001521452547353200320010ustar00rootroot00000000000000# HELP bar_bytes_total a fun little counter # TYPE bar_bytes_total counter bar_bytes_total{A="B",C="D"} 3 opentelemetry-go-1.21.0/exporters/prometheus/testdata/without_scope_info.txt000066400000000000000000000004341452547353200276140ustar00rootroot00000000000000# HELP bar_ratio a fun little gauge # TYPE bar_ratio gauge bar_ratio{A="B",C="D"} 1 # HELP target_info Target metadata # TYPE target_info gauge target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1 opentelemetry-go-1.21.0/exporters/prometheus/testdata/without_target_info.txt000077500000000000000000000004751452547353200300010ustar00rootroot00000000000000# HELP foo_total a simple counter # TYPE foo_total counter foo_total{A="B",C="D",E="true",F="42",otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 24.3 # HELP otel_scope_info Instrumentation Scope metadata # TYPE otel_scope_info gauge otel_scope_info{otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 1 opentelemetry-go-1.21.0/exporters/stdout/000077500000000000000000000000001452547353200204615ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/stdout/stdoutmetric/000077500000000000000000000000001452547353200232075ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/stdout/stdoutmetric/config.go000066400000000000000000000077361452547353200250200ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package stdoutmetric // import "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" import ( "encoding/json" "io" "os" "go.opentelemetry.io/otel/sdk/metric" ) // config contains options for the exporter. type config struct { prettyPrint bool encoder *encoderHolder temporalitySelector metric.TemporalitySelector aggregationSelector metric.AggregationSelector redactTimestamps bool } // newConfig creates a validated config configured with options. func newConfig(options ...Option) config { cfg := config{} for _, opt := range options { cfg = opt.apply(cfg) } if cfg.encoder == nil { enc := json.NewEncoder(os.Stdout) cfg.encoder = &encoderHolder{encoder: enc} } if cfg.prettyPrint { if e, ok := cfg.encoder.encoder.(*json.Encoder); ok { e.SetIndent("", "\t") } } if cfg.temporalitySelector == nil { cfg.temporalitySelector = metric.DefaultTemporalitySelector } if cfg.aggregationSelector == nil { cfg.aggregationSelector = metric.DefaultAggregationSelector } return cfg } // Option sets exporter option values. type Option interface { apply(config) config } type optionFunc func(config) config func (o optionFunc) apply(c config) config { return o(c) } // WithEncoder sets the exporter to use encoder to encode all the metric // data-types to an output. func WithEncoder(encoder Encoder) Option { return optionFunc(func(c config) config { if encoder != nil { c.encoder = &encoderHolder{encoder: encoder} } return c }) } // WithWriter sets the export stream destination. // Using this option overrides any previously set encoder. func WithWriter(w io.Writer) Option { return WithEncoder(json.NewEncoder(w)) } // WithPrettyPrint prettifies the emitted output. // This option only works if the encoder is a *json.Encoder, as is the case // when using `WithWriter`. func WithPrettyPrint() Option { return optionFunc(func(c config) config { c.prettyPrint = true return c }) } // WithTemporalitySelector sets the TemporalitySelector the exporter will use // to determine the Temporality of an instrument based on its kind. If this // option is not used, the exporter will use the DefaultTemporalitySelector // from the go.opentelemetry.io/otel/sdk/metric package. func WithTemporalitySelector(selector metric.TemporalitySelector) Option { return temporalitySelectorOption{selector: selector} } type temporalitySelectorOption struct { selector metric.TemporalitySelector } func (t temporalitySelectorOption) apply(c config) config { c.temporalitySelector = t.selector return c } // WithAggregationSelector sets the AggregationSelector the exporter will use // to determine the aggregation to use for an instrument based on its kind. If // this option is not used, the exporter will use the // DefaultAggregationSelector from the go.opentelemetry.io/otel/sdk/metric // package or the aggregation explicitly passed for a view matching an // instrument. 
func WithAggregationSelector(selector metric.AggregationSelector) Option { return aggregationSelectorOption{selector: selector} } type aggregationSelectorOption struct { selector metric.AggregationSelector } func (t aggregationSelectorOption) apply(c config) config { c.aggregationSelector = t.selector return c } // WithoutTimestamps sets all timestamps to zero in the output stream. func WithoutTimestamps() Option { return optionFunc(func(c config) config { c.redactTimestamps = true return c }) } opentelemetry-go-1.21.0/exporters/stdout/stdoutmetric/doc.go000066400000000000000000000021071452547353200243030ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package stdoutmetric provides an exporter for OpenTelemetry metric // telemetry. // // The exporter is intended to be used for testing and debugging, it is not // meant for production use. Additionally, it does not provide an interchange // format for OpenTelemetry that is supported with any stability or // compatibility guarantees. If these are needed features, please use the OTLP // exporter instead. package stdoutmetric // import "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" opentelemetry-go-1.21.0/exporters/stdout/stdoutmetric/encoder.go000066400000000000000000000026041452547353200251570ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package stdoutmetric // import "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" import ( "errors" ) // Encoder encodes and outputs OpenTelemetry metric data-types as human // readable text. type Encoder interface { // Encode handles the encoding and writing of OpenTelemetry metric data. Encode(v any) error } // encoderHolder is the concrete type used to wrap an Encoder so it can be // used as a atomic.Value type. type encoderHolder struct { encoder Encoder } func (e encoderHolder) Encode(v any) error { return e.encoder.Encode(v) } // shutdownEncoder is used when the exporter is shutdown. It always returns // errShutdown when Encode is called. 
type shutdownEncoder struct{} var errShutdown = errors.New("exporter shutdown") func (shutdownEncoder) Encode(any) error { return errShutdown } opentelemetry-go-1.21.0/exporters/stdout/stdoutmetric/example_test.go000066400000000000000000000225341452547353200262360ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package stdoutmetric_test import ( "context" "encoding/json" "os" "time" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/resource" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" ) var ( // Sat Jan 01 2000 00:00:00 GMT+0000. now = time.Date(2000, time.January, 0o1, 0, 0, 0, 0, time.FixedZone("GMT", 0)) res = resource.NewSchemaless( semconv.ServiceName("stdoutmetric-example"), ) mockData = metricdata.ResourceMetrics{ Resource: res, ScopeMetrics: []metricdata.ScopeMetrics{ { Scope: instrumentation.Scope{Name: "example", Version: "0.0.1"}, Metrics: []metricdata.Metrics{ { Name: "requests", Description: "Number of requests received", Unit: "1", Data: metricdata.Sum[int64]{ IsMonotonic: true, Temporality: metricdata.DeltaTemporality, DataPoints: []metricdata.DataPoint[int64]{ { Attributes: attribute.NewSet(attribute.String("server", "central")), StartTime: now, Time: now.Add(1 * time.Second), Value: 5, }, }, }, }, { Name: "system.cpu.time", Description: "Accumulated CPU time spent", Unit: "s", Data: metricdata.Sum[float64]{ IsMonotonic: true, Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.DataPoint[float64]{ { Attributes: attribute.NewSet(attribute.String("state", "user")), StartTime: now, Time: now.Add(1 * time.Second), Value: 0.5, }, }, }, }, { Name: "latency", Description: "Time spend processing received requests", Unit: "ms", Data: metricdata.Histogram[float64]{ Temporality: metricdata.DeltaTemporality, DataPoints: []metricdata.HistogramDataPoint[float64]{ { Attributes: attribute.NewSet(attribute.String("server", "central")), StartTime: now, Time: now.Add(1 * time.Second), Count: 10, Bounds: []float64{1, 5, 10}, BucketCounts: []uint64{1, 3, 6, 0}, Sum: 57, }, }, }, }, { Name: "system.memory.usage", Description: "Memory usage", Unit: "By", Data: metricdata.Gauge[int64]{ DataPoints: []metricdata.DataPoint[int64]{ { Attributes: attribute.NewSet(attribute.String("state", "used")), Time: now.Add(1 * time.Second), Value: 100, }, }, }, }, { Name: "temperature", Description: "CPU global temperature", Unit: "cel(1 K)", Data: metricdata.Gauge[float64]{ DataPoints: []metricdata.DataPoint[float64]{ { Attributes: attribute.NewSet(attribute.String("server", "central")), Time: now.Add(1 * time.Second), Value: 32.4, }, }, }, }, }, }, }, } ) func Example() { // Print with a JSON encoder that indents with two spaces. 
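	// Note: the exporter is also created with WithoutTimestamps below, which
	// zeroes every StartTime/Time in the exported data; that is what keeps the
	// expected Output at the end of this example deterministic.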
enc := json.NewEncoder(os.Stdout) enc.SetIndent("", " ") exp, err := stdoutmetric.New( stdoutmetric.WithEncoder(enc), stdoutmetric.WithoutTimestamps(), ) if err != nil { panic(err) } // Register the exporter with an SDK via a periodic reader. sdk := metric.NewMeterProvider( metric.WithResource(res), metric.WithReader(metric.NewPeriodicReader(exp)), ) ctx := context.Background() // This is where the sdk would be used to create a Meter and from that // instruments that would make measurements of your code. To simulate that // behavior, call export directly with mocked data. _ = exp.Export(ctx, &mockData) // Ensure the periodic reader is cleaned up by shutting down the sdk. _ = sdk.Shutdown(ctx) // Output: // { // "Resource": [ // { // "Key": "service.name", // "Value": { // "Type": "STRING", // "Value": "stdoutmetric-example" // } // } // ], // "ScopeMetrics": [ // { // "Scope": { // "Name": "example", // "Version": "0.0.1", // "SchemaURL": "" // }, // "Metrics": [ // { // "Name": "requests", // "Description": "Number of requests received", // "Unit": "1", // "Data": { // "DataPoints": [ // { // "Attributes": [ // { // "Key": "server", // "Value": { // "Type": "STRING", // "Value": "central" // } // } // ], // "StartTime": "0001-01-01T00:00:00Z", // "Time": "0001-01-01T00:00:00Z", // "Value": 5 // } // ], // "Temporality": "DeltaTemporality", // "IsMonotonic": true // } // }, // { // "Name": "system.cpu.time", // "Description": "Accumulated CPU time spent", // "Unit": "s", // "Data": { // "DataPoints": [ // { // "Attributes": [ // { // "Key": "state", // "Value": { // "Type": "STRING", // "Value": "user" // } // } // ], // "StartTime": "0001-01-01T00:00:00Z", // "Time": "0001-01-01T00:00:00Z", // "Value": 0.5 // } // ], // "Temporality": "CumulativeTemporality", // "IsMonotonic": true // } // }, // { // "Name": "latency", // "Description": "Time spend processing received requests", // "Unit": "ms", // "Data": { // "DataPoints": [ // { // "Attributes": [ // { // "Key": "server", // "Value": { // "Type": "STRING", // "Value": "central" // } // } // ], // "StartTime": "0001-01-01T00:00:00Z", // "Time": "0001-01-01T00:00:00Z", // "Count": 10, // "Bounds": [ // 1, // 5, // 10 // ], // "BucketCounts": [ // 1, // 3, // 6, // 0 // ], // "Min": {}, // "Max": {}, // "Sum": 57 // } // ], // "Temporality": "DeltaTemporality" // } // }, // { // "Name": "system.memory.usage", // "Description": "Memory usage", // "Unit": "By", // "Data": { // "DataPoints": [ // { // "Attributes": [ // { // "Key": "state", // "Value": { // "Type": "STRING", // "Value": "used" // } // } // ], // "StartTime": "0001-01-01T00:00:00Z", // "Time": "0001-01-01T00:00:00Z", // "Value": 100 // } // ] // } // }, // { // "Name": "temperature", // "Description": "CPU global temperature", // "Unit": "cel(1 K)", // "Data": { // "DataPoints": [ // { // "Attributes": [ // { // "Key": "server", // "Value": { // "Type": "STRING", // "Value": "central" // } // } // ], // "StartTime": "0001-01-01T00:00:00Z", // "Time": "0001-01-01T00:00:00Z", // "Value": 32.4 // } // ] // } // } // ] // } // ] // } // { // "Resource": [ // { // "Key": "service.name", // "Value": { // "Type": "STRING", // "Value": "stdoutmetric-example" // } // } // ], // "ScopeMetrics": null // } } opentelemetry-go-1.21.0/exporters/stdout/stdoutmetric/exporter.go000066400000000000000000000114701452547353200254110ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file 
except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package stdoutmetric // import "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" import ( "context" "errors" "fmt" "sync" "sync/atomic" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) // exporter is an OpenTelemetry metric exporter. type exporter struct { encVal atomic.Value // encoderHolder shutdownOnce sync.Once temporalitySelector metric.TemporalitySelector aggregationSelector metric.AggregationSelector redactTimestamps bool } // New returns a configured metric exporter. // // If no options are passed, the default exporter returned will use a JSON // encoder with tab indentations that output to STDOUT. func New(options ...Option) (metric.Exporter, error) { cfg := newConfig(options...) exp := &exporter{ temporalitySelector: cfg.temporalitySelector, aggregationSelector: cfg.aggregationSelector, redactTimestamps: cfg.redactTimestamps, } exp.encVal.Store(*cfg.encoder) return exp, nil } func (e *exporter) Temporality(k metric.InstrumentKind) metricdata.Temporality { return e.temporalitySelector(k) } func (e *exporter) Aggregation(k metric.InstrumentKind) metric.Aggregation { return e.aggregationSelector(k) } func (e *exporter) Export(ctx context.Context, data *metricdata.ResourceMetrics) error { select { case <-ctx.Done(): // Don't do anything if the context has already timed out. return ctx.Err() default: // Context is still valid, continue. } if e.redactTimestamps { redactTimestamps(data) } global.Debug("STDOUT exporter export", "Data", data) return e.encVal.Load().(encoderHolder).Encode(data) } func (e *exporter) ForceFlush(ctx context.Context) error { // exporter holds no state, nothing to flush. 
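	// Even so, return ctx.Err() so callers still observe context cancellation
	// or an expired deadline (the exporter tests rely on this behavior).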
return ctx.Err() } func (e *exporter) Shutdown(ctx context.Context) error { e.shutdownOnce.Do(func() { e.encVal.Store(encoderHolder{ encoder: shutdownEncoder{}, }) }) return ctx.Err() } func (e *exporter) MarshalLog() interface{} { return struct{ Type string }{Type: "STDOUT"} } func redactTimestamps(orig *metricdata.ResourceMetrics) { for i, sm := range orig.ScopeMetrics { metrics := sm.Metrics for j, m := range metrics { data := m.Data orig.ScopeMetrics[i].Metrics[j].Data = redactAggregationTimestamps(data) } } } var errUnknownAggType = errors.New("unknown aggregation type") func redactAggregationTimestamps(orig metricdata.Aggregation) metricdata.Aggregation { switch a := orig.(type) { case metricdata.Sum[float64]: return metricdata.Sum[float64]{ Temporality: a.Temporality, DataPoints: redactDataPointTimestamps(a.DataPoints), IsMonotonic: a.IsMonotonic, } case metricdata.Sum[int64]: return metricdata.Sum[int64]{ Temporality: a.Temporality, DataPoints: redactDataPointTimestamps(a.DataPoints), IsMonotonic: a.IsMonotonic, } case metricdata.Gauge[float64]: return metricdata.Gauge[float64]{ DataPoints: redactDataPointTimestamps(a.DataPoints), } case metricdata.Gauge[int64]: return metricdata.Gauge[int64]{ DataPoints: redactDataPointTimestamps(a.DataPoints), } case metricdata.Histogram[int64]: return metricdata.Histogram[int64]{ Temporality: a.Temporality, DataPoints: redactHistogramTimestamps(a.DataPoints), } case metricdata.Histogram[float64]: return metricdata.Histogram[float64]{ Temporality: a.Temporality, DataPoints: redactHistogramTimestamps(a.DataPoints), } default: global.Error(errUnknownAggType, fmt.Sprintf("%T", a)) return orig } } func redactHistogramTimestamps[T int64 | float64](hdp []metricdata.HistogramDataPoint[T]) []metricdata.HistogramDataPoint[T] { out := make([]metricdata.HistogramDataPoint[T], len(hdp)) for i, dp := range hdp { out[i] = metricdata.HistogramDataPoint[T]{ Attributes: dp.Attributes, Count: dp.Count, Sum: dp.Sum, Bounds: dp.Bounds, BucketCounts: dp.BucketCounts, Min: dp.Min, Max: dp.Max, } } return out } func redactDataPointTimestamps[T int64 | float64](sdp []metricdata.DataPoint[T]) []metricdata.DataPoint[T] { out := make([]metricdata.DataPoint[T], len(sdp)) for i, dp := range sdp { out[i] = metricdata.DataPoint[T]{ Attributes: dp.Attributes, Value: dp.Value, } } return out } opentelemetry-go-1.21.0/exporters/stdout/stdoutmetric/exporter_test.go000066400000000000000000000110011452547353200264360ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package stdoutmetric_test // import "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" import ( "bytes" "context" "encoding/json" "io" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) func testEncoderOption() stdoutmetric.Option { // Discard export output for testing. 
enc := json.NewEncoder(io.Discard) return stdoutmetric.WithEncoder(enc) } func testCtxErrHonored(factory func(*testing.T) func(context.Context) error) func(t *testing.T) { return func(t *testing.T) { t.Helper() ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) t.Run("DeadlineExceeded", func(t *testing.T) { innerCtx, innerCancel := context.WithTimeout(ctx, time.Nanosecond) t.Cleanup(innerCancel) <-innerCtx.Done() f := factory(t) assert.ErrorIs(t, f(innerCtx), context.DeadlineExceeded) }) t.Run("Canceled", func(t *testing.T) { innerCtx, innerCancel := context.WithCancel(ctx) innerCancel() f := factory(t) assert.ErrorIs(t, f(innerCtx), context.Canceled) }) t.Run("NoError", func(t *testing.T) { f := factory(t) assert.NoError(t, f(ctx)) }) } } func TestExporterHonorsContextErrors(t *testing.T) { t.Run("Shutdown", testCtxErrHonored(func(t *testing.T) func(context.Context) error { exp, err := stdoutmetric.New(testEncoderOption()) require.NoError(t, err) return exp.Shutdown })) t.Run("ForceFlush", testCtxErrHonored(func(t *testing.T) func(context.Context) error { exp, err := stdoutmetric.New(testEncoderOption()) require.NoError(t, err) return exp.ForceFlush })) t.Run("Export", testCtxErrHonored(func(t *testing.T) func(context.Context) error { exp, err := stdoutmetric.New(testEncoderOption()) require.NoError(t, err) return func(ctx context.Context) error { data := new(metricdata.ResourceMetrics) return exp.Export(ctx, data) } })) } func TestShutdownExporterReturnsShutdownErrorOnExport(t *testing.T) { var ( data = new(metricdata.ResourceMetrics) ctx = context.Background() exp, err = stdoutmetric.New(testEncoderOption()) ) require.NoError(t, err) require.NoError(t, exp.Shutdown(ctx)) assert.EqualError(t, exp.Export(ctx, data), "exporter shutdown") } func deltaSelector(metric.InstrumentKind) metricdata.Temporality { return metricdata.DeltaTemporality } func TestExportWithOptions(t *testing.T) { var ( data = new(metricdata.ResourceMetrics) ctx = context.Background() ) for _, tt := range []struct { name string opts []stdoutmetric.Option expectedData string }{ { name: "with no options", expectedData: "{\"Resource\":null,\"ScopeMetrics\":null}\n", }, { name: "with pretty print", opts: []stdoutmetric.Option{ stdoutmetric.WithPrettyPrint(), }, expectedData: "{\n\t\"Resource\": null,\n\t\"ScopeMetrics\": null\n}\n", }, } { t.Run(tt.name, func(t *testing.T) { var b bytes.Buffer opts := append(tt.opts, stdoutmetric.WithWriter(&b)) exp, err := stdoutmetric.New(opts...) 
require.NoError(t, err) require.NoError(t, exp.Export(ctx, data)) assert.Equal(t, tt.expectedData, b.String()) }) } } func TestTemporalitySelector(t *testing.T) { exp, err := stdoutmetric.New( testEncoderOption(), stdoutmetric.WithTemporalitySelector(deltaSelector), ) require.NoError(t, err) var unknownKind metric.InstrumentKind assert.Equal(t, metricdata.DeltaTemporality, exp.Temporality(unknownKind)) } func dropSelector(metric.InstrumentKind) metric.Aggregation { return metric.AggregationDrop{} } func TestAggregationSelector(t *testing.T) { exp, err := stdoutmetric.New( testEncoderOption(), stdoutmetric.WithAggregationSelector(dropSelector), ) require.NoError(t, err) var unknownKind metric.InstrumentKind assert.Equal(t, metric.AggregationDrop{}, exp.Aggregation(unknownKind)) } opentelemetry-go-1.21.0/exporters/stdout/stdoutmetric/go.mod000066400000000000000000000016051452547353200243170ustar00rootroot00000000000000module go.opentelemetry.io/otel/exporters/stdout/stdoutmetric go 1.20 require ( github.com/stretchr/testify v1.8.4 go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/sdk v1.21.0 go.opentelemetry.io/otel/sdk/metric v1.21.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/otel/trace v1.21.0 // indirect golang.org/x/sys v0.14.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) replace go.opentelemetry.io/otel/metric => ../../../metric replace go.opentelemetry.io/otel => ../../.. replace go.opentelemetry.io/otel/sdk/metric => ../../../sdk/metric replace go.opentelemetry.io/otel/trace => ../../../trace replace go.opentelemetry.io/otel/sdk => ../../../sdk opentelemetry-go-1.21.0/exporters/stdout/stdoutmetric/go.sum000066400000000000000000000027721452547353200243520ustar00rootroot00000000000000github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 
opentelemetry-go-1.21.0/exporters/stdout/stdouttrace/000077500000000000000000000000001452547353200230225ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/stdout/stdouttrace/config.go000066400000000000000000000044131452547353200246200ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package stdouttrace // import "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" import ( "io" "os" ) var ( defaultWriter = os.Stdout defaultPrettyPrint = false defaultTimestamps = true ) // config contains options for the STDOUT exporter. type config struct { // Writer is the destination. If not set, os.Stdout is used. Writer io.Writer // PrettyPrint will encode the output into readable JSON. Default is // false. PrettyPrint bool // Timestamps specifies if timestamps should be printed. Default is // true. Timestamps bool } // newConfig creates a validated Config configured with options. func newConfig(options ...Option) (config, error) { cfg := config{ Writer: defaultWriter, PrettyPrint: defaultPrettyPrint, Timestamps: defaultTimestamps, } for _, opt := range options { cfg = opt.apply(cfg) } return cfg, nil } // Option sets the value of an option for a Config. type Option interface { apply(config) config } // WithWriter sets the export stream destination. func WithWriter(w io.Writer) Option { return writerOption{w} } type writerOption struct { W io.Writer } func (o writerOption) apply(cfg config) config { cfg.Writer = o.W return cfg } // WithPrettyPrint prettifies the emitted output. func WithPrettyPrint() Option { return prettyPrintOption(true) } type prettyPrintOption bool func (o prettyPrintOption) apply(cfg config) config { cfg.PrettyPrint = bool(o) return cfg } // WithoutTimestamps sets the export stream to not include timestamps. func WithoutTimestamps() Option { return timestampsOption(false) } type timestampsOption bool func (o timestampsOption) apply(cfg config) config { cfg.Timestamps = bool(o) return cfg } opentelemetry-go-1.21.0/exporters/stdout/stdouttrace/doc.go000066400000000000000000000014571452547353200241250ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package stdouttrace contains an OpenTelemetry exporter for tracing // telemetry to be written to an output destination as JSON. 
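//
// A minimal pipeline setup is sketched below. It mirrors this package's own
// example test; the import aliases (sdktrace, otel), variable names, and the
// elided error handling are illustrative only:
//
//	exp, err := stdouttrace.New(stdouttrace.WithPrettyPrint())
//	if err != nil {
//		// handle err
//	}
//	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
//	otel.SetTracerProvider(tp)
//	defer func() { _ = tp.Shutdown(context.Background()) }()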
package stdouttrace // import "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" opentelemetry-go-1.21.0/exporters/stdout/stdouttrace/example_test.go000066400000000000000000000046221452547353200260470ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package stdouttrace_test import ( "context" "fmt" "log" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" "go.opentelemetry.io/otel/trace" ) const ( instrumentationName = "github.com/instrumentron" instrumentationVersion = "0.1.0" ) var tracer = otel.GetTracerProvider().Tracer( instrumentationName, trace.WithInstrumentationVersion(instrumentationVersion), trace.WithSchemaURL(semconv.SchemaURL), ) func add(ctx context.Context, x, y int64) int64 { var span trace.Span _, span = tracer.Start(ctx, "Addition") defer span.End() return x + y } func multiply(ctx context.Context, x, y int64) int64 { var span trace.Span _, span = tracer.Start(ctx, "Multiplication") defer span.End() return x * y } func Resource() *resource.Resource { return resource.NewWithAttributes( semconv.SchemaURL, semconv.ServiceName("stdout-example"), semconv.ServiceVersion("0.0.1"), ) } func InstallExportPipeline(ctx context.Context) (func(context.Context) error, error) { exporter, err := stdouttrace.New(stdouttrace.WithPrettyPrint()) if err != nil { return nil, fmt.Errorf("creating stdout exporter: %w", err) } tracerProvider := sdktrace.NewTracerProvider( sdktrace.WithBatcher(exporter), sdktrace.WithResource(Resource()), ) otel.SetTracerProvider(tracerProvider) return tracerProvider.Shutdown, nil } func Example() { ctx := context.Background() // Registers a tracer Provider globally. shutdown, err := InstallExportPipeline(ctx) if err != nil { log.Fatal(err) } defer func() { if err := shutdown(ctx); err != nil { log.Fatal(err) } }() log.Println("the answer is", add(ctx, multiply(ctx, multiply(ctx, 2, 2), 10), 2)) } opentelemetry-go-1.21.0/exporters/stdout/stdouttrace/go.mod000066400000000000000000000014041452547353200241270ustar00rootroot00000000000000module go.opentelemetry.io/otel/exporters/stdout/stdouttrace go 1.20 replace ( go.opentelemetry.io/otel => ../../.. 
go.opentelemetry.io/otel/sdk => ../../../sdk ) require ( github.com/stretchr/testify v1.8.4 go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/sdk v1.21.0 go.opentelemetry.io/otel/trace v1.21.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect golang.org/x/sys v0.14.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) replace go.opentelemetry.io/otel/trace => ../../../trace replace go.opentelemetry.io/otel/metric => ../../../metric opentelemetry-go-1.21.0/exporters/stdout/stdouttrace/go.sum000066400000000000000000000027721452547353200241650ustar00rootroot00000000000000github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= opentelemetry-go-1.21.0/exporters/stdout/stdouttrace/trace.go000066400000000000000000000052411452547353200244510ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package stdouttrace // import "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" import ( "context" "encoding/json" "sync" "time" "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/sdk/trace/tracetest" ) var zeroTime time.Time var _ trace.SpanExporter = &Exporter{} // New creates an Exporter with the passed options. func New(options ...Option) (*Exporter, error) { cfg, err := newConfig(options...) 
if err != nil { return nil, err } enc := json.NewEncoder(cfg.Writer) if cfg.PrettyPrint { enc.SetIndent("", "\t") } return &Exporter{ encoder: enc, timestamps: cfg.Timestamps, }, nil } // Exporter is an implementation of trace.SpanSyncer that writes spans to stdout. type Exporter struct { encoder *json.Encoder encoderMu sync.Mutex timestamps bool stoppedMu sync.RWMutex stopped bool } // ExportSpans writes spans in json format to stdout. func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error { e.stoppedMu.RLock() stopped := e.stopped e.stoppedMu.RUnlock() if stopped { return nil } if len(spans) == 0 { return nil } stubs := tracetest.SpanStubsFromReadOnlySpans(spans) e.encoderMu.Lock() defer e.encoderMu.Unlock() for i := range stubs { stub := &stubs[i] // Remove timestamps if !e.timestamps { stub.StartTime = zeroTime stub.EndTime = zeroTime for j := range stub.Events { ev := &stub.Events[j] ev.Time = zeroTime } } // Encode span stubs, one by one if err := e.encoder.Encode(stub); err != nil { return err } } return nil } // Shutdown is called to stop the exporter, it performs no action. func (e *Exporter) Shutdown(ctx context.Context) error { e.stoppedMu.Lock() e.stopped = true e.stoppedMu.Unlock() select { case <-ctx.Done(): return ctx.Err() default: } return nil } // MarshalLog is the marshaling function used by the logging system to represent this exporter. func (e *Exporter) MarshalLog() interface{} { return struct { Type string WithTimestamps bool }{ Type: "stdout", WithTimestamps: e.timestamps, } } opentelemetry-go-1.21.0/exporters/stdout/stdouttrace/trace_test.go000066400000000000000000000126721452547353200255160ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package stdouttrace_test import ( "bytes" "context" "encoding/json" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" "go.opentelemetry.io/otel/sdk/resource" tracesdk "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/sdk/trace/tracetest" "go.opentelemetry.io/otel/trace" ) func TestExporterExportSpan(t *testing.T) { // setup test span now := time.Now() traceID, _ := trace.TraceIDFromHex("0102030405060708090a0b0c0d0e0f10") spanID, _ := trace.SpanIDFromHex("0102030405060708") traceState, _ := trace.ParseTraceState("key=val") keyValue := "value" doubleValue := 123.456 res := resource.NewSchemaless(attribute.String("rk1", "rv11")) ss := tracetest.SpanStub{ SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: traceID, SpanID: spanID, TraceState: traceState, }), Name: "/foo", StartTime: now, EndTime: now, Attributes: []attribute.KeyValue{ attribute.String("key", keyValue), attribute.Float64("double", doubleValue), }, Events: []tracesdk.Event{ {Name: "foo", Attributes: []attribute.KeyValue{attribute.String("key", keyValue)}, Time: now}, {Name: "bar", Attributes: []attribute.KeyValue{attribute.Float64("double", doubleValue)}, Time: now}, }, SpanKind: trace.SpanKindInternal, Status: tracesdk.Status{ Code: codes.Error, Description: "interesting", }, Resource: res, } tests := []struct { opts []stdouttrace.Option expectNow time.Time }{ { opts: []stdouttrace.Option{stdouttrace.WithPrettyPrint()}, expectNow: now, }, { opts: []stdouttrace.Option{stdouttrace.WithPrettyPrint(), stdouttrace.WithoutTimestamps()}, // expectNow is an empty time.Time }, } ctx := context.Background() for _, tt := range tests { // write to buffer for testing var b bytes.Buffer ex, err := stdouttrace.New(append(tt.opts, stdouttrace.WithWriter(&b))...) 
require.Nil(t, err) err = ex.ExportSpans(ctx, tracetest.SpanStubs{ss, ss}.Snapshots()) require.Nil(t, err) got := b.String() wantone := expectedJSON(tt.expectNow) assert.Equal(t, wantone+wantone, got) } } func expectedJSON(now time.Time) string { serializedNow, _ := json.Marshal(now) return `{ "Name": "/foo", "SpanContext": { "TraceID": "0102030405060708090a0b0c0d0e0f10", "SpanID": "0102030405060708", "TraceFlags": "00", "TraceState": "key=val", "Remote": false }, "Parent": { "TraceID": "00000000000000000000000000000000", "SpanID": "0000000000000000", "TraceFlags": "00", "TraceState": "", "Remote": false }, "SpanKind": 1, "StartTime": ` + string(serializedNow) + `, "EndTime": ` + string(serializedNow) + `, "Attributes": [ { "Key": "key", "Value": { "Type": "STRING", "Value": "value" } }, { "Key": "double", "Value": { "Type": "FLOAT64", "Value": 123.456 } } ], "Events": [ { "Name": "foo", "Attributes": [ { "Key": "key", "Value": { "Type": "STRING", "Value": "value" } } ], "DroppedAttributeCount": 0, "Time": ` + string(serializedNow) + ` }, { "Name": "bar", "Attributes": [ { "Key": "double", "Value": { "Type": "FLOAT64", "Value": 123.456 } } ], "DroppedAttributeCount": 0, "Time": ` + string(serializedNow) + ` } ], "Links": null, "Status": { "Code": "Error", "Description": "interesting" }, "DroppedAttributes": 0, "DroppedEvents": 0, "DroppedLinks": 0, "ChildSpanCount": 0, "Resource": [ { "Key": "rk1", "Value": { "Type": "STRING", "Value": "rv11" } } ], "InstrumentationLibrary": { "Name": "", "Version": "", "SchemaURL": "" } } ` } func TestExporterShutdownHonorsTimeout(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) defer cancel() e, err := stdouttrace.New() if err != nil { t.Fatalf("failed to create exporter: %v", err) } innerCtx, innerCancel := context.WithTimeout(ctx, time.Nanosecond) defer innerCancel() <-innerCtx.Done() err = e.Shutdown(innerCtx) assert.ErrorIs(t, err, context.DeadlineExceeded) } func TestExporterShutdownHonorsCancel(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) defer cancel() e, err := stdouttrace.New() if err != nil { t.Fatalf("failed to create exporter: %v", err) } innerCtx, innerCancel := context.WithCancel(ctx) innerCancel() err = e.Shutdown(innerCtx) assert.ErrorIs(t, err, context.Canceled) } func TestExporterShutdownNoError(t *testing.T) { e, err := stdouttrace.New() if err != nil { t.Fatalf("failed to create exporter: %v", err) } if err := e.Shutdown(context.Background()); err != nil { t.Errorf("shutdown errored: expected nil, got %v", err) } } opentelemetry-go-1.21.0/exporters/zipkin/000077500000000000000000000000001452547353200204435ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/zipkin/doc.go000066400000000000000000000013441452547353200215410ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package zipkin contains an OpenTelemetry tracing exporter for Zipkin. 
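//
// A minimal usage sketch (New's exact signature and the sdktrace helpers are
// assumptions taken from outside this file, and the URL shown is the
// conventional Zipkin default suggested by env.go):
//
//	exp, err := zipkin.New("http://localhost:9411/api/v2/spans")
//	if err != nil {
//		// handle error
//	}
//	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
//	otel.SetTracerProvider(tp)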
package zipkin // import "go.opentelemetry.io/otel/exporters/zipkin" opentelemetry-go-1.21.0/exporters/zipkin/env.go000066400000000000000000000017531452547353200215700ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package zipkin // import "go.opentelemetry.io/otel/exporters/zipkin" import "os" // Environment variable names. const ( // Endpoint for Zipkin collector. envEndpoint = "OTEL_EXPORTER_ZIPKIN_ENDPOINT" ) // envOr returns an env variable's value if it exists or the default if not. func envOr(key, defaultValue string) string { if v := os.Getenv(key); v != "" { return v } return defaultValue }
package zipkin import ( "os" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ottest "go.opentelemetry.io/otel/exporters/zipkin/internal/internaltest" ) func TestEnvOrWithCollectorEndpointOptionsFromEnv(t *testing.T) { testCases := []struct { name string envEndpoint string defaultCollectorEndpoint string expectedCollectorEndpoint string }{ { name: "overrides value via environment variables", envEndpoint: "http://localhost:19411/foo", defaultCollectorEndpoint: defaultCollectorURL, expectedCollectorEndpoint: "http://localhost:19411/foo", }, { name: "environment variables is empty, will not overwrite value", envEndpoint: "", defaultCollectorEndpoint: defaultCollectorURL, expectedCollectorEndpoint: defaultCollectorURL, }, } envStore := ottest.NewEnvStore() envStore.Record(envEndpoint) defer func() { require.NoError(t, envStore.Restore()) }() for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { require.NoError(t, os.Setenv(envEndpoint, tc.envEndpoint)) endpoint := envOr(envEndpoint, tc.defaultCollectorEndpoint) assert.Equal(t, tc.expectedCollectorEndpoint, endpoint) }) } } opentelemetry-go-1.21.0/exporters/zipkin/go.mod000066400000000000000000000014401452547353200215500ustar00rootroot00000000000000module go.opentelemetry.io/otel/exporters/zipkin go 1.20 require ( github.com/go-logr/logr v1.3.0 github.com/go-logr/stdr v1.2.2 github.com/google/go-cmp v0.6.0 github.com/openzipkin/zipkin-go v0.4.2 github.com/stretchr/testify v1.8.4 go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/sdk v1.21.0 go.opentelemetry.io/otel/trace v1.21.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect golang.org/x/sys v0.14.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) replace go.opentelemetry.io/otel/trace => ../../trace replace go.opentelemetry.io/otel => ../.. 
replace go.opentelemetry.io/otel/sdk => ../../sdk replace go.opentelemetry.io/otel/metric => ../../metric opentelemetry-go-1.21.0/exporters/zipkin/go.sum000066400000000000000000000034061452547353200216010ustar00rootroot00000000000000github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/openzipkin/zipkin-go v0.4.2 h1:zjqfqHjUpPmB3c1GlCvvgsM1G4LkvqQbBDueDOCg/jA= github.com/openzipkin/zipkin-go v0.4.2/go.mod h1:ZeVkFjuuBiSy13y8vpSDCjMi9GoI3hPpCJSBx/EYFhY= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= opentelemetry-go-1.21.0/exporters/zipkin/internal/000077500000000000000000000000001452547353200222575ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/zipkin/internal/gen.go000066400000000000000000000044671452547353200233720ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
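// The go:generate directives in this file recreate the test helpers under
// internaltest/ and matchers/ from the repository's shared templates. A
// sketch of the usual invocation, assuming the gotmpl tool is installed (for
// example from the repository's tools module):
//
//	go generate ./exporters/zipkin/...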
package internal // import "go.opentelemetry.io/otel/exporters/zipkin/internal" //go:generate gotmpl --body=../../../internal/shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go //go:generate gotmpl --body=../../../internal/shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go //go:generate gotmpl --body=../../../internal/shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go //go:generate gotmpl --body=../../../internal/shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go //go:generate gotmpl --body=../../../internal/shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go //go:generate gotmpl --body=../../../internal/shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go //go:generate gotmpl --body=../../../internal/shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go //go:generate gotmpl --body=../../../internal/shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/exporters/zipkin/internal/matchers\"}" --out=internaltest/harness.go //go:generate gotmpl --body=../../../internal/shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go //go:generate gotmpl --body=../../../internal/shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go //go:generate gotmpl --body=../../../internal/shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go //go:generate gotmpl --body=../../../internal/shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go opentelemetry-go-1.21.0/exporters/zipkin/internal/internaltest/000077500000000000000000000000001452547353200247735ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/zipkin/internal/internaltest/alignment.go000066400000000000000000000045361452547353200273100ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/alignment.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest // import "go.opentelemetry.io/otel/exporters/zipkin/internal/internaltest" /* This file contains common utilities and objects to validate memory alignment of Go types. The primary use of this functionality is intended to ensure `struct` fields that need to be 64-bit aligned so they can be passed as arguments to 64-bit atomic operations. The common workflow is to define a slice of `FieldOffset` and pass them to the `Aligned8Byte` function from within a `TestMain` function from a package's tests. It is important to make this call from the `TestMain` function prior to running the rest of the test suit as it can provide useful diagnostics about field alignment instead of ambiguous nil pointer dereference and runtime panic. 
For more information: https://github.com/open-telemetry/opentelemetry-go/issues/341 */ import ( "fmt" "io" ) // FieldOffset is a preprocessor representation of a struct field alignment. type FieldOffset struct { // Name of the field. Name string // Offset of the field in bytes. // // To compute this at compile time use unsafe.Offsetof. Offset uintptr } // Aligned8Byte returns if all fields are aligned modulo 8-bytes. // // Error messaging is printed to out for any field determined misaligned. func Aligned8Byte(fields []FieldOffset, out io.Writer) bool { misaligned := make([]FieldOffset, 0) for _, f := range fields { if f.Offset%8 != 0 { misaligned = append(misaligned, f) } } if len(misaligned) == 0 { return true } fmt.Fprintln(out, "struct fields not aligned for 64-bit atomic operations:") for _, f := range misaligned { fmt.Fprintf(out, " %s: %d-byte offset\n", f.Name, f.Offset) } return false } opentelemetry-go-1.21.0/exporters/zipkin/internal/internaltest/env.go000066400000000000000000000041351452547353200261150ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/env.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest // import "go.opentelemetry.io/otel/exporters/zipkin/internal/internaltest" import ( "os" ) type Env struct { Name string Value string Exists bool } // EnvStore stores and recovers environment variables. type EnvStore interface { // Records the environment variable into the store. Record(key string) // Restore recovers the environment variables in the store. Restore() error } var _ EnvStore = (*envStore)(nil) type envStore struct { store map[string]Env } func (s *envStore) add(env Env) { s.store[env.Name] = env } func (s *envStore) Restore() error { var err error for _, v := range s.store { if v.Exists { err = os.Setenv(v.Name, v.Value) } else { err = os.Unsetenv(v.Name) } if err != nil { return err } } return nil } func (s *envStore) setEnv(key, value string) error { s.Record(key) err := os.Setenv(key, value) if err != nil { return err } return nil } func (s *envStore) Record(key string) { originValue, exists := os.LookupEnv(key) s.add(Env{ Name: key, Value: originValue, Exists: exists, }) } func NewEnvStore() EnvStore { return newEnvStore() } func newEnvStore() *envStore { return &envStore{store: make(map[string]Env)} } func SetEnvVariables(env map[string]string) (EnvStore, error) { envStore := newEnvStore() for k, v := range env { err := envStore.setEnv(k, v) if err != nil { return nil, err } } return envStore, nil } opentelemetry-go-1.21.0/exporters/zipkin/internal/internaltest/env_test.go000066400000000000000000000114731452547353200271570ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/env_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest import ( "os" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" ) type EnvStoreTestSuite struct { suite.Suite } func (s *EnvStoreTestSuite) Test_add() { envStore := newEnvStore() e := Env{ Name: "name", Value: "value", Exists: true, } envStore.add(e) envStore.add(e) s.Assert().Len(envStore.store, 1) } func (s *EnvStoreTestSuite) TestRecord() { testCases := []struct { name string env Env expectedEnvStore *envStore }{ { name: "record exists env", env: Env{ Name: "name", Value: "value", Exists: true, }, expectedEnvStore: &envStore{store: map[string]Env{ "name": { Name: "name", Value: "value", Exists: true, }, }}, }, { name: "record exists env, but its value is empty", env: Env{ Name: "name", Value: "", Exists: true, }, expectedEnvStore: &envStore{store: map[string]Env{ "name": { Name: "name", Value: "", Exists: true, }, }}, }, { name: "record not exists env", env: Env{ Name: "name", Exists: false, }, expectedEnvStore: &envStore{store: map[string]Env{ "name": { Name: "name", Exists: false, }, }}, }, } for _, tc := range testCases { s.Run(tc.name, func() { if tc.env.Exists { s.Assert().NoError(os.Setenv(tc.env.Name, tc.env.Value)) } envStore := newEnvStore() envStore.Record(tc.env.Name) s.Assert().Equal(tc.expectedEnvStore, envStore) if tc.env.Exists { s.Assert().NoError(os.Unsetenv(tc.env.Name)) } }) } } func (s *EnvStoreTestSuite) TestRestore() { testCases := []struct { name string env Env expectedEnvValue string expectedEnvExists bool }{ { name: "exists env", env: Env{ Name: "name", Value: "value", Exists: true, }, expectedEnvValue: "value", expectedEnvExists: true, }, { name: "no exists env", env: Env{ Name: "name", Exists: false, }, expectedEnvExists: false, }, } for _, tc := range testCases { s.Run(tc.name, func() { envStore := newEnvStore() envStore.add(tc.env) // Backup backup := newEnvStore() backup.Record(tc.env.Name) s.Require().NoError(os.Unsetenv(tc.env.Name)) s.Assert().NoError(envStore.Restore()) v, exists := os.LookupEnv(tc.env.Name) s.Assert().Equal(tc.expectedEnvValue, v) s.Assert().Equal(tc.expectedEnvExists, exists) // Restore s.Require().NoError(backup.Restore()) }) } } func (s *EnvStoreTestSuite) Test_setEnv() { testCases := []struct { name string key string value string expectedEnvStore *envStore expectedEnvValue string expectedEnvExists bool }{ { name: "normal", key: "name", value: "value", expectedEnvStore: &envStore{store: map[string]Env{ "name": { Name: "name", Value: "other value", Exists: true, }, }}, expectedEnvValue: "value", expectedEnvExists: true, }, } for _, tc := range testCases { s.Run(tc.name, func() { envStore := newEnvStore() // Backup backup := newEnvStore() backup.Record(tc.key) s.Require().NoError(os.Setenv(tc.key, "other value")) s.Assert().NoError(envStore.setEnv(tc.key, tc.value)) s.Assert().Equal(tc.expectedEnvStore, envStore) v, exists := os.LookupEnv(tc.key) s.Assert().Equal(tc.expectedEnvValue, v) s.Assert().Equal(tc.expectedEnvExists, exists) // Restore s.Require().NoError(backup.Restore()) }) } } func TestEnvStoreTestSuite(t *testing.T) { 
suite.Run(t, new(EnvStoreTestSuite)) } func TestSetEnvVariables(t *testing.T) { envs := map[string]string{ "name1": "value1", "name2": "value2", } // Backup backup := newEnvStore() for k := range envs { backup.Record(k) } defer func() { require.NoError(t, backup.Restore()) }() store, err := SetEnvVariables(envs) assert.NoError(t, err) require.IsType(t, &envStore{}, store) concreteStore := store.(*envStore) assert.Len(t, concreteStore.store, 2) assert.Equal(t, backup, concreteStore) } opentelemetry-go-1.21.0/exporters/zipkin/internal/internaltest/errors.go000066400000000000000000000017021452547353200266360ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/errors.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest // import "go.opentelemetry.io/otel/exporters/zipkin/internal/internaltest" type TestError string var _ error = TestError("") func NewTestError(s string) error { return TestError(s) } func (e TestError) Error() string { return string(e) } opentelemetry-go-1.21.0/exporters/zipkin/internal/internaltest/harness.go000066400000000000000000000217131452547353200267710ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/harness.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest // import "go.opentelemetry.io/otel/exporters/zipkin/internal/internaltest" import ( "context" "fmt" "sync" "testing" "time" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/exporters/zipkin/internal/matchers" "go.opentelemetry.io/otel/trace" ) // Harness is a testing harness used to test implementations of the // OpenTelemetry API. type Harness struct { t *testing.T } // NewHarness returns an instantiated *Harness using t. func NewHarness(t *testing.T) *Harness { return &Harness{ t: t, } } // TestTracerProvider runs validation tests for an implementation of the OpenTelemetry // TracerProvider API. 
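//
// A sketch of how a test might drive it (the SDK constructor used in the
// factory is an assumption; any trace.TracerProvider implementation works):
//
//	h := NewHarness(t)
//	h.TestTracerProvider(func() trace.TracerProvider {
//		return sdktrace.NewTracerProvider()
//	})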
func (h *Harness) TestTracerProvider(subjectFactory func() trace.TracerProvider) { h.t.Run("#Start", func(t *testing.T) { t.Run("allow creating an arbitrary number of TracerProvider instances", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) tp1 := subjectFactory() tp2 := subjectFactory() e.Expect(tp1).NotToEqual(tp2) }) t.Run("all methods are safe to be called concurrently", func(t *testing.T) { t.Parallel() runner := func(tp trace.TracerProvider) <-chan struct{} { done := make(chan struct{}) go func(tp trace.TracerProvider) { var wg sync.WaitGroup for i := 0; i < 20; i++ { wg.Add(1) go func(name, version string) { _ = tp.Tracer(name, trace.WithInstrumentationVersion(version)) wg.Done() }(fmt.Sprintf("tracer %d", i%5), fmt.Sprintf("%d", i)) } wg.Wait() done <- struct{}{} }(tp) return done } matchers.NewExpecter(t).Expect(func() { // Run with multiple TracerProvider to ensure they encapsulate // their own Tracers. tp1 := subjectFactory() tp2 := subjectFactory() done1 := runner(tp1) done2 := runner(tp2) <-done1 <-done2 }).NotToPanic() }) }) } // TestTracer runs validation tests for an implementation of the OpenTelemetry // Tracer API. func (h *Harness) TestTracer(subjectFactory func() trace.Tracer) { h.t.Run("#Start", func(t *testing.T) { t.Run("propagates the original context", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() ctxKey := testCtxKey{} ctxValue := "ctx value" ctx := context.WithValue(context.Background(), ctxKey, ctxValue) ctx, _ = subject.Start(ctx, "test") e.Expect(ctx.Value(ctxKey)).ToEqual(ctxValue) }) t.Run("returns a span containing the expected properties", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() _, span := subject.Start(context.Background(), "test") e.Expect(span).NotToBeNil() e.Expect(span.SpanContext().IsValid()).ToBeTrue() }) t.Run("stores the span on the provided context", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() ctx, span := subject.Start(context.Background(), "test") e.Expect(span).NotToBeNil() e.Expect(span.SpanContext()).NotToEqual(trace.SpanContext{}) e.Expect(trace.SpanFromContext(ctx)).ToEqual(span) }) t.Run("starts spans with unique trace and span IDs", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() _, span1 := subject.Start(context.Background(), "span1") _, span2 := subject.Start(context.Background(), "span2") sc1 := span1.SpanContext() sc2 := span2.SpanContext() e.Expect(sc1.TraceID()).NotToEqual(sc2.TraceID()) e.Expect(sc1.SpanID()).NotToEqual(sc2.SpanID()) }) t.Run("propagates a parent's trace ID through the context", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() ctx, parent := subject.Start(context.Background(), "parent") _, child := subject.Start(ctx, "child") psc := parent.SpanContext() csc := child.SpanContext() e.Expect(csc.TraceID()).ToEqual(psc.TraceID()) e.Expect(csc.SpanID()).NotToEqual(psc.SpanID()) }) t.Run("ignores parent's trace ID when new root is requested", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() ctx, parent := subject.Start(context.Background(), "parent") _, child := subject.Start(ctx, "child", trace.WithNewRoot()) psc := parent.SpanContext() csc := child.SpanContext() e.Expect(csc.TraceID()).NotToEqual(psc.TraceID()) e.Expect(csc.SpanID()).NotToEqual(psc.SpanID()) }) t.Run("propagates remote parent's trace ID through the context", func(t 
*testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() _, remoteParent := subject.Start(context.Background(), "remote parent") parentCtx := trace.ContextWithRemoteSpanContext(context.Background(), remoteParent.SpanContext()) _, child := subject.Start(parentCtx, "child") psc := remoteParent.SpanContext() csc := child.SpanContext() e.Expect(csc.TraceID()).ToEqual(psc.TraceID()) e.Expect(csc.SpanID()).NotToEqual(psc.SpanID()) }) t.Run("ignores remote parent's trace ID when new root is requested", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() _, remoteParent := subject.Start(context.Background(), "remote parent") parentCtx := trace.ContextWithRemoteSpanContext(context.Background(), remoteParent.SpanContext()) _, child := subject.Start(parentCtx, "child", trace.WithNewRoot()) psc := remoteParent.SpanContext() csc := child.SpanContext() e.Expect(csc.TraceID()).NotToEqual(psc.TraceID()) e.Expect(csc.SpanID()).NotToEqual(psc.SpanID()) }) t.Run("all methods are safe to be called concurrently", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) tracer := subjectFactory() ctx, parent := tracer.Start(context.Background(), "span") runner := func(tp trace.Tracer) <-chan struct{} { done := make(chan struct{}) go func(tp trace.Tracer) { var wg sync.WaitGroup for i := 0; i < 20; i++ { wg.Add(1) go func(name string) { defer wg.Done() _, child := tp.Start(ctx, name) psc := parent.SpanContext() csc := child.SpanContext() e.Expect(csc.TraceID()).ToEqual(psc.TraceID()) e.Expect(csc.SpanID()).NotToEqual(psc.SpanID()) }(fmt.Sprintf("span %d", i)) } wg.Wait() done <- struct{}{} }(tp) return done } e.Expect(func() { done := runner(tracer) <-done }).NotToPanic() }) }) h.testSpan(subjectFactory) } func (h *Harness) testSpan(tracerFactory func() trace.Tracer) { methods := map[string]func(span trace.Span){ "#End": func(span trace.Span) { span.End() }, "#AddEvent": func(span trace.Span) { span.AddEvent("test event") }, "#AddEventWithTimestamp": func(span trace.Span) { span.AddEvent("test event", trace.WithTimestamp(time.Now().Add(1*time.Second))) }, "#SetStatus": func(span trace.Span) { span.SetStatus(codes.Error, "internal") }, "#SetName": func(span trace.Span) { span.SetName("new name") }, "#SetAttributes": func(span trace.Span) { span.SetAttributes(attribute.String("key1", "value"), attribute.Int("key2", 123)) }, } mechanisms := map[string]func() trace.Span{ "Span created via Tracer#Start": func() trace.Span { tracer := tracerFactory() _, subject := tracer.Start(context.Background(), "test") return subject }, "Span created via span.TracerProvider()": func() trace.Span { ctx, spanA := tracerFactory().Start(context.Background(), "span1") _, spanB := spanA.TracerProvider().Tracer("second").Start(ctx, "span2") return spanB }, } for mechanismName, mechanism := range mechanisms { h.t.Run(mechanismName, func(t *testing.T) { for methodName, method := range methods { t.Run(methodName, func(t *testing.T) { t.Run("is thread-safe", func(t *testing.T) { t.Parallel() span := mechanism() wg := &sync.WaitGroup{} wg.Add(2) go func() { defer wg.Done() method(span) }() go func() { defer wg.Done() method(span) }() wg.Wait() }) }) } t.Run("#End", func(t *testing.T) { t.Run("can be called multiple times", func(t *testing.T) { t.Parallel() span := mechanism() span.End() span.End() }) }) }) } } type testCtxKey struct{} 
opentelemetry-go-1.21.0/exporters/zipkin/internal/internaltest/text_map_carrier.go000066400000000000000000000073221452547353200306560ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/text_map_carrier.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest // import "go.opentelemetry.io/otel/exporters/zipkin/internal/internaltest" import ( "sync" "testing" "go.opentelemetry.io/otel/propagation" ) // TextMapCarrier is a storage medium for a TextMapPropagator used in testing. // The methods of a TextMapCarrier are concurrent safe. type TextMapCarrier struct { mtx sync.Mutex gets []string sets [][2]string data map[string]string } var _ propagation.TextMapCarrier = (*TextMapCarrier)(nil) // NewTextMapCarrier returns a new *TextMapCarrier populated with data. func NewTextMapCarrier(data map[string]string) *TextMapCarrier { copied := make(map[string]string, len(data)) for k, v := range data { copied[k] = v } return &TextMapCarrier{data: copied} } // Keys returns the keys for which this carrier has a value. func (c *TextMapCarrier) Keys() []string { c.mtx.Lock() defer c.mtx.Unlock() result := make([]string, 0, len(c.data)) for k := range c.data { result = append(result, k) } return result } // Get returns the value associated with the passed key. func (c *TextMapCarrier) Get(key string) string { c.mtx.Lock() defer c.mtx.Unlock() c.gets = append(c.gets, key) return c.data[key] } // GotKey tests if c.Get has been called for key. func (c *TextMapCarrier) GotKey(t *testing.T, key string) bool { c.mtx.Lock() defer c.mtx.Unlock() for _, k := range c.gets { if k == key { return true } } t.Errorf("TextMapCarrier.Get(%q) has not been called", key) return false } // GotN tests if n calls to c.Get have been made. func (c *TextMapCarrier) GotN(t *testing.T, n int) bool { c.mtx.Lock() defer c.mtx.Unlock() if len(c.gets) != n { t.Errorf("TextMapCarrier.Get was called %d times, not %d", len(c.gets), n) return false } return true } // Set stores the key-value pair. func (c *TextMapCarrier) Set(key, value string) { c.mtx.Lock() defer c.mtx.Unlock() c.sets = append(c.sets, [2]string{key, value}) c.data[key] = value } // SetKeyValue tests if c.Set has been called for the key-value pair. func (c *TextMapCarrier) SetKeyValue(t *testing.T, key, value string) bool { c.mtx.Lock() defer c.mtx.Unlock() var vals []string for _, pair := range c.sets { if key == pair[0] { if value == pair[1] { return true } vals = append(vals, pair[1]) } } if len(vals) > 0 { t.Errorf("TextMapCarrier.Set called with %q and %v values, but not %s", key, vals, value) } t.Errorf("TextMapCarrier.Set(%q,%q) has not been called", key, value) return false } // SetN tests if n calls to c.Set have been made. 
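//
// For example, mirroring the carrier tests in this package, a test that
// records exactly one write might assert:
//
//	c := NewTextMapCarrier(nil)
//	c.Set("key", "value")
//	c.SetN(t, 1)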
func (c *TextMapCarrier) SetN(t *testing.T, n int) bool { c.mtx.Lock() defer c.mtx.Unlock() if len(c.sets) != n { t.Errorf("TextMapCarrier.Set was called %d times, not %d", len(c.sets), n) return false } return true } // Reset zeros out the recording state and sets the carried values to data. func (c *TextMapCarrier) Reset(data map[string]string) { copied := make(map[string]string, len(data)) for k, v := range data { copied[k] = v } c.mtx.Lock() defer c.mtx.Unlock() c.gets = nil c.sets = nil c.data = copied } opentelemetry-go-1.21.0/exporters/zipkin/internal/internaltest/text_map_carrier_test.go000066400000000000000000000046051452547353200317160ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/text_map_carrier_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest import ( "reflect" "testing" ) var key, value = "test", "true" func TestTextMapCarrierKeys(t *testing.T) { tmc := NewTextMapCarrier(map[string]string{key: value}) expected, actual := []string{key}, tmc.Keys() if !reflect.DeepEqual(actual, expected) { t.Errorf("expected tmc.Keys() to be %v but it was %v", expected, actual) } } func TestTextMapCarrierGet(t *testing.T) { tmc := NewTextMapCarrier(map[string]string{key: value}) tmc.GotN(t, 0) if got := tmc.Get("empty"); got != "" { t.Errorf("TextMapCarrier.Get returned %q for an empty key", got) } tmc.GotKey(t, "empty") tmc.GotN(t, 1) if got := tmc.Get(key); got != value { t.Errorf("TextMapCarrier.Get(%q) returned %q, want %q", key, got, value) } tmc.GotKey(t, key) tmc.GotN(t, 2) } func TestTextMapCarrierSet(t *testing.T) { tmc := NewTextMapCarrier(nil) tmc.SetN(t, 0) tmc.Set(key, value) if got, ok := tmc.data[key]; !ok { t.Errorf("TextMapCarrier.Set(%q,%q) failed to store pair", key, value) } else if got != value { t.Errorf("TextMapCarrier.Set(%q,%q) stored (%q,%q), not (%q,%q)", key, value, key, got, key, value) } tmc.SetKeyValue(t, key, value) tmc.SetN(t, 1) } func TestTextMapCarrierReset(t *testing.T) { tmc := NewTextMapCarrier(map[string]string{key: value}) tmc.GotN(t, 0) tmc.SetN(t, 0) tmc.Reset(nil) tmc.GotN(t, 0) tmc.SetN(t, 0) if got := tmc.Get(key); got != "" { t.Error("TextMapCarrier.Reset() failed to clear initial data") } tmc.GotN(t, 1) tmc.GotKey(t, key) tmc.Set(key, value) tmc.SetKeyValue(t, key, value) tmc.SetN(t, 1) tmc.Reset(nil) tmc.GotN(t, 0) tmc.SetN(t, 0) if got := tmc.Get(key); got != "" { t.Error("TextMapCarrier.Reset() failed to clear data") } } opentelemetry-go-1.21.0/exporters/zipkin/internal/internaltest/text_map_propagator.go000066400000000000000000000067641452547353200314160ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/text_map_propagator.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest // import "go.opentelemetry.io/otel/exporters/zipkin/internal/internaltest" import ( "context" "fmt" "strconv" "strings" "testing" "go.opentelemetry.io/otel/propagation" ) type ctxKeyType string type state struct { Injections uint64 Extractions uint64 } func newState(encoded string) state { if encoded == "" { return state{} } s0, s1, _ := strings.Cut(encoded, ",") injects, _ := strconv.ParseUint(s0, 10, 64) extracts, _ := strconv.ParseUint(s1, 10, 64) return state{ Injections: injects, Extractions: extracts, } } func (s state) String() string { return fmt.Sprintf("%d,%d", s.Injections, s.Extractions) } // TextMapPropagator is a propagation.TextMapPropagator used for testing. type TextMapPropagator struct { name string ctxKey ctxKeyType } var _ propagation.TextMapPropagator = (*TextMapPropagator)(nil) // NewTextMapPropagator returns a new TextMapPropagator for testing. It will // use name as the key it injects into a TextMapCarrier when Inject is called. func NewTextMapPropagator(name string) *TextMapPropagator { return &TextMapPropagator{name: name, ctxKey: ctxKeyType(name)} } func (p *TextMapPropagator) stateFromContext(ctx context.Context) state { if v := ctx.Value(p.ctxKey); v != nil { if s, ok := v.(state); ok { return s } } return state{} } func (p *TextMapPropagator) stateFromCarrier(carrier propagation.TextMapCarrier) state { return newState(carrier.Get(p.name)) } // Inject sets cross-cutting concerns for p from ctx into carrier. func (p *TextMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) { s := p.stateFromContext(ctx) s.Injections++ carrier.Set(p.name, s.String()) } // InjectedN tests if p has made n injections to carrier. func (p *TextMapPropagator) InjectedN(t *testing.T, carrier *TextMapCarrier, n int) bool { if actual := p.stateFromCarrier(carrier).Injections; actual != uint64(n) { t.Errorf("TextMapPropagator{%q} injected %d times, not %d", p.name, actual, n) return false } return true } // Extract reads cross-cutting concerns for p from carrier into ctx. func (p *TextMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context { s := p.stateFromCarrier(carrier) s.Extractions++ return context.WithValue(ctx, p.ctxKey, s) } // ExtractedN tests if p has made n extractions from the lineage of ctx. // nolint (context is not first arg) func (p *TextMapPropagator) ExtractedN(t *testing.T, ctx context.Context, n int) bool { if actual := p.stateFromContext(ctx).Extractions; actual != uint64(n) { t.Errorf("TextMapPropagator{%q} extracted %d time, not %d", p.name, actual, n) return false } return true } // Fields returns the name of p as the key who's value is set with Inject. func (p *TextMapPropagator) Fields() []string { return []string{p.name} } opentelemetry-go-1.21.0/exporters/zipkin/internal/internaltest/text_map_propagator_test.go000066400000000000000000000044141452547353200324430ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. 
// source: internal/shared/internaltest/text_map_propagator_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest import ( "context" "testing" ) func TestTextMapPropagatorInjectExtract(t *testing.T) { name := "testing" ctx := context.Background() carrier := NewTextMapCarrier(map[string]string{name: value}) propagator := NewTextMapPropagator(name) propagator.Inject(ctx, carrier) // Carrier value overridden with state. if carrier.SetKeyValue(t, name, "1,0") { // Ensure nothing has been extracted yet. propagator.ExtractedN(t, ctx, 0) // Test the injection was counted. propagator.InjectedN(t, carrier, 1) } ctx = propagator.Extract(ctx, carrier) v := ctx.Value(ctxKeyType(name)) if v == nil { t.Error("TextMapPropagator.Extract failed to extract state") } if s, ok := v.(state); !ok { t.Error("TextMapPropagator.Extract did not extract proper state") } else if s.Extractions != 1 { t.Error("TextMapPropagator.Extract did not increment state.Extractions") } if carrier.GotKey(t, name) { // Test the extraction was counted. propagator.ExtractedN(t, ctx, 1) // Ensure no additional injection was recorded. propagator.InjectedN(t, carrier, 1) } } func TestTextMapPropagatorFields(t *testing.T) { name := "testing" propagator := NewTextMapPropagator(name) if got := propagator.Fields(); len(got) != 1 { t.Errorf("TextMapPropagator.Fields returned %d fields, want 1", len(got)) } else if got[0] != name { t.Errorf("TextMapPropagator.Fields returned %q, want %q", got[0], name) } } func TestNewStateEmpty(t *testing.T) { if want, got := (state{}), newState(""); got != want { t.Errorf("newState(\"\") returned %v, want %v", got, want) } } opentelemetry-go-1.21.0/exporters/zipkin/internal/matchers/000077500000000000000000000000001452547353200240655ustar00rootroot00000000000000opentelemetry-go-1.21.0/exporters/zipkin/internal/matchers/expectation.go000066400000000000000000000175731452547353200267540ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/matchers/expectation.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
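// The Expectation methods below are normally reached through the Expecter
// defined in expecter.go, as the internaltest harness does. A minimal sketch
// of that flow (doWork is a hypothetical placeholder):
//
//	e := matchers.NewExpecter(t)
//	e.Expect(got).ToEqual(want)
//	e.Expect(func() { doWork() }).NotToPanic()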
package matchers // import "go.opentelemetry.io/otel/exporters/zipkin/internal/matchers" import ( "fmt" "reflect" "regexp" "runtime/debug" "strings" "testing" "time" ) var stackTracePruneRE = regexp.MustCompile(`runtime\/debug|testing|internal\/matchers`) type Expectation struct { t *testing.T actual interface{} } func (e *Expectation) ToEqual(expected interface{}) { e.verifyExpectedNotNil(expected) if !reflect.DeepEqual(e.actual, expected) { e.fail(fmt.Sprintf("Expected\n\t%v\nto equal\n\t%v", e.actual, expected)) } } func (e *Expectation) NotToEqual(expected interface{}) { e.verifyExpectedNotNil(expected) if reflect.DeepEqual(e.actual, expected) { e.fail(fmt.Sprintf("Expected\n\t%v\nnot to equal\n\t%v", e.actual, expected)) } } func (e *Expectation) ToBeNil() { if e.actual != nil { e.fail(fmt.Sprintf("Expected\n\t%v\nto be nil", e.actual)) } } func (e *Expectation) NotToBeNil() { if e.actual == nil { e.fail(fmt.Sprintf("Expected\n\t%v\nnot to be nil", e.actual)) } } func (e *Expectation) ToBeTrue() { switch a := e.actual.(type) { case bool: if !a { e.fail(fmt.Sprintf("Expected\n\t%v\nto be true", e.actual)) } default: e.fail(fmt.Sprintf("Cannot check if non-bool value\n\t%v\nis truthy", a)) } } func (e *Expectation) ToBeFalse() { switch a := e.actual.(type) { case bool: if a { e.fail(fmt.Sprintf("Expected\n\t%v\nto be false", e.actual)) } default: e.fail(fmt.Sprintf("Cannot check if non-bool value\n\t%v\nis truthy", a)) } } func (e *Expectation) NotToPanic() { switch a := e.actual.(type) { case func(): func() { defer func() { if recovered := recover(); recovered != nil { e.fail(fmt.Sprintf("Expected panic\n\t%v\nto have not been raised", recovered)) } }() a() }() default: e.fail(fmt.Sprintf("Cannot check if non-func value\n\t%v\nis truthy", a)) } } func (e *Expectation) ToSucceed() { switch actual := e.actual.(type) { case error: if actual != nil { e.fail(fmt.Sprintf("Expected error\n\t%v\nto have succeeded", actual)) } default: e.fail(fmt.Sprintf("Cannot check if non-error value\n\t%v\nsucceeded", actual)) } } func (e *Expectation) ToMatchError(expected interface{}) { e.verifyExpectedNotNil(expected) actual, ok := e.actual.(error) if !ok { e.fail(fmt.Sprintf("Cannot check if non-error value\n\t%v\nmatches error", e.actual)) } switch expected := expected.(type) { case error: if !reflect.DeepEqual(actual, expected) { e.fail(fmt.Sprintf("Expected\n\t%v\nto match error\n\t%v", actual, expected)) } case string: if actual.Error() != expected { e.fail(fmt.Sprintf("Expected\n\t%v\nto match error\n\t%v", actual, expected)) } default: e.fail(fmt.Sprintf("Cannot match\n\t%v\nagainst non-error\n\t%v", actual, expected)) } } func (e *Expectation) ToContain(expected interface{}) { actualValue := reflect.ValueOf(e.actual) actualKind := actualValue.Kind() switch actualKind { case reflect.Array, reflect.Slice: default: e.fail(fmt.Sprintf("Expected\n\t%v\nto be an array", e.actual)) return } expectedValue := reflect.ValueOf(expected) expectedKind := expectedValue.Kind() switch expectedKind { case reflect.Array, reflect.Slice: default: expectedValue = reflect.ValueOf([]interface{}{expected}) } for i := 0; i < expectedValue.Len(); i++ { var contained bool expectedElem := expectedValue.Index(i).Interface() for j := 0; j < actualValue.Len(); j++ { if reflect.DeepEqual(actualValue.Index(j).Interface(), expectedElem) { contained = true break } } if !contained { e.fail(fmt.Sprintf("Expected\n\t%v\nto contain\n\t%v", e.actual, expectedElem)) return } } } func (e *Expectation) NotToContain(expected interface{}) { 
actualValue := reflect.ValueOf(e.actual) actualKind := actualValue.Kind() switch actualKind { case reflect.Array, reflect.Slice: default: e.fail(fmt.Sprintf("Expected\n\t%v\nto be an array", e.actual)) return } expectedValue := reflect.ValueOf(expected) expectedKind := expectedValue.Kind() switch expectedKind { case reflect.Array, reflect.Slice: default: expectedValue = reflect.ValueOf([]interface{}{expected}) } for i := 0; i < expectedValue.Len(); i++ { expectedElem := expectedValue.Index(i).Interface() for j := 0; j < actualValue.Len(); j++ { if reflect.DeepEqual(actualValue.Index(j).Interface(), expectedElem) { e.fail(fmt.Sprintf("Expected\n\t%v\nnot to contain\n\t%v", e.actual, expectedElem)) return } } } } func (e *Expectation) ToMatchInAnyOrder(expected interface{}) { expectedValue := reflect.ValueOf(expected) expectedKind := expectedValue.Kind() switch expectedKind { case reflect.Array, reflect.Slice: default: e.fail(fmt.Sprintf("Expected\n\t%v\nto be an array", expected)) return } actualValue := reflect.ValueOf(e.actual) actualKind := actualValue.Kind() if actualKind != expectedKind { e.fail(fmt.Sprintf("Expected\n\t%v\nto be the same type as\n\t%v", e.actual, expected)) return } if actualValue.Len() != expectedValue.Len() { e.fail(fmt.Sprintf("Expected\n\t%v\nto have the same length as\n\t%v", e.actual, expected)) return } var unmatched []interface{} for i := 0; i < expectedValue.Len(); i++ { unmatched = append(unmatched, expectedValue.Index(i).Interface()) } for i := 0; i < actualValue.Len(); i++ { var found bool for j, elem := range unmatched { if reflect.DeepEqual(actualValue.Index(i).Interface(), elem) { found = true unmatched = append(unmatched[:j], unmatched[j+1:]...) break } } if !found { e.fail(fmt.Sprintf("Expected\n\t%v\nto contain the same elements as\n\t%v", e.actual, expected)) } } } func (e *Expectation) ToBeTemporally(matcher TemporalMatcher, compareTo interface{}) { if actual, ok := e.actual.(time.Time); ok { ct, ok := compareTo.(time.Time) if !ok { e.fail(fmt.Sprintf("Cannot compare to non-temporal value\n\t%v", compareTo)) return } switch matcher { case Before: if !actual.Before(ct) { e.fail(fmt.Sprintf("Expected\n\t%v\nto be temporally before\n\t%v", e.actual, compareTo)) } case BeforeOrSameTime: if actual.After(ct) { e.fail(fmt.Sprintf("Expected\n\t%v\nto be temporally before or at the same time as\n\t%v", e.actual, compareTo)) } case After: if !actual.After(ct) { e.fail(fmt.Sprintf("Expected\n\t%v\nto be temporally after\n\t%v", e.actual, compareTo)) } case AfterOrSameTime: if actual.Before(ct) { e.fail(fmt.Sprintf("Expected\n\t%v\nto be temporally after or at the same time as\n\t%v", e.actual, compareTo)) } default: e.fail("Cannot compare times with unexpected temporal matcher") } return } e.fail(fmt.Sprintf("Cannot compare non-temporal value\n\t%v", e.actual)) } func (e *Expectation) verifyExpectedNotNil(expected interface{}) { if expected == nil { e.fail("Refusing to compare with . Use `ToBeNil` or `NotToBeNil` instead.") } } func (e *Expectation) fail(msg string) { // Prune the stack trace so that it's easier to see relevant lines stack := strings.Split(string(debug.Stack()), "\n") var prunedStack []string for _, line := range stack { if !stackTracePruneRE.MatchString(line) { prunedStack = append(prunedStack, line) } } e.t.Fatalf("\n%s\n%s\n", strings.Join(prunedStack, "\n"), msg) } opentelemetry-go-1.21.0/exporters/zipkin/internal/matchers/expecter.go000066400000000000000000000020241452547353200262310ustar00rootroot00000000000000// Code created by gotmpl. 
DO NOT MODIFY. // source: internal/shared/matchers/expecter.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package matchers // import "go.opentelemetry.io/otel/exporters/zipkin/internal/matchers" import ( "testing" ) type Expecter struct { t *testing.T } func NewExpecter(t *testing.T) *Expecter { return &Expecter{ t: t, } } func (a *Expecter) Expect(actual interface{}) *Expectation { return &Expectation{ t: a.t, actual: actual, } } opentelemetry-go-1.21.0/exporters/zipkin/internal/matchers/temporal_matcher.go000066400000000000000000000017431452547353200277470ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/matchers/temporal_matcher.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package matchers // import "go.opentelemetry.io/otel/exporters/zipkin/internal/matchers" type TemporalMatcher byte //nolint:revive // ignoring missing comments for unexported constants in an internal package const ( Before TemporalMatcher = iota BeforeOrSameTime After AfterOrSameTime ) opentelemetry-go-1.21.0/exporters/zipkin/model.go000066400000000000000000000210131452547353200220670ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
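// model.go converts SDK spans into the Zipkin wire model. A minimal sketch of
// calling the exported SpanModels helper directly (the JSON encoding and HTTP
// POST are assumptions about how a caller might ship the result; the exporter
// in this package performs that delivery itself):
//
//	models := zipkin.SpanModels(spans)
//	body, err := json.Marshal(models)
//	// POST body to the collector endpoint, e.g. http://localhost:9411/api/v2/spans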
package zipkin // import "go.opentelemetry.io/otel/exporters/zipkin" import ( "encoding/binary" "encoding/json" "fmt" "net" "strconv" "strings" zkmodel "github.com/openzipkin/zipkin-go/model" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/sdk/resource" tracesdk "go.opentelemetry.io/otel/sdk/trace" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" "go.opentelemetry.io/otel/trace" ) const ( keyInstrumentationLibraryName = "otel.library.name" keyInstrumentationLibraryVersion = "otel.library.version" keyPeerHostname attribute.Key = "peer.hostname" keyPeerAddress attribute.Key = "peer.address" ) var defaultServiceName string func init() { // fetch service.name from default resource for backup defaultResource := resource.Default() if value, exists := defaultResource.Set().Value(semconv.ServiceNameKey); exists { defaultServiceName = value.AsString() } } // SpanModels converts OpenTelemetry spans into Zipkin model spans. // This is used for exporting to Zipkin compatible tracing services. func SpanModels(batch []tracesdk.ReadOnlySpan) []zkmodel.SpanModel { models := make([]zkmodel.SpanModel, 0, len(batch)) for _, data := range batch { models = append(models, toZipkinSpanModel(data)) } return models } func getServiceName(attrs []attribute.KeyValue) string { for _, kv := range attrs { if kv.Key == semconv.ServiceNameKey { return kv.Value.AsString() } } return defaultServiceName } func toZipkinSpanModel(data tracesdk.ReadOnlySpan) zkmodel.SpanModel { return zkmodel.SpanModel{ SpanContext: toZipkinSpanContext(data), Name: data.Name(), Kind: toZipkinKind(data.SpanKind()), Timestamp: data.StartTime(), Duration: data.EndTime().Sub(data.StartTime()), Shared: false, LocalEndpoint: &zkmodel.Endpoint{ ServiceName: getServiceName(data.Resource().Attributes()), }, RemoteEndpoint: toZipkinRemoteEndpoint(data), Annotations: toZipkinAnnotations(data.Events()), Tags: toZipkinTags(data), } } func toZipkinSpanContext(data tracesdk.ReadOnlySpan) zkmodel.SpanContext { return zkmodel.SpanContext{ TraceID: toZipkinTraceID(data.SpanContext().TraceID()), ID: toZipkinID(data.SpanContext().SpanID()), ParentID: toZipkinParentID(data.Parent().SpanID()), Debug: false, Sampled: nil, Err: nil, } } func toZipkinTraceID(traceID trace.TraceID) zkmodel.TraceID { return zkmodel.TraceID{ High: binary.BigEndian.Uint64(traceID[:8]), Low: binary.BigEndian.Uint64(traceID[8:]), } } func toZipkinID(spanID trace.SpanID) zkmodel.ID { return zkmodel.ID(binary.BigEndian.Uint64(spanID[:])) } func toZipkinParentID(spanID trace.SpanID) *zkmodel.ID { if spanID.IsValid() { id := toZipkinID(spanID) return &id } return nil } func toZipkinKind(kind trace.SpanKind) zkmodel.Kind { switch kind { case trace.SpanKindUnspecified: return zkmodel.Undetermined case trace.SpanKindInternal: // The spec says we should set the kind to nil, but // the model does not allow that. 
return zkmodel.Undetermined case trace.SpanKindServer: return zkmodel.Server case trace.SpanKindClient: return zkmodel.Client case trace.SpanKindProducer: return zkmodel.Producer case trace.SpanKindConsumer: return zkmodel.Consumer } return zkmodel.Undetermined } func toZipkinAnnotations(events []tracesdk.Event) []zkmodel.Annotation { if len(events) == 0 { return nil } annotations := make([]zkmodel.Annotation, 0, len(events)) for _, event := range events { value := event.Name if len(event.Attributes) > 0 { jsonString := attributesToJSONMapString(event.Attributes) if jsonString != "" { value = fmt.Sprintf("%s: %s", event.Name, jsonString) } } annotations = append(annotations, zkmodel.Annotation{ Timestamp: event.Time, Value: value, }) } return annotations } func attributesToJSONMapString(attributes []attribute.KeyValue) string { m := make(map[string]interface{}, len(attributes)) for _, a := range attributes { m[(string)(a.Key)] = a.Value.AsInterface() } // if an error happens, the result will be an empty string jsonBytes, _ := json.Marshal(m) return (string)(jsonBytes) } // attributeToStringPair serializes each attribute to a string pair. func attributeToStringPair(kv attribute.KeyValue) (string, string) { switch kv.Value.Type() { // For slice attributes, serialize as JSON list string. case attribute.BOOLSLICE: data, _ := json.Marshal(kv.Value.AsBoolSlice()) return (string)(kv.Key), (string)(data) case attribute.INT64SLICE: data, _ := json.Marshal(kv.Value.AsInt64Slice()) return (string)(kv.Key), (string)(data) case attribute.FLOAT64SLICE: data, _ := json.Marshal(kv.Value.AsFloat64Slice()) return (string)(kv.Key), (string)(data) case attribute.STRINGSLICE: data, _ := json.Marshal(kv.Value.AsStringSlice()) return (string)(kv.Key), (string)(data) default: return (string)(kv.Key), kv.Value.Emit() } } // extraZipkinTags are those that may be added to every outgoing span. var extraZipkinTags = []string{ "otel.status_code", keyInstrumentationLibraryName, keyInstrumentationLibraryVersion, } func toZipkinTags(data tracesdk.ReadOnlySpan) map[string]string { attr := data.Attributes() resourceAttr := data.Resource().Attributes() m := make(map[string]string, len(attr)+len(resourceAttr)+len(extraZipkinTags)) for _, kv := range attr { k, v := attributeToStringPair(kv) m[k] = v } for _, kv := range resourceAttr { k, v := attributeToStringPair(kv) m[k] = v } if data.Status().Code != codes.Unset { // Zipkin expect to receive uppercase status values // rather than default capitalized ones. m["otel.status_code"] = strings.ToUpper(data.Status().Code.String()) } if data.Status().Code == codes.Error { m["error"] = data.Status().Description } else { delete(m, "error") } if is := data.InstrumentationScope(); is.Name != "" { m[keyInstrumentationLibraryName] = is.Name if is.Version != "" { m[keyInstrumentationLibraryVersion] = is.Version } } if len(m) == 0 { return nil } return m } // Rank determines selection order for remote endpoint. 
See the specification // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/sdk_exporters/zipkin.md#otlp---zipkin var remoteEndpointKeyRank = map[attribute.Key]int{ semconv.PeerServiceKey: 0, semconv.NetPeerNameKey: 1, semconv.NetSockPeerNameKey: 2, semconv.NetSockPeerAddrKey: 3, keyPeerHostname: 4, keyPeerAddress: 5, semconv.DBNameKey: 6, } func toZipkinRemoteEndpoint(data tracesdk.ReadOnlySpan) *zkmodel.Endpoint { // Should be set only for client or producer kind if sk := data.SpanKind(); sk != trace.SpanKindClient && sk != trace.SpanKindProducer { return nil } attr := data.Attributes() var endpointAttr attribute.KeyValue for _, kv := range attr { rank, ok := remoteEndpointKeyRank[kv.Key] if !ok { continue } currentKeyRank, ok := remoteEndpointKeyRank[endpointAttr.Key] if ok && rank < currentKeyRank { endpointAttr = kv } else if !ok { endpointAttr = kv } } if endpointAttr.Key == "" { return nil } if endpointAttr.Key != semconv.NetSockPeerAddrKey && endpointAttr.Value.Type() == attribute.STRING { return &zkmodel.Endpoint{ ServiceName: endpointAttr.Value.AsString(), } } return remoteEndpointPeerIPWithPort(endpointAttr.Value.AsString(), attr) } // Handles `net.peer.ip` remote endpoint separately (should include `net.peer.ip` // as well, if available). func remoteEndpointPeerIPWithPort(peerIP string, attrs []attribute.KeyValue) *zkmodel.Endpoint { ip := net.ParseIP(peerIP) if ip == nil { return nil } endpoint := &zkmodel.Endpoint{} // Determine if IPv4 or IPv6 if ip.To4() != nil { endpoint.IPv4 = ip } else { endpoint.IPv6 = ip } for _, kv := range attrs { if kv.Key == semconv.NetSockPeerPortKey { port, _ := strconv.ParseUint(kv.Value.Emit(), 10, 16) endpoint.Port = uint16(port) return endpoint } } return endpoint } opentelemetry-go-1.21.0/exporters/zipkin/model_test.go000066400000000000000000001064371452547353200231440ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
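//
// Illustrative sketch (not part of the test file that follows): the usual way
// to feed SpanModels from the conversion code above is with ReadOnlySpans
// recorded by the SDK, for example via the tracetest span recorder used
// throughout these tests. The exact wiring below is an assumption for
// illustration, not code from this repository.
//
//	import (
//		"context"
//
//		sdktrace "go.opentelemetry.io/otel/sdk/trace"
//		"go.opentelemetry.io/otel/sdk/trace/tracetest"
//	)
//
//	func recordedSpanModels() {
//		rec := tracetest.NewSpanRecorder()
//		tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(rec))
//		_, span := tp.Tracer("example").Start(context.Background(), "operation")
//		span.End()
//		models := SpanModels(rec.Ended()) // []zkmodel.SpanModel, one per ended span
//		_ = models
//	}
//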
package zipkin import ( "fmt" "net" "strconv" "testing" "time" "github.com/google/go-cmp/cmp" zkmodel "github.com/openzipkin/zipkin-go/model" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" tracesdk "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/sdk/trace/tracetest" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" "go.opentelemetry.io/otel/trace" ) func TestModelConversion(t *testing.T) { res := resource.NewSchemaless( semconv.ServiceName("model-test"), semconv.ServiceVersion("0.1.0"), attribute.Int64("resource-attr1", 42), attribute.IntSlice("resource-attr2", []int{0, 1, 2}), ) inputBatch := tracetest.SpanStubs{ // typical span data with UNSET status { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, }), Parent: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, }), SpanKind: trace.SpanKindServer, Name: "foo", StartTime: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), EndTime: time.Date(2020, time.March, 11, 19, 25, 0, 0, time.UTC), Attributes: []attribute.KeyValue{ attribute.Int64("attr1", 42), attribute.String("attr2", "bar"), attribute.IntSlice("attr3", []int{0, 1, 2}), }, Events: []tracesdk.Event{ { Time: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), Name: "ev1", Attributes: []attribute.KeyValue{ attribute.Int64("eventattr1", 123), }, }, { Time: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), Name: "ev2", Attributes: nil, }, }, Status: tracesdk.Status{ Code: codes.Unset, Description: "", }, Resource: res, }, // typical span data with OK status { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, }), Parent: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, }), SpanKind: trace.SpanKindServer, Name: "foo", StartTime: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), EndTime: time.Date(2020, time.March, 11, 19, 25, 0, 0, time.UTC), Attributes: []attribute.KeyValue{ attribute.Int64("attr1", 42), attribute.String("attr2", "bar"), attribute.IntSlice("attr3", []int{0, 1, 2}), }, Events: []tracesdk.Event{ { Time: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), Name: "ev1", Attributes: []attribute.KeyValue{ attribute.Int64("eventattr1", 123), }, }, { Time: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), Name: "ev2", Attributes: nil, }, }, Status: tracesdk.Status{ Code: codes.Ok, Description: "", }, Resource: res, }, // typical span data with ERROR status { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 
0xFA, 0xF9, 0xF8}, }), Parent: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, }), SpanKind: trace.SpanKindServer, Name: "foo", StartTime: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), EndTime: time.Date(2020, time.March, 11, 19, 25, 0, 0, time.UTC), Attributes: []attribute.KeyValue{ attribute.Int64("attr1", 42), attribute.String("attr2", "bar"), attribute.IntSlice("attr3", []int{0, 1, 2}), }, Events: []tracesdk.Event{ { Time: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), Name: "ev1", Attributes: []attribute.KeyValue{ attribute.Int64("eventattr1", 123), }, }, { Time: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), Name: "ev2", Attributes: nil, }, }, Status: tracesdk.Status{ Code: codes.Error, Description: "404, file not found", }, Resource: res, }, // span data with no parent (same as typical, but has // invalid parent) { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, }), SpanKind: trace.SpanKindServer, Name: "foo", StartTime: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), EndTime: time.Date(2020, time.March, 11, 19, 25, 0, 0, time.UTC), Attributes: []attribute.KeyValue{ attribute.Int64("attr1", 42), attribute.String("attr2", "bar"), }, Events: []tracesdk.Event{ { Time: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), Name: "ev1", Attributes: []attribute.KeyValue{ attribute.Int64("eventattr1", 123), }, }, { Time: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), Name: "ev2", Attributes: nil, }, }, Status: tracesdk.Status{ Code: codes.Error, Description: "404, file not found", }, Resource: res, }, // span data of unspecified kind { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, }), Parent: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, }), SpanKind: trace.SpanKindUnspecified, Name: "foo", StartTime: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), EndTime: time.Date(2020, time.March, 11, 19, 25, 0, 0, time.UTC), Attributes: []attribute.KeyValue{ attribute.Int64("attr1", 42), attribute.String("attr2", "bar"), }, Events: []tracesdk.Event{ { Time: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), Name: "ev1", Attributes: []attribute.KeyValue{ attribute.Int64("eventattr1", 123), }, }, { Time: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), Name: "ev2", Attributes: nil, }, }, Status: tracesdk.Status{ Code: codes.Error, Description: "404, file not found", }, Resource: res, }, // span data of internal kind { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, }), Parent: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0x00, 0x01, 
0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, }), SpanKind: trace.SpanKindInternal, Name: "foo", StartTime: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), EndTime: time.Date(2020, time.March, 11, 19, 25, 0, 0, time.UTC), Attributes: []attribute.KeyValue{ attribute.Int64("attr1", 42), attribute.String("attr2", "bar"), }, Events: []tracesdk.Event{ { Time: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), Name: "ev1", Attributes: []attribute.KeyValue{ attribute.Int64("eventattr1", 123), }, }, { Time: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), Name: "ev2", Attributes: nil, }, }, Status: tracesdk.Status{ Code: codes.Error, Description: "404, file not found", }, Resource: res, }, // span data of client kind { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, }), Parent: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, }), SpanKind: trace.SpanKindClient, Name: "foo", StartTime: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), EndTime: time.Date(2020, time.March, 11, 19, 25, 0, 0, time.UTC), Attributes: []attribute.KeyValue{ attribute.Int64("attr1", 42), attribute.String("attr2", "bar"), attribute.String("peer.hostname", "test-peer-hostname"), attribute.String("net.sock.peer.addr", "1.2.3.4"), attribute.Int64("net.sock.peer.port", 9876), }, Events: []tracesdk.Event{ { Time: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), Name: "ev1", Attributes: []attribute.KeyValue{ attribute.Int64("eventattr1", 123), }, }, { Time: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), Name: "ev2", Attributes: nil, }, }, Status: tracesdk.Status{ Code: codes.Error, Description: "404, file not found", }, Resource: res, }, // span data of producer kind { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, }), Parent: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, }), SpanKind: trace.SpanKindProducer, Name: "foo", StartTime: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), EndTime: time.Date(2020, time.March, 11, 19, 25, 0, 0, time.UTC), Attributes: []attribute.KeyValue{ attribute.Int64("attr1", 42), attribute.String("attr2", "bar"), }, Events: []tracesdk.Event{ { Time: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), Name: "ev1", Attributes: []attribute.KeyValue{ attribute.Int64("eventattr1", 123), }, }, { Time: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), Name: "ev2", Attributes: nil, }, }, Status: tracesdk.Status{ Code: codes.Error, Description: "404, file not found", }, Resource: res, }, // span data of consumer kind { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 
0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, }), Parent: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, }), SpanKind: trace.SpanKindConsumer, Name: "foo", StartTime: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), EndTime: time.Date(2020, time.March, 11, 19, 25, 0, 0, time.UTC), Attributes: []attribute.KeyValue{ attribute.Int64("attr1", 42), attribute.String("attr2", "bar"), }, Events: []tracesdk.Event{ { Time: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), Name: "ev1", Attributes: []attribute.KeyValue{ attribute.Int64("eventattr1", 123), }, }, { Time: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), Name: "ev2", Attributes: nil, }, }, Status: tracesdk.Status{ Code: codes.Error, Description: "404, file not found", }, Resource: res, }, // span data with no events { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, }), Parent: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, }), SpanKind: trace.SpanKindServer, Name: "foo", StartTime: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), EndTime: time.Date(2020, time.March, 11, 19, 25, 0, 0, time.UTC), Attributes: []attribute.KeyValue{ attribute.Int64("attr1", 42), attribute.String("attr2", "bar"), }, Events: nil, Status: tracesdk.Status{ Code: codes.Error, Description: "404, file not found", }, Resource: res, }, // span data with an "error" attribute set to "false" { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, }), Parent: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, SpanID: trace.SpanID{0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38}, }), SpanKind: trace.SpanKindServer, Name: "foo", StartTime: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), EndTime: time.Date(2020, time.March, 11, 19, 25, 0, 0, time.UTC), Attributes: []attribute.KeyValue{ attribute.String("error", "false"), }, Events: []tracesdk.Event{ { Time: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), Name: "ev1", Attributes: []attribute.KeyValue{ attribute.Int64("eventattr1", 123), }, }, { Time: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), Name: "ev2", Attributes: nil, }, }, Resource: res, }, }.Snapshots() expectedOutputBatch := []zkmodel.SpanModel{ // model for typical span data with UNSET status { SpanContext: zkmodel.SpanContext{ TraceID: zkmodel.TraceID{ High: 0x001020304050607, Low: 0x8090a0b0c0d0e0f, }, ID: zkmodel.ID(0xfffefdfcfbfaf9f8), ParentID: zkmodelIDPtr(0x3f3e3d3c3b3a3938), Debug: false, Sampled: nil, Err: nil, }, Name: "foo", Kind: "SERVER", Timestamp: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), Duration: time.Minute, Shared: false, LocalEndpoint: 
&zkmodel.Endpoint{ ServiceName: "model-test", }, RemoteEndpoint: nil, Annotations: []zkmodel.Annotation{ { Timestamp: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), Value: `ev1: {"eventattr1":123}`, }, { Timestamp: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), Value: "ev2", }, }, Tags: map[string]string{ "attr1": "42", "attr2": "bar", "attr3": "[0,1,2]", "service.name": "model-test", "service.version": "0.1.0", "resource-attr1": "42", "resource-attr2": "[0,1,2]", }, }, // model for typical span data with OK status { SpanContext: zkmodel.SpanContext{ TraceID: zkmodel.TraceID{ High: 0x001020304050607, Low: 0x8090a0b0c0d0e0f, }, ID: zkmodel.ID(0xfffefdfcfbfaf9f8), ParentID: zkmodelIDPtr(0x3f3e3d3c3b3a3938), Debug: false, Sampled: nil, Err: nil, }, Name: "foo", Kind: "SERVER", Timestamp: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), Duration: time.Minute, Shared: false, LocalEndpoint: &zkmodel.Endpoint{ ServiceName: "model-test", }, RemoteEndpoint: nil, Annotations: []zkmodel.Annotation{ { Timestamp: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), Value: `ev1: {"eventattr1":123}`, }, { Timestamp: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), Value: "ev2", }, }, Tags: map[string]string{ "attr1": "42", "attr2": "bar", "attr3": "[0,1,2]", "otel.status_code": "OK", "service.name": "model-test", "service.version": "0.1.0", "resource-attr1": "42", "resource-attr2": "[0,1,2]", }, }, // model for typical span data with ERROR status { SpanContext: zkmodel.SpanContext{ TraceID: zkmodel.TraceID{ High: 0x001020304050607, Low: 0x8090a0b0c0d0e0f, }, ID: zkmodel.ID(0xfffefdfcfbfaf9f8), ParentID: zkmodelIDPtr(0x3f3e3d3c3b3a3938), Debug: false, Sampled: nil, Err: nil, }, Name: "foo", Kind: "SERVER", Timestamp: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), Duration: time.Minute, Shared: false, LocalEndpoint: &zkmodel.Endpoint{ ServiceName: "model-test", }, RemoteEndpoint: nil, Annotations: []zkmodel.Annotation{ { Timestamp: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), Value: `ev1: {"eventattr1":123}`, }, { Timestamp: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), Value: "ev2", }, }, Tags: map[string]string{ "attr1": "42", "attr2": "bar", "attr3": "[0,1,2]", "otel.status_code": "ERROR", "error": "404, file not found", "service.name": "model-test", "service.version": "0.1.0", "resource-attr1": "42", "resource-attr2": "[0,1,2]", }, }, // model for span data with no parent { SpanContext: zkmodel.SpanContext{ TraceID: zkmodel.TraceID{ High: 0x001020304050607, Low: 0x8090a0b0c0d0e0f, }, ID: zkmodel.ID(0xfffefdfcfbfaf9f8), ParentID: nil, Debug: false, Sampled: nil, Err: nil, }, Name: "foo", Kind: "SERVER", Timestamp: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), Duration: time.Minute, Shared: false, LocalEndpoint: &zkmodel.Endpoint{ ServiceName: "model-test", }, RemoteEndpoint: nil, Annotations: []zkmodel.Annotation{ { Timestamp: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), Value: `ev1: {"eventattr1":123}`, }, { Timestamp: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), Value: "ev2", }, }, Tags: map[string]string{ "attr1": "42", "attr2": "bar", "otel.status_code": "ERROR", "error": "404, file not found", "service.name": "model-test", "service.version": "0.1.0", "resource-attr1": "42", "resource-attr2": "[0,1,2]", }, }, // model for span data of unspecified kind { SpanContext: zkmodel.SpanContext{ TraceID: zkmodel.TraceID{ High: 0x001020304050607, Low: 0x8090a0b0c0d0e0f, }, ID: 
zkmodel.ID(0xfffefdfcfbfaf9f8), ParentID: zkmodelIDPtr(0x3f3e3d3c3b3a3938), Debug: false, Sampled: nil, Err: nil, }, Name: "foo", Kind: "", Timestamp: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), Duration: time.Minute, Shared: false, LocalEndpoint: &zkmodel.Endpoint{ ServiceName: "model-test", }, RemoteEndpoint: nil, Annotations: []zkmodel.Annotation{ { Timestamp: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), Value: `ev1: {"eventattr1":123}`, }, { Timestamp: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), Value: "ev2", }, }, Tags: map[string]string{ "attr1": "42", "attr2": "bar", "otel.status_code": "ERROR", "error": "404, file not found", "service.name": "model-test", "service.version": "0.1.0", "resource-attr1": "42", "resource-attr2": "[0,1,2]", }, }, // model for span data of internal kind { SpanContext: zkmodel.SpanContext{ TraceID: zkmodel.TraceID{ High: 0x001020304050607, Low: 0x8090a0b0c0d0e0f, }, ID: zkmodel.ID(0xfffefdfcfbfaf9f8), ParentID: zkmodelIDPtr(0x3f3e3d3c3b3a3938), Debug: false, Sampled: nil, Err: nil, }, Name: "foo", Kind: "", Timestamp: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), Duration: time.Minute, Shared: false, LocalEndpoint: &zkmodel.Endpoint{ ServiceName: "model-test", }, RemoteEndpoint: nil, Annotations: []zkmodel.Annotation{ { Timestamp: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), Value: `ev1: {"eventattr1":123}`, }, { Timestamp: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), Value: "ev2", }, }, Tags: map[string]string{ "attr1": "42", "attr2": "bar", "otel.status_code": "ERROR", "error": "404, file not found", "service.name": "model-test", "service.version": "0.1.0", "resource-attr1": "42", "resource-attr2": "[0,1,2]", }, }, // model for span data of client kind { SpanContext: zkmodel.SpanContext{ TraceID: zkmodel.TraceID{ High: 0x001020304050607, Low: 0x8090a0b0c0d0e0f, }, ID: zkmodel.ID(0xfffefdfcfbfaf9f8), ParentID: zkmodelIDPtr(0x3f3e3d3c3b3a3938), Debug: false, Sampled: nil, Err: nil, }, Name: "foo", Kind: "CLIENT", Timestamp: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), Duration: time.Minute, Shared: false, LocalEndpoint: &zkmodel.Endpoint{ ServiceName: "model-test", }, RemoteEndpoint: &zkmodel.Endpoint{ IPv4: net.ParseIP("1.2.3.4"), Port: 9876, }, Annotations: []zkmodel.Annotation{ { Timestamp: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), Value: `ev1: {"eventattr1":123}`, }, { Timestamp: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), Value: "ev2", }, }, Tags: map[string]string{ "attr1": "42", "attr2": "bar", "net.sock.peer.addr": "1.2.3.4", "net.sock.peer.port": "9876", "peer.hostname": "test-peer-hostname", "otel.status_code": "ERROR", "error": "404, file not found", "service.name": "model-test", "service.version": "0.1.0", "resource-attr1": "42", "resource-attr2": "[0,1,2]", }, }, // model for span data of producer kind { SpanContext: zkmodel.SpanContext{ TraceID: zkmodel.TraceID{ High: 0x001020304050607, Low: 0x8090a0b0c0d0e0f, }, ID: zkmodel.ID(0xfffefdfcfbfaf9f8), ParentID: zkmodelIDPtr(0x3f3e3d3c3b3a3938), Debug: false, Sampled: nil, Err: nil, }, Name: "foo", Kind: "PRODUCER", Timestamp: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), Duration: time.Minute, Shared: false, LocalEndpoint: &zkmodel.Endpoint{ ServiceName: "model-test", }, RemoteEndpoint: nil, Annotations: []zkmodel.Annotation{ { Timestamp: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), Value: `ev1: {"eventattr1":123}`, }, { Timestamp: time.Date(2020, 
time.March, 11, 19, 24, 45, 0, time.UTC), Value: "ev2", }, }, Tags: map[string]string{ "attr1": "42", "attr2": "bar", "otel.status_code": "ERROR", "error": "404, file not found", "service.name": "model-test", "service.version": "0.1.0", "resource-attr1": "42", "resource-attr2": "[0,1,2]", }, }, // model for span data of consumer kind { SpanContext: zkmodel.SpanContext{ TraceID: zkmodel.TraceID{ High: 0x001020304050607, Low: 0x8090a0b0c0d0e0f, }, ID: zkmodel.ID(0xfffefdfcfbfaf9f8), ParentID: zkmodelIDPtr(0x3f3e3d3c3b3a3938), Debug: false, Sampled: nil, Err: nil, }, Name: "foo", Kind: "CONSUMER", Timestamp: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), Duration: time.Minute, Shared: false, LocalEndpoint: &zkmodel.Endpoint{ ServiceName: "model-test", }, RemoteEndpoint: nil, Annotations: []zkmodel.Annotation{ { Timestamp: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), Value: `ev1: {"eventattr1":123}`, }, { Timestamp: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), Value: "ev2", }, }, Tags: map[string]string{ "attr1": "42", "attr2": "bar", "otel.status_code": "ERROR", "error": "404, file not found", "service.name": "model-test", "service.version": "0.1.0", "resource-attr1": "42", "resource-attr2": "[0,1,2]", }, }, // model for span data with no events { SpanContext: zkmodel.SpanContext{ TraceID: zkmodel.TraceID{ High: 0x001020304050607, Low: 0x8090a0b0c0d0e0f, }, ID: zkmodel.ID(0xfffefdfcfbfaf9f8), ParentID: zkmodelIDPtr(0x3f3e3d3c3b3a3938), Debug: false, Sampled: nil, Err: nil, }, Name: "foo", Kind: "SERVER", Timestamp: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), Duration: time.Minute, Shared: false, LocalEndpoint: &zkmodel.Endpoint{ ServiceName: "model-test", }, RemoteEndpoint: nil, Annotations: nil, Tags: map[string]string{ "attr1": "42", "attr2": "bar", "otel.status_code": "ERROR", "error": "404, file not found", "service.name": "model-test", "service.version": "0.1.0", "resource-attr1": "42", "resource-attr2": "[0,1,2]", }, }, // model for span data with an "error" attribute set to "false" { SpanContext: zkmodel.SpanContext{ TraceID: zkmodel.TraceID{ High: 0x001020304050607, Low: 0x8090a0b0c0d0e0f, }, ID: zkmodel.ID(0xfffefdfcfbfaf9f8), ParentID: zkmodelIDPtr(0x3f3e3d3c3b3a3938), Debug: false, Sampled: nil, Err: nil, }, Name: "foo", Kind: "SERVER", Timestamp: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), Duration: time.Minute, Shared: false, LocalEndpoint: &zkmodel.Endpoint{ ServiceName: "model-test", }, RemoteEndpoint: nil, Annotations: []zkmodel.Annotation{ { Timestamp: time.Date(2020, time.March, 11, 19, 24, 30, 0, time.UTC), Value: `ev1: {"eventattr1":123}`, }, { Timestamp: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), Value: "ev2", }, }, Tags: map[string]string{ "service.name": "model-test", "service.version": "0.1.0", "resource-attr1": "42", "resource-attr2": "[0,1,2]", }, // only resource tags should be included }, } gottenOutputBatch := SpanModels(inputBatch) require.Equal(t, expectedOutputBatch, gottenOutputBatch) } func zkmodelIDPtr(n uint64) *zkmodel.ID { id := zkmodel.ID(n) return &id } func TestTagsTransformation(t *testing.T) { keyValue := "value" doubleValue := 123.456 uintValue := int64(123) statusMessage := "this is a problem" instrLibName := "instrumentation-library" instrLibVersion := "semver:1.0.0" tests := []struct { name string data tracetest.SpanStub want map[string]string }{ { name: "attributes", data: tracetest.SpanStub{ Attributes: []attribute.KeyValue{ attribute.String("key", keyValue), 
attribute.Float64("double", doubleValue), attribute.Int64("uint", uintValue), attribute.Bool("ok", true), }, }, want: map[string]string{ "double": fmt.Sprint(doubleValue), "key": keyValue, "ok": "true", "uint": strconv.FormatInt(uintValue, 10), }, }, { name: "no attributes", data: tracetest.SpanStub{}, want: nil, }, { name: "omit-noerror", data: tracetest.SpanStub{ Attributes: []attribute.KeyValue{ attribute.Bool("error", false), }, }, want: nil, }, { name: "statusCode UNSET", data: tracetest.SpanStub{ Attributes: []attribute.KeyValue{ attribute.String("key", keyValue), }, Status: tracesdk.Status{ Code: codes.Unset, Description: "", }, }, want: map[string]string{ "key": keyValue, }, }, { name: "statusCode OK", data: tracetest.SpanStub{ Attributes: []attribute.KeyValue{ attribute.String("key", keyValue), attribute.Bool("ok", true), }, Status: tracesdk.Status{ Code: codes.Ok, Description: "", }, }, want: map[string]string{ "key": keyValue, "ok": "true", "otel.status_code": "OK", }, }, { name: "statusCode ERROR", data: tracetest.SpanStub{ Attributes: []attribute.KeyValue{ attribute.String("key", keyValue), attribute.Bool("error", true), }, Status: tracesdk.Status{ Code: codes.Error, Description: statusMessage, }, }, want: map[string]string{ "error": statusMessage, "key": keyValue, "otel.status_code": "ERROR", }, }, { name: "instrLib-empty", data: tracetest.SpanStub{ InstrumentationLibrary: instrumentation.Library{}, }, want: nil, }, { name: "instrLib-noversion", data: tracetest.SpanStub{ Attributes: []attribute.KeyValue{}, InstrumentationLibrary: instrumentation.Library{ Name: instrLibName, }, }, want: map[string]string{ "otel.library.name": instrLibName, }, }, { name: "instrLib-with-version", data: tracetest.SpanStub{ Attributes: []attribute.KeyValue{}, InstrumentationLibrary: instrumentation.Library{ Name: instrLibName, Version: instrLibVersion, }, }, want: map[string]string{ "otel.library.name": instrLibName, "otel.library.version": instrLibVersion, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := toZipkinTags(tt.data.Snapshot()) if diff := cmp.Diff(got, tt.want); diff != "" { t.Errorf("Diff%v", diff) } }) } } func TestRemoteEndpointTransformation(t *testing.T) { tests := []struct { name string data tracetest.SpanStub want *zkmodel.Endpoint }{ { name: "nil-not-applicable", data: tracetest.SpanStub{ SpanKind: trace.SpanKindClient, Attributes: []attribute.KeyValue{}, }, want: nil, }, { name: "nil-not-found", data: tracetest.SpanStub{ SpanKind: trace.SpanKindConsumer, Attributes: []attribute.KeyValue{ attribute.String("attr", "test"), }, }, want: nil, }, { name: "peer-service-rank", data: tracetest.SpanStub{ SpanKind: trace.SpanKindProducer, Attributes: []attribute.KeyValue{ semconv.PeerService("peer-service-test"), semconv.NetPeerName("peer-name-test"), semconv.NetSockPeerName("net-sock-peer-test"), }, }, want: &zkmodel.Endpoint{ ServiceName: "peer-service-test", }, }, { name: "net-sock-peer-rank", data: tracetest.SpanStub{ SpanKind: trace.SpanKindProducer, Attributes: []attribute.KeyValue{ semconv.NetSockPeerName("net-sock-peer-test"), semconv.DBName("db-name-test"), }, }, want: &zkmodel.Endpoint{ ServiceName: "net-sock-peer-test", }, }, { name: "db-name-rank", data: tracetest.SpanStub{ SpanKind: trace.SpanKindProducer, Attributes: []attribute.KeyValue{ attribute.String("foo", "bar"), semconv.DBName("db-name-test"), }, }, want: &zkmodel.Endpoint{ ServiceName: "db-name-test", }, }, { name: "peer-hostname-rank", data: tracetest.SpanStub{ SpanKind: 
trace.SpanKindProducer, Attributes: []attribute.KeyValue{ keyPeerHostname.String("peer-hostname-test"), keyPeerAddress.String("peer-address-test"), semconv.DBName("http-host-test"), }, }, want: &zkmodel.Endpoint{ ServiceName: "peer-hostname-test", }, }, { name: "peer-address-rank", data: tracetest.SpanStub{ SpanKind: trace.SpanKindProducer, Attributes: []attribute.KeyValue{ keyPeerAddress.String("peer-address-test"), semconv.DBName("http-host-test"), }, }, want: &zkmodel.Endpoint{ ServiceName: "peer-address-test", }, }, { name: "net-peer-invalid-ip", data: tracetest.SpanStub{ SpanKind: trace.SpanKindProducer, Attributes: []attribute.KeyValue{ semconv.NetSockPeerAddr("INVALID"), }, }, want: nil, }, { name: "net-peer-ipv6-no-port", data: tracetest.SpanStub{ SpanKind: trace.SpanKindProducer, Attributes: []attribute.KeyValue{ semconv.NetSockPeerAddr("0:0:1:5ee:bad:c0de:0:0"), }, }, want: &zkmodel.Endpoint{ IPv6: net.ParseIP("0:0:1:5ee:bad:c0de:0:0"), }, }, { name: "net-peer-ipv4-port", data: tracetest.SpanStub{ SpanKind: trace.SpanKindProducer, Attributes: []attribute.KeyValue{ semconv.NetSockPeerAddr("1.2.3.4"), semconv.NetSockPeerPort(9876), }, }, want: &zkmodel.Endpoint{ IPv4: net.ParseIP("1.2.3.4"), Port: 9876, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := toZipkinRemoteEndpoint(tt.data.Snapshot()) if diff := cmp.Diff(got, tt.want); diff != "" { t.Errorf("Diff%v", diff) } }) } } func TestServiceName(t *testing.T) { attrs := []attribute.KeyValue{} assert.Equal(t, defaultServiceName, getServiceName(attrs)) attrs = append(attrs, attribute.String("test_key", "test_value")) assert.Equal(t, defaultServiceName, getServiceName(attrs)) attrs = append(attrs, semconv.ServiceName("my_service")) assert.Equal(t, "my_service", getServiceName(attrs)) } opentelemetry-go-1.21.0/exporters/zipkin/zipkin.go000066400000000000000000000121521452547353200222770ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package zipkin // import "go.opentelemetry.io/otel/exporters/zipkin" import ( "bytes" "context" "encoding/json" "fmt" "io" "log" "net/http" "net/url" "sync" "github.com/go-logr/logr" "github.com/go-logr/stdr" sdktrace "go.opentelemetry.io/otel/sdk/trace" ) const ( defaultCollectorURL = "http://localhost:9411/api/v2/spans" ) // Exporter exports spans to the zipkin collector. type Exporter struct { url string client *http.Client logger logr.Logger stoppedMu sync.RWMutex stopped bool } var _ sdktrace.SpanExporter = &Exporter{} var emptyLogger = logr.Logger{} // Options contains configuration for the exporter. type config struct { client *http.Client logger logr.Logger } // Option defines a function that configures the exporter. type Option interface { apply(config) config } type optionFunc func(config) config func (fn optionFunc) apply(cfg config) config { return fn(cfg) } // WithLogger configures the exporter to use the passed logger. // WithLogger and WithLogr will overwrite each other. 
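//
// For illustration only (an assumption, not code from this file), the logger
// option is normally combined with New and a batch span processor roughly
// like so, with sdktrace aliasing go.opentelemetry.io/otel/sdk/trace:
//
//	logger := log.New(os.Stderr, "zipkin: ", log.LstdFlags)
//	exp, err := zipkin.New(
//		"http://localhost:9411/api/v2/spans",
//		zipkin.WithLogger(logger),
//	)
//	if err != nil {
//		// handle the construction error
//	}
//	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
//	_ = tp
//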
func WithLogger(logger *log.Logger) Option { return WithLogr(stdr.New(logger)) } // WithLogr configures the exporter to use the passed logr.Logger. // WithLogr and WithLogger will overwrite each other. func WithLogr(logger logr.Logger) Option { return optionFunc(func(cfg config) config { cfg.logger = logger return cfg }) } // WithClient configures the exporter to use the passed HTTP client. func WithClient(client *http.Client) Option { return optionFunc(func(cfg config) config { cfg.client = client return cfg }) } // New creates a new Zipkin exporter. func New(collectorURL string, opts ...Option) (*Exporter, error) { if collectorURL == "" { // Use endpoint from env var or default collector URL. collectorURL = envOr(envEndpoint, defaultCollectorURL) } u, err := url.Parse(collectorURL) if err != nil { return nil, fmt.Errorf("invalid collector URL %q: %v", collectorURL, err) } if u.Scheme == "" || u.Host == "" { return nil, fmt.Errorf("invalid collector URL %q: no scheme or host", collectorURL) } cfg := config{} for _, opt := range opts { cfg = opt.apply(cfg) } if cfg.client == nil { cfg.client = http.DefaultClient } return &Exporter{ url: collectorURL, client: cfg.client, logger: cfg.logger, }, nil } // ExportSpans exports spans to a Zipkin receiver. func (e *Exporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error { e.stoppedMu.RLock() stopped := e.stopped e.stoppedMu.RUnlock() if stopped { e.logf("exporter stopped, not exporting span batch") return nil } if len(spans) == 0 { e.logf("no spans to export") return nil } models := SpanModels(spans) body, err := json.Marshal(models) if err != nil { return e.errf("failed to serialize zipkin models to JSON: %v", err) } e.logf("about to send a POST request to %s with body %s", e.url, body) req, err := http.NewRequestWithContext(ctx, http.MethodPost, e.url, bytes.NewBuffer(body)) if err != nil { return e.errf("failed to create request to %s: %v", e.url, err) } req.Header.Set("Content-Type", "application/json") resp, err := e.client.Do(req) if err != nil { return e.errf("request to %s failed: %v", e.url, err) } defer resp.Body.Close() // Zipkin API returns a 202 on success and the content of the body isn't interesting // but it is still being read because according to https://golang.org/pkg/net/http/#Response // > The default HTTP client's Transport may not reuse HTTP/1.x "keep-alive" TCP connections // > if the Body is not read to completion and closed. _, err = io.Copy(io.Discard, resp.Body) if err != nil { return e.errf("failed to read response body: %v", err) } if resp.StatusCode != http.StatusAccepted { return e.errf("failed to send spans to zipkin server with status %d", resp.StatusCode) } return nil } // Shutdown stops the exporter flushing any pending exports. func (e *Exporter) Shutdown(ctx context.Context) error { e.stoppedMu.Lock() e.stopped = true e.stoppedMu.Unlock() select { case <-ctx.Done(): return ctx.Err() default: } return nil } func (e *Exporter) logf(format string, args ...interface{}) { if e.logger != emptyLogger { e.logger.Info(fmt.Sprintf(format, args...)) } } func (e *Exporter) errf(format string, args ...interface{}) error { e.logf(format, args...) return fmt.Errorf(format, args...) } // MarshalLog is the marshaling function used by the logging system to represent this exporter. 
func (e *Exporter) MarshalLog() interface{} { return struct { Type string URL string }{ Type: "zipkin", URL: e.url, } } opentelemetry-go-1.21.0/exporters/zipkin/zipkin_test.go000066400000000000000000000241041452547353200233360ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package zipkin import ( "bytes" "context" "encoding/json" "fmt" "io" "log" "net" "net/http" "sync" "testing" "time" ottest "go.opentelemetry.io/otel/exporters/zipkin/internal/internaltest" "github.com/go-logr/logr/funcr" zkmodel "github.com/openzipkin/zipkin-go/model" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/sdk/trace/tracetest" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" "go.opentelemetry.io/otel/trace" ) func TestNewRawExporter(t *testing.T) { _, err := New( defaultCollectorURL, ) assert.NoError(t, err) } func TestNewRawExporterShouldFailInvalidCollectorURL(t *testing.T) { var ( exp *Exporter err error ) // invalid URL exp, err = New( "localhost", ) assert.Error(t, err) assert.EqualError(t, err, "invalid collector URL \"localhost\": no scheme or host") assert.Nil(t, exp) } func TestNewRawExporterEmptyDefaultCollectorURL(t *testing.T) { var ( exp *Exporter err error ) // use default collector URL if not specified exp, err = New("") assert.NoError(t, err) assert.Equal(t, defaultCollectorURL, exp.url) } func TestNewRawExporterCollectorURLFromEnv(t *testing.T) { var ( exp *Exporter err error ) expectedEndpoint := "http://localhost:19411/api/v2/spans" envStore, err := ottest.SetEnvVariables(map[string]string{ envEndpoint: expectedEndpoint, }) assert.NoError(t, err) defer func() { require.NoError(t, envStore.Restore()) }() exp, err = New("") assert.NoError(t, err) assert.Equal(t, expectedEndpoint, exp.url) } type mockZipkinCollector struct { t *testing.T url string closing bool server *http.Server wg *sync.WaitGroup lock sync.RWMutex models []zkmodel.SpanModel } func startMockZipkinCollector(t *testing.T) *mockZipkinCollector { collector := &mockZipkinCollector{ t: t, closing: false, } listener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) collector.url = fmt.Sprintf("http://%s", listener.Addr().String()) server := &http.Server{ Handler: http.HandlerFunc(collector.handler), ReadTimeout: 10 * time.Second, WriteTimeout: 10 * time.Second, } collector.server = server wg := &sync.WaitGroup{} wg.Add(1) collector.wg = wg go func() { err := server.Serve(listener) require.True(t, collector.closing) require.Equal(t, http.ErrServerClosed, err) wg.Done() }() return collector } func (c *mockZipkinCollector) handler(w http.ResponseWriter, r *http.Request) { jsonBytes, err := io.ReadAll(r.Body) require.NoError(c.t, err) var models []zkmodel.SpanModel err = json.Unmarshal(jsonBytes, &models) require.NoError(c.t, err) // for some reason we may get the nonUTC timestamps in 
models, // fix that for midx := range models { models[midx].Timestamp = models[midx].Timestamp.UTC() for aidx := range models[midx].Annotations { models[midx].Annotations[aidx].Timestamp = models[midx].Annotations[aidx].Timestamp.UTC() } } c.lock.Lock() defer c.lock.Unlock() c.models = append(c.models, models...) w.WriteHeader(http.StatusAccepted) } func (c *mockZipkinCollector) Close() { if c.closing { return } c.closing = true server := c.server c.server = nil require.NoError(c.t, server.Shutdown(context.Background())) c.wg.Wait() } func (c *mockZipkinCollector) ModelsLen() int { c.lock.RLock() defer c.lock.RUnlock() return len(c.models) } func (c *mockZipkinCollector) StealModels() []zkmodel.SpanModel { c.lock.Lock() defer c.lock.Unlock() models := c.models c.models = nil return models } type logStore struct { T *testing.T Messages []string } func (s *logStore) Write(p []byte) (n int, err error) { msg := (string)(p) if s.T != nil { s.T.Logf("%s", msg) } s.Messages = append(s.Messages, msg) return len(p), nil } func logStoreLogger(s *logStore) *log.Logger { return log.New(s, "", 0) } func TestExportSpans(t *testing.T) { res := resource.NewSchemaless( semconv.ServiceName("exporter-test"), semconv.ServiceVersion("0.1.0"), ) spans := tracetest.SpanStubs{ // parent { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, }), SpanKind: trace.SpanKindServer, Name: "foo", StartTime: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), EndTime: time.Date(2020, time.March, 11, 19, 25, 0, 0, time.UTC), Attributes: nil, Events: nil, Status: sdktrace.Status{ Code: codes.Error, Description: "404, file not found", }, Resource: res, }, // child { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, SpanID: trace.SpanID{0xDF, 0xDE, 0xDD, 0xDC, 0xDB, 0xDA, 0xD9, 0xD8}, }), Parent: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}, SpanID: trace.SpanID{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}, }), SpanKind: trace.SpanKindServer, Name: "bar", StartTime: time.Date(2020, time.March, 11, 19, 24, 15, 0, time.UTC), EndTime: time.Date(2020, time.March, 11, 19, 24, 45, 0, time.UTC), Attributes: nil, Events: nil, Status: sdktrace.Status{ Code: codes.Error, Description: "403, forbidden", }, Resource: res, }, }.Snapshots() models := []zkmodel.SpanModel{ // model of parent { SpanContext: zkmodel.SpanContext{ TraceID: zkmodel.TraceID{ High: 0x001020304050607, Low: 0x8090a0b0c0d0e0f, }, ID: zkmodel.ID(0xfffefdfcfbfaf9f8), ParentID: nil, Debug: false, Sampled: nil, Err: nil, }, Name: "foo", Kind: "SERVER", Timestamp: time.Date(2020, time.March, 11, 19, 24, 0, 0, time.UTC), Duration: time.Minute, Shared: false, LocalEndpoint: &zkmodel.Endpoint{ ServiceName: "exporter-test", }, RemoteEndpoint: nil, Annotations: nil, Tags: map[string]string{ "otel.status_code": "ERROR", "error": "404, file not found", "service.name": "exporter-test", "service.version": "0.1.0", }, }, // model of child { SpanContext: zkmodel.SpanContext{ TraceID: zkmodel.TraceID{ High: 0x001020304050607, Low: 0x8090a0b0c0d0e0f, }, ID: zkmodel.ID(0xdfdedddcdbdad9d8), ParentID: zkmodelIDPtr(0xfffefdfcfbfaf9f8), Debug: 
false, Sampled: nil, Err: nil, }, Name: "bar", Kind: "SERVER", Timestamp: time.Date(2020, time.March, 11, 19, 24, 15, 0, time.UTC), Duration: 30 * time.Second, Shared: false, LocalEndpoint: &zkmodel.Endpoint{ ServiceName: "exporter-test", }, RemoteEndpoint: nil, Annotations: nil, Tags: map[string]string{ "otel.status_code": "ERROR", "error": "403, forbidden", "service.name": "exporter-test", "service.version": "0.1.0", }, }, } require.Len(t, models, len(spans)) collector := startMockZipkinCollector(t) defer collector.Close() ls := &logStore{T: t} logger := logStoreLogger(ls) exporter, err := New(collector.url, WithLogger(logger)) require.NoError(t, err) ctx := context.Background() require.Len(t, ls.Messages, 0) require.NoError(t, exporter.ExportSpans(ctx, spans[0:1])) require.Len(t, ls.Messages, 1) require.Contains(t, ls.Messages[0], "send a POST request") ls.Messages = nil require.NoError(t, exporter.ExportSpans(ctx, nil)) require.Len(t, ls.Messages, 1) require.Contains(t, ls.Messages[0], "no spans to export") ls.Messages = nil require.NoError(t, exporter.ExportSpans(ctx, spans[1:2])) require.Contains(t, ls.Messages[0], "send a POST request") checkFunc := func() bool { return collector.ModelsLen() == len(models) } require.Eventually(t, checkFunc, time.Second, 10*time.Millisecond) require.Equal(t, models, collector.StealModels()) } func TestExporterShutdownHonorsTimeout(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) defer cancel() exp, err := New("") require.NoError(t, err) innerCtx, innerCancel := context.WithTimeout(ctx, time.Nanosecond) defer innerCancel() <-innerCtx.Done() assert.Errorf(t, exp.Shutdown(innerCtx), context.DeadlineExceeded.Error()) } func TestExporterShutdownHonorsCancel(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) defer cancel() exp, err := New("") require.NoError(t, err) innerCtx, innerCancel := context.WithCancel(ctx) innerCancel() assert.Errorf(t, exp.Shutdown(innerCtx), context.Canceled.Error()) } func TestErrorOnExportShutdownExporter(t *testing.T) { exp, err := New("") require.NoError(t, err) assert.NoError(t, exp.Shutdown(context.Background())) assert.NoError(t, exp.ExportSpans(context.Background(), nil)) } func TestLogrFormatting(t *testing.T) { format := "string %q, int %d" args := []interface{}{"s", 1} var buf bytes.Buffer l := funcr.New(func(prefix, args string) { _, _ = buf.WriteString(fmt.Sprint(prefix, args)) }, funcr.Options{}) exp, err := New("", WithLogr(l)) require.NoError(t, err) exp.logf(format, args...) want := "\"level\"=0 \"msg\"=\"string \\\"s\\\", int 1\"" got := buf.String() assert.Equal(t, want, got) } opentelemetry-go-1.21.0/get_main_pkgs.sh000077500000000000000000000023361452547353200202560ustar00rootroot00000000000000#!/usr/bin/env bash # Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -euo pipefail top_dir='.' 
if [[ $# -gt 0 ]]; then top_dir="${1}" fi p=$(pwd) mod_dirs=() # Note `mapfile` does not exist in older bash versions: # https://stackoverflow.com/questions/41475261/need-alternative-to-readarray-mapfile-for-script-on-older-version-of-bash while IFS= read -r line; do mod_dirs+=("$line") done < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort) for mod_dir in "${mod_dirs[@]}"; do cd "${mod_dir}" while IFS= read -r line; do echo ".${line#${p}}" done < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... | grep '^main|' | cut -f 2- -d '|') cd "${p}" done opentelemetry-go-1.21.0/go.mod000066400000000000000000000010071452547353200162100ustar00rootroot00000000000000module go.opentelemetry.io/otel go 1.20 require ( github.com/go-logr/logr v1.3.0 github.com/go-logr/stdr v1.2.2 github.com/google/go-cmp v0.6.0 github.com/stretchr/testify v1.8.4 go.opentelemetry.io/otel/metric v1.21.0 go.opentelemetry.io/otel/trace v1.21.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) replace go.opentelemetry.io/otel/trace => ./trace replace go.opentelemetry.io/otel/metric => ./metric opentelemetry-go-1.21.0/go.sum000066400000000000000000000026701452547353200162440ustar00rootroot00000000000000github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= opentelemetry-go-1.21.0/handler.go000066400000000000000000000036461452547353200170610ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
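//
// Illustrative sketch (not part of this file): a custom ErrorHandler only has
// to implement Handle(error); it is installed once with SetErrorHandler,
// after which Handle routes errors to it. Only the helpers defined below
// (SetErrorHandler and Handle) are assumed; the handler type and imports in
// the sketch are hypothetical.
//
//	import (
//		"errors"
//		"log"
//		"os"
//
//		"go.opentelemetry.io/otel"
//	)
//
//	type stderrHandler struct{ l *log.Logger }
//
//	func (h stderrHandler) Handle(err error) { h.l.Printf("otel error: %v", err) }
//
//	func installHandler() {
//		otel.SetErrorHandler(stderrHandler{l: log.New(os.Stderr, "", log.LstdFlags)})
//		otel.Handle(errors.New("example error")) // now reaches stderrHandler
//	}
//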
package otel // import "go.opentelemetry.io/otel" import ( "go.opentelemetry.io/otel/internal/global" ) var ( // Compile-time check global.ErrDelegator implements ErrorHandler. _ ErrorHandler = (*global.ErrDelegator)(nil) // Compile-time check global.ErrLogger implements ErrorHandler. _ ErrorHandler = (*global.ErrLogger)(nil) ) // GetErrorHandler returns the global ErrorHandler instance. // // The default ErrorHandler instance returned will log all errors to STDERR // until an override ErrorHandler is set with SetErrorHandler. All // ErrorHandler returned prior to this will automatically forward errors to // the set instance instead of logging. // // Subsequent calls to SetErrorHandler after the first will not forward errors // to the new ErrorHandler for prior returned instances. func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() } // SetErrorHandler sets the global ErrorHandler to h. // // The first time this is called all ErrorHandler previously returned from // GetErrorHandler will send errors to h instead of the default logging // ErrorHandler. Subsequent calls will set the global ErrorHandler, but not // delegate errors to h. func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) } // Handle is a convenience function for ErrorHandler().Handle(err). func Handle(err error) { global.Handle(err) } opentelemetry-go-1.21.0/handler_test.go000066400000000000000000000021441452547353200201100ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otel import ( "testing" "github.com/stretchr/testify/assert" ) type testErrHandler struct { err error } var _ ErrorHandler = &testErrHandler{} func (eh *testErrHandler) Handle(err error) { eh.err = err } func TestGlobalErrorHandler(t *testing.T) { e1 := &testErrHandler{} SetErrorHandler(e1) Handle(assert.AnError) assert.ErrorIs(t, e1.err, assert.AnError) e1.err = nil e2 := &testErrHandler{} SetErrorHandler(e2) GetErrorHandler().Handle(assert.AnError) assert.ErrorIs(t, e2.err, assert.AnError) } opentelemetry-go-1.21.0/internal/000077500000000000000000000000001452547353200167205ustar00rootroot00000000000000opentelemetry-go-1.21.0/internal/attribute/000077500000000000000000000000001452547353200207235ustar00rootroot00000000000000opentelemetry-go-1.21.0/internal/attribute/attribute.go000066400000000000000000000074111452547353200232600ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
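//
// Illustrative sketch (not part of the file that follows): the helpers below
// copy slices into fixed-size arrays, which, unlike slices, are comparable
// values, and the As*Slice functions recover the original slice form. A
// round trip inside this package looks roughly like:
//
//	arr := Int64SliceValue([]int64{1, 2, 3}) // stored as the array [3]int64{1, 2, 3}
//	back := AsInt64Slice(arr)                // []int64{1, 2, 3} again
//	_ = back
//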
/* Package attribute provide several helper functions for some commonly used logic of processing attributes. */ package attribute // import "go.opentelemetry.io/otel/internal/attribute" import ( "reflect" ) // BoolSliceValue converts a bool slice into an array with same elements as slice. func BoolSliceValue(v []bool) interface{} { var zero bool cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v) return cp.Elem().Interface() } // Int64SliceValue converts an int64 slice into an array with same elements as slice. func Int64SliceValue(v []int64) interface{} { var zero int64 cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v) return cp.Elem().Interface() } // Float64SliceValue converts a float64 slice into an array with same elements as slice. func Float64SliceValue(v []float64) interface{} { var zero float64 cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v) return cp.Elem().Interface() } // StringSliceValue converts a string slice into an array with same elements as slice. func StringSliceValue(v []string) interface{} { var zero string cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v) return cp.Elem().Interface() } // AsBoolSlice converts a bool array into a slice into with same elements as array. func AsBoolSlice(v interface{}) []bool { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil } var zero bool correctLen := rv.Len() correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) cpy := reflect.New(correctType) _ = reflect.Copy(cpy.Elem(), rv) return cpy.Elem().Slice(0, correctLen).Interface().([]bool) } // AsInt64Slice converts an int64 array into a slice into with same elements as array. func AsInt64Slice(v interface{}) []int64 { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil } var zero int64 correctLen := rv.Len() correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) cpy := reflect.New(correctType) _ = reflect.Copy(cpy.Elem(), rv) return cpy.Elem().Slice(0, correctLen).Interface().([]int64) } // AsFloat64Slice converts a float64 array into a slice into with same elements as array. func AsFloat64Slice(v interface{}) []float64 { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil } var zero float64 correctLen := rv.Len() correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) cpy := reflect.New(correctType) _ = reflect.Copy(cpy.Elem(), rv) return cpy.Elem().Slice(0, correctLen).Interface().([]float64) } // AsStringSlice converts a string array into a slice into with same elements as array. func AsStringSlice(v interface{}) []string { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil } var zero string correctLen := rv.Len() correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) cpy := reflect.New(correctType) _ = reflect.Copy(cpy.Elem(), rv) return cpy.Elem().Slice(0, correctLen).Interface().([]string) } opentelemetry-go-1.21.0/internal/attribute/attribute_test.go000066400000000000000000000056701452547353200243240ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package attribute import ( "reflect" "testing" ) var wrapFloat64SliceValue = func(v interface{}) interface{} { if vi, ok := v.([]float64); ok { return Float64SliceValue(vi) } return nil } var wrapInt64SliceValue = func(v interface{}) interface{} { if vi, ok := v.([]int64); ok { return Int64SliceValue(vi) } return nil } var wrapBoolSliceValue = func(v interface{}) interface{} { if vi, ok := v.([]bool); ok { return BoolSliceValue(vi) } return nil } var wrapStringSliceValue = func(v interface{}) interface{} { if vi, ok := v.([]string); ok { return StringSliceValue(vi) } return nil } var ( wrapAsBoolSlice = func(v interface{}) interface{} { return AsBoolSlice(v) } wrapAsInt64Slice = func(v interface{}) interface{} { return AsInt64Slice(v) } wrapAsFloat64Slice = func(v interface{}) interface{} { return AsFloat64Slice(v) } wrapAsStringSlice = func(v interface{}) interface{} { return AsStringSlice(v) } ) func TestSliceValue(t *testing.T) { type args struct { v interface{} } tests := []struct { name string args args want interface{} fn func(interface{}) interface{} }{ { name: "Float64SliceValue() two items", args: args{v: []float64{1, 2.3}}, want: [2]float64{1, 2.3}, fn: wrapFloat64SliceValue, }, { name: "Int64SliceValue() two items", args: args{[]int64{1, 2}}, want: [2]int64{1, 2}, fn: wrapInt64SliceValue, }, { name: "BoolSliceValue() two items", args: args{v: []bool{true, false}}, want: [2]bool{true, false}, fn: wrapBoolSliceValue, }, { name: "StringSliceValue() two items", args: args{[]string{"123", "2"}}, want: [2]string{"123", "2"}, fn: wrapStringSliceValue, }, { name: "AsBoolSlice() two items", args: args{[2]bool{true, false}}, want: []bool{true, false}, fn: wrapAsBoolSlice, }, { name: "AsInt64Slice() two items", args: args{[2]int64{1, 3}}, want: []int64{1, 3}, fn: wrapAsInt64Slice, }, { name: "AsFloat64Slice() two items", args: args{[2]float64{1.2, 3.1}}, want: []float64{1.2, 3.1}, fn: wrapAsFloat64Slice, }, { name: "AsStringSlice() two items", args: args{[2]string{"1234", "12"}}, want: []string{"1234", "12"}, fn: wrapAsStringSlice, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := tt.fn(tt.args.v); !reflect.DeepEqual(got, tt.want) { t.Errorf("got %v, want %v", got, tt.want) } }) } } opentelemetry-go-1.21.0/internal/baggage/000077500000000000000000000000001452547353200202755ustar00rootroot00000000000000opentelemetry-go-1.21.0/internal/baggage/baggage.go000066400000000000000000000032101452547353200221750ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
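// Editor's note: an illustrative sketch (not part of the archived baggage.go that
// follows) of how the List, Item, and Property types defined in this package are stored
// on a context with the hooks from context.go. Internal packages are not importable
// outside this module, so this only shows the shape of the API; the "user_id" key and
// the synchronize function are hypothetical stand-ins for what the OpenTracing bridge does.
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/internal/baggage"
)

// synchronize is a stand-in for the bridge's state-sync callback.
func synchronize(ctx context.Context, list baggage.List) context.Context {
	return ctx
}

func main() {
	l := baggage.List{
		"user_id": {Value: "42", Properties: []baggage.Property{{Key: "internal", HasValue: false}}},
	}

	// A set hook is how the OpenTracing bridge observes baggage changes.
	ctx := baggage.ContextWithSetHook(context.Background(), synchronize)
	ctx = baggage.ContextWithList(ctx, l)

	fmt.Println(baggage.ListFromContext(ctx)["user_id"].Value) // 42
}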
/* Package baggage provides base types and functionality to store and retrieve baggage in Go context. This package exists because the OpenTracing bridge to OpenTelemetry needs to synchronize state whenever baggage for a context is modified and that context contains an OpenTracing span. If it were not for this need this package would not need to exist and the `go.opentelemetry.io/otel/baggage` package would be the singular place where W3C baggage is handled. */ package baggage // import "go.opentelemetry.io/otel/internal/baggage" // List is the collection of baggage members. The W3C allows for duplicates, // but OpenTelemetry does not, therefore, this is represented as a map. type List map[string]Item // Item is the value and metadata properties part of a list-member. type Item struct { Value string Properties []Property } // Property is a metadata entry for a list-member. type Property struct { Key, Value string // HasValue indicates if a zero-value value means the property does not // have a value or if it was the zero-value. HasValue bool } opentelemetry-go-1.21.0/internal/baggage/context.go000066400000000000000000000051761452547353200223210ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package baggage // import "go.opentelemetry.io/otel/internal/baggage" import "context" type baggageContextKeyType int const baggageKey baggageContextKeyType = iota // SetHookFunc is a callback called when storing baggage in the context. type SetHookFunc func(context.Context, List) context.Context // GetHookFunc is a callback called when getting baggage from the context. type GetHookFunc func(context.Context, List) List type baggageState struct { list List setHook SetHookFunc getHook GetHookFunc } // ContextWithSetHook returns a copy of parent with hook configured to be // invoked every time ContextWithBaggage is called. // // Passing nil SetHookFunc creates a context with no set hook to call. func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context { var s baggageState if v, ok := parent.Value(baggageKey).(baggageState); ok { s = v } s.setHook = hook return context.WithValue(parent, baggageKey, s) } // ContextWithGetHook returns a copy of parent with hook configured to be // invoked every time FromContext is called. // // Passing nil GetHookFunc creates a context with no get hook to call. func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context { var s baggageState if v, ok := parent.Value(baggageKey).(baggageState); ok { s = v } s.getHook = hook return context.WithValue(parent, baggageKey, s) } // ContextWithList returns a copy of parent with baggage. Passing nil list // returns a context without any baggage. 
func ContextWithList(parent context.Context, list List) context.Context { var s baggageState if v, ok := parent.Value(baggageKey).(baggageState); ok { s = v } s.list = list ctx := context.WithValue(parent, baggageKey, s) if s.setHook != nil { ctx = s.setHook(ctx, list) } return ctx } // ListFromContext returns the baggage contained in ctx. func ListFromContext(ctx context.Context) List { switch v := ctx.Value(baggageKey).(type) { case baggageState: if v.getHook != nil { return v.getHook(ctx, v.list) } return v.list default: return nil } } opentelemetry-go-1.21.0/internal/baggage/context_test.go000066400000000000000000000054301452547353200233510ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package baggage import ( "context" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestContextWithList(t *testing.T) { ctx := context.Background() l := List{"foo": {Value: "1"}} nCtx := ContextWithList(ctx, l) assert.Equal(t, baggageState{list: l}, nCtx.Value(baggageKey)) assert.Nil(t, ctx.Value(baggageKey)) } func TestClearContextOfList(t *testing.T) { l := List{"foo": {Value: "1"}} ctx := context.Background() ctx = context.WithValue(ctx, baggageKey, l) nCtx := ContextWithList(ctx, nil) nL, ok := nCtx.Value(baggageKey).(baggageState) require.True(t, ok, "wrong type stored in context") assert.Nil(t, nL.list) assert.Equal(t, l, ctx.Value(baggageKey)) } func TestListFromContext(t *testing.T) { ctx := context.Background() assert.Nil(t, ListFromContext(ctx)) l := List{"foo": {Value: "1"}} ctx = context.WithValue(ctx, baggageKey, baggageState{list: l}) assert.Equal(t, l, ListFromContext(ctx)) } func TestContextWithSetHook(t *testing.T) { var called bool f := func(ctx context.Context, list List) context.Context { called = true return ctx } ctx := context.Background() ctx = ContextWithSetHook(ctx, f) assert.False(t, called, "SetHookFunc called when setting hook") ctx = ContextWithList(ctx, nil) assert.True(t, called, "SetHookFunc not called when setting List") // Ensure resetting the hook works. called = false ctx = ContextWithSetHook(ctx, f) assert.False(t, called, "SetHookFunc called when re-setting hook") ContextWithList(ctx, nil) assert.True(t, called, "SetHookFunc not called when re-setting List") } func TestContextWithGetHook(t *testing.T) { var called bool f := func(ctx context.Context, list List) List { called = true return list } ctx := context.Background() ctx = ContextWithGetHook(ctx, f) assert.False(t, called, "GetHookFunc called when setting hook") _ = ListFromContext(ctx) assert.True(t, called, "GetHookFunc not called when getting List") // Ensure resetting the hook works. 
called = false ctx = ContextWithGetHook(ctx, f) assert.False(t, called, "GetHookFunc called when re-setting hook") _ = ListFromContext(ctx) assert.True(t, called, "GetHookFunc not called when re-getting List") } opentelemetry-go-1.21.0/internal/gen.go000066400000000000000000000041251452547353200200220ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal // import "go.opentelemetry.io/otel/internal" //go:generate gotmpl --body=./shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go //go:generate gotmpl --body=./shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go //go:generate gotmpl --body=./shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go //go:generate gotmpl --body=./shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go //go:generate gotmpl --body=./shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go //go:generate gotmpl --body=./shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go //go:generate gotmpl --body=./shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go //go:generate gotmpl --body=./shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/internal/matchers\"}" --out=internaltest/harness.go //go:generate gotmpl --body=./shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go //go:generate gotmpl --body=./shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go //go:generate gotmpl --body=./shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go //go:generate gotmpl --body=./shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go opentelemetry-go-1.21.0/internal/global/000077500000000000000000000000001452547353200201605ustar00rootroot00000000000000opentelemetry-go-1.21.0/internal/global/benchmark_test.go000066400000000000000000000017211452547353200235010ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package global import ( "context" "testing" ) func BenchmarkStartEndSpanNoSDK(b *testing.B) { // Compare with BenchmarkStartEndSpan() in // ../../sdk/trace/benchmark_test.go. 
ResetForTest(b) t := TracerProvider().Tracer("Benchmark StartEndSpan") ctx := context.Background() b.ResetTimer() for i := 0; i < b.N; i++ { _, span := t.Start(ctx, "/foo") span.End() } } opentelemetry-go-1.21.0/internal/global/handler.go000066400000000000000000000061751452547353200221350ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package global // import "go.opentelemetry.io/otel/internal/global" import ( "log" "os" "sync/atomic" ) var ( // GlobalErrorHandler provides an ErrorHandler that can be used // throughout an OpenTelemetry instrumented project. When a user // specified ErrorHandler is registered (`SetErrorHandler`) all calls to // `Handle` and will be delegated to the registered ErrorHandler. GlobalErrorHandler = defaultErrorHandler() // Compile-time check that delegator implements ErrorHandler. _ ErrorHandler = (*ErrDelegator)(nil) // Compile-time check that errLogger implements ErrorHandler. _ ErrorHandler = (*ErrLogger)(nil) ) // ErrorHandler handles irremediable events. type ErrorHandler interface { // Handle handles any error deemed irremediable by an OpenTelemetry // component. Handle(error) } type ErrDelegator struct { delegate atomic.Pointer[ErrorHandler] } func (d *ErrDelegator) Handle(err error) { d.getDelegate().Handle(err) } func (d *ErrDelegator) getDelegate() ErrorHandler { return *d.delegate.Load() } // setDelegate sets the ErrorHandler delegate. func (d *ErrDelegator) setDelegate(eh ErrorHandler) { d.delegate.Store(&eh) } func defaultErrorHandler() *ErrDelegator { d := &ErrDelegator{} d.setDelegate(&ErrLogger{l: log.New(os.Stderr, "", log.LstdFlags)}) return d } // ErrLogger logs errors if no delegate is set, otherwise they are delegated. type ErrLogger struct { l *log.Logger } // Handle logs err if no delegate is set, otherwise it is delegated. func (h *ErrLogger) Handle(err error) { h.l.Print(err) } // GetErrorHandler returns the global ErrorHandler instance. // // The default ErrorHandler instance returned will log all errors to STDERR // until an override ErrorHandler is set with SetErrorHandler. All // ErrorHandler returned prior to this will automatically forward errors to // the set instance instead of logging. // // Subsequent calls to SetErrorHandler after the first will not forward errors // to the new ErrorHandler for prior returned instances. func GetErrorHandler() ErrorHandler { return GlobalErrorHandler } // SetErrorHandler sets the global ErrorHandler to h. // // The first time this is called all ErrorHandler previously returned from // GetErrorHandler will send errors to h instead of the default logging // ErrorHandler. Subsequent calls will set the global ErrorHandler, but not // delegate errors to h. func SetErrorHandler(h ErrorHandler) { GlobalErrorHandler.setDelegate(h) } // Handle is a convenience function for ErrorHandler().Handle(err). 
func Handle(err error) { GetErrorHandler().Handle(err) } opentelemetry-go-1.21.0/internal/global/handler_test.go000066400000000000000000000127601452547353200231710ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package global import ( "bytes" "errors" "io" "log" "sync" "testing" "github.com/stretchr/testify/suite" ) type testErrCatcher []string func (l *testErrCatcher) Write(p []byte) (int, error) { msg := bytes.TrimRight(p, "\n") (*l) = append(*l, string(msg)) return len(msg), nil } func (l *testErrCatcher) Reset() { *l = testErrCatcher([]string{}) } func (l *testErrCatcher) Got() []string { return []string(*l) } func causeErr(text string) { Handle(errors.New(text)) } type HandlerTestSuite struct { suite.Suite origHandler ErrorHandler errCatcher *testErrCatcher } func (s *HandlerTestSuite) SetupSuite() { s.errCatcher = new(testErrCatcher) s.origHandler = GlobalErrorHandler.getDelegate() GlobalErrorHandler.setDelegate(&ErrLogger{l: log.New(s.errCatcher, "", 0)}) } func (s *HandlerTestSuite) TearDownSuite() { GlobalErrorHandler.setDelegate(s.origHandler) } func (s *HandlerTestSuite) SetupTest() { s.errCatcher.Reset() } func (s *HandlerTestSuite) TearDownTest() { GlobalErrorHandler.setDelegate(&ErrLogger{l: log.New(s.errCatcher, "", 0)}) } func (s *HandlerTestSuite) TestGlobalHandler() { errs := []string{"one", "two"} GetErrorHandler().Handle(errors.New(errs[0])) Handle(errors.New(errs[1])) s.Assert().Equal(errs, s.errCatcher.Got()) } func (s *HandlerTestSuite) TestDelegatedHandler() { eh := GetErrorHandler() newErrLogger := new(testErrCatcher) SetErrorHandler(&ErrLogger{l: log.New(newErrLogger, "", 0)}) errs := []string{"TestDelegatedHandler"} eh.Handle(errors.New(errs[0])) s.Assert().Equal(errs, newErrLogger.Got()) } func (s *HandlerTestSuite) TestNoDropsOnDelegate() { causeErr("") s.Require().Len(s.errCatcher.Got(), 1) // Change to another Handler. We are testing this is loss-less. 
newErrLogger := new(testErrCatcher) secondary := &ErrLogger{ l: log.New(newErrLogger, "", 0), } SetErrorHandler(secondary) causeErr("") s.Assert().Len(s.errCatcher.Got(), 1, "original Handler used after delegation") s.Assert().Len(newErrLogger.Got(), 1, "new Handler not used after delegation") } func (s *HandlerTestSuite) TestAllowMultipleSets() { notUsed := new(testErrCatcher) secondary := &ErrLogger{l: log.New(notUsed, "", 0)} SetErrorHandler(secondary) s.Require().Same(GetErrorHandler(), GlobalErrorHandler, "set changed globalErrorHandler") s.Require().Same(GlobalErrorHandler.getDelegate(), secondary, "new Handler not set") tertiary := &ErrLogger{l: log.New(notUsed, "", 0)} SetErrorHandler(tertiary) s.Require().Same(GetErrorHandler(), GlobalErrorHandler, "set changed globalErrorHandler") s.Assert().Same(GlobalErrorHandler.getDelegate(), tertiary, "user Handler not overridden") } func TestHandlerTestSuite(t *testing.T) { suite.Run(t, new(HandlerTestSuite)) } func TestHandlerConcurrentSafe(t *testing.T) { // In order not to pollute the test output. SetErrorHandler(&ErrLogger{log.New(io.Discard, "", 0)}) var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() SetErrorHandler(&ErrLogger{log.New(io.Discard, "", 0)}) }() wg.Add(1) go func() { defer wg.Done() Handle(errors.New("error")) }() wg.Wait() reset() } func BenchmarkErrorHandler(b *testing.B) { primary := &ErrLogger{l: log.New(io.Discard, "", 0)} secondary := &ErrLogger{l: log.New(io.Discard, "", 0)} tertiary := &ErrLogger{l: log.New(io.Discard, "", 0)} GlobalErrorHandler.setDelegate(primary) err := errors.New("benchmark error handler") b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { GetErrorHandler().Handle(err) Handle(err) SetErrorHandler(secondary) GetErrorHandler().Handle(err) Handle(err) SetErrorHandler(tertiary) GetErrorHandler().Handle(err) Handle(err) GlobalErrorHandler.setDelegate(primary) } reset() } var eh ErrorHandler func BenchmarkGetDefaultErrorHandler(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { eh = GetErrorHandler() } } func BenchmarkGetDelegatedErrorHandler(b *testing.B) { SetErrorHandler(&ErrLogger{l: log.New(io.Discard, "", 0)}) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { eh = GetErrorHandler() } reset() } func BenchmarkDefaultErrorHandlerHandle(b *testing.B) { GlobalErrorHandler.setDelegate( &ErrLogger{l: log.New(io.Discard, "", 0)}, ) eh := GetErrorHandler() err := errors.New("benchmark default error handler handle") b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { eh.Handle(err) } reset() } func BenchmarkDelegatedErrorHandlerHandle(b *testing.B) { eh := GetErrorHandler() SetErrorHandler(&ErrLogger{l: log.New(io.Discard, "", 0)}) err := errors.New("benchmark delegated error handler handle") b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { eh.Handle(err) } reset() } func BenchmarkSetErrorHandlerDelegation(b *testing.B) { alt := &ErrLogger{l: log.New(io.Discard, "", 0)} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { SetErrorHandler(alt) reset() } } func reset() { GlobalErrorHandler = defaultErrorHandler() } opentelemetry-go-1.21.0/internal/global/instruments.go000066400000000000000000000206621452547353200231100ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package global // import "go.opentelemetry.io/otel/internal/global" import ( "context" "sync/atomic" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/embedded" ) // unwrapper unwraps to return the underlying instrument implementation. type unwrapper interface { Unwrap() metric.Observable } type afCounter struct { embedded.Float64ObservableCounter metric.Float64Observable name string opts []metric.Float64ObservableCounterOption delegate atomic.Value // metric.Float64ObservableCounter } var ( _ unwrapper = (*afCounter)(nil) _ metric.Float64ObservableCounter = (*afCounter)(nil) ) func (i *afCounter) setDelegate(m metric.Meter) { ctr, err := m.Float64ObservableCounter(i.name, i.opts...) if err != nil { GetErrorHandler().Handle(err) return } i.delegate.Store(ctr) } func (i *afCounter) Unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableCounter) } return nil } type afUpDownCounter struct { embedded.Float64ObservableUpDownCounter metric.Float64Observable name string opts []metric.Float64ObservableUpDownCounterOption delegate atomic.Value // metric.Float64ObservableUpDownCounter } var ( _ unwrapper = (*afUpDownCounter)(nil) _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil) ) func (i *afUpDownCounter) setDelegate(m metric.Meter) { ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...) if err != nil { GetErrorHandler().Handle(err) return } i.delegate.Store(ctr) } func (i *afUpDownCounter) Unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableUpDownCounter) } return nil } type afGauge struct { embedded.Float64ObservableGauge metric.Float64Observable name string opts []metric.Float64ObservableGaugeOption delegate atomic.Value // metric.Float64ObservableGauge } var ( _ unwrapper = (*afGauge)(nil) _ metric.Float64ObservableGauge = (*afGauge)(nil) ) func (i *afGauge) setDelegate(m metric.Meter) { ctr, err := m.Float64ObservableGauge(i.name, i.opts...) if err != nil { GetErrorHandler().Handle(err) return } i.delegate.Store(ctr) } func (i *afGauge) Unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Float64ObservableGauge) } return nil } type aiCounter struct { embedded.Int64ObservableCounter metric.Int64Observable name string opts []metric.Int64ObservableCounterOption delegate atomic.Value // metric.Int64ObservableCounter } var ( _ unwrapper = (*aiCounter)(nil) _ metric.Int64ObservableCounter = (*aiCounter)(nil) ) func (i *aiCounter) setDelegate(m metric.Meter) { ctr, err := m.Int64ObservableCounter(i.name, i.opts...) 
if err != nil { GetErrorHandler().Handle(err) return } i.delegate.Store(ctr) } func (i *aiCounter) Unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableCounter) } return nil } type aiUpDownCounter struct { embedded.Int64ObservableUpDownCounter metric.Int64Observable name string opts []metric.Int64ObservableUpDownCounterOption delegate atomic.Value // metric.Int64ObservableUpDownCounter } var ( _ unwrapper = (*aiUpDownCounter)(nil) _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil) ) func (i *aiUpDownCounter) setDelegate(m metric.Meter) { ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...) if err != nil { GetErrorHandler().Handle(err) return } i.delegate.Store(ctr) } func (i *aiUpDownCounter) Unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableUpDownCounter) } return nil } type aiGauge struct { embedded.Int64ObservableGauge metric.Int64Observable name string opts []metric.Int64ObservableGaugeOption delegate atomic.Value // metric.Int64ObservableGauge } var ( _ unwrapper = (*aiGauge)(nil) _ metric.Int64ObservableGauge = (*aiGauge)(nil) ) func (i *aiGauge) setDelegate(m metric.Meter) { ctr, err := m.Int64ObservableGauge(i.name, i.opts...) if err != nil { GetErrorHandler().Handle(err) return } i.delegate.Store(ctr) } func (i *aiGauge) Unwrap() metric.Observable { if ctr := i.delegate.Load(); ctr != nil { return ctr.(metric.Int64ObservableGauge) } return nil } // Sync Instruments. type sfCounter struct { embedded.Float64Counter name string opts []metric.Float64CounterOption delegate atomic.Value // metric.Float64Counter } var _ metric.Float64Counter = (*sfCounter)(nil) func (i *sfCounter) setDelegate(m metric.Meter) { ctr, err := m.Float64Counter(i.name, i.opts...) if err != nil { GetErrorHandler().Handle(err) return } i.delegate.Store(ctr) } func (i *sfCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) { if ctr := i.delegate.Load(); ctr != nil { ctr.(metric.Float64Counter).Add(ctx, incr, opts...) } } type sfUpDownCounter struct { embedded.Float64UpDownCounter name string opts []metric.Float64UpDownCounterOption delegate atomic.Value // metric.Float64UpDownCounter } var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil) func (i *sfUpDownCounter) setDelegate(m metric.Meter) { ctr, err := m.Float64UpDownCounter(i.name, i.opts...) if err != nil { GetErrorHandler().Handle(err) return } i.delegate.Store(ctr) } func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) { if ctr := i.delegate.Load(); ctr != nil { ctr.(metric.Float64UpDownCounter).Add(ctx, incr, opts...) } } type sfHistogram struct { embedded.Float64Histogram name string opts []metric.Float64HistogramOption delegate atomic.Value // metric.Float64Histogram } var _ metric.Float64Histogram = (*sfHistogram)(nil) func (i *sfHistogram) setDelegate(m metric.Meter) { ctr, err := m.Float64Histogram(i.name, i.opts...) if err != nil { GetErrorHandler().Handle(err) return } i.delegate.Store(ctr) } func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.RecordOption) { if ctr := i.delegate.Load(); ctr != nil { ctr.(metric.Float64Histogram).Record(ctx, x, opts...) } } type siCounter struct { embedded.Int64Counter name string opts []metric.Int64CounterOption delegate atomic.Value // metric.Int64Counter } var _ metric.Int64Counter = (*siCounter)(nil) func (i *siCounter) setDelegate(m metric.Meter) { ctr, err := m.Int64Counter(i.name, i.opts...) 
if err != nil { GetErrorHandler().Handle(err) return } i.delegate.Store(ctr) } func (i *siCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) { if ctr := i.delegate.Load(); ctr != nil { ctr.(metric.Int64Counter).Add(ctx, x, opts...) } } type siUpDownCounter struct { embedded.Int64UpDownCounter name string opts []metric.Int64UpDownCounterOption delegate atomic.Value // metric.Int64UpDownCounter } var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil) func (i *siUpDownCounter) setDelegate(m metric.Meter) { ctr, err := m.Int64UpDownCounter(i.name, i.opts...) if err != nil { GetErrorHandler().Handle(err) return } i.delegate.Store(ctr) } func (i *siUpDownCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) { if ctr := i.delegate.Load(); ctr != nil { ctr.(metric.Int64UpDownCounter).Add(ctx, x, opts...) } } type siHistogram struct { embedded.Int64Histogram name string opts []metric.Int64HistogramOption delegate atomic.Value // metric.Int64Histogram } var _ metric.Int64Histogram = (*siHistogram)(nil) func (i *siHistogram) setDelegate(m metric.Meter) { ctr, err := m.Int64Histogram(i.name, i.opts...) if err != nil { GetErrorHandler().Handle(err) return } i.delegate.Store(ctr) } func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.RecordOption) { if ctr := i.delegate.Load(); ctr != nil { ctr.(metric.Int64Histogram).Record(ctx, x, opts...) } } opentelemetry-go-1.21.0/internal/global/instruments_test.go000066400000000000000000000121041452547353200241370ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
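// Editor's note: a hypothetical end-to-end sketch of the delegation behavior implemented
// by the wrappers in instruments.go above: instruments obtained from the global
// MeterProvider before configuration are placeholders, and begin forwarding once
// SetMeterProvider is called. The meter name "example" and instrument name "requests"
// are assumptions, and the noop provider stands in for a real SDK provider.
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric/noop"
)

func main() {
	// Obtained before any provider is registered, so this is a global placeholder.
	counter, err := otel.GetMeterProvider().Meter("example").Int64Counter("requests")
	if err != nil {
		otel.Handle(err)
	}

	// Recorded against the placeholder; dropped until a delegate exists.
	counter.Add(context.Background(), 1)

	// Installing a provider swaps a delegate in behind the placeholder.
	otel.SetMeterProvider(noop.NewMeterProvider())

	// Subsequent calls are forwarded to the delegate instrument.
	counter.Add(context.Background(), 1)
}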
package global import ( "context" "testing" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/embedded" "go.opentelemetry.io/otel/metric/noop" ) func testFloat64ConcurrentSafe(interact func(float64), setDelegate func(metric.Meter)) { done := make(chan struct{}) finish := make(chan struct{}) go func() { defer close(done) for { interact(1) select { case <-finish: return default: } } }() setDelegate(noop.NewMeterProvider().Meter("")) close(finish) <-done } func testInt64ConcurrentSafe(interact func(int64), setDelegate func(metric.Meter)) { done := make(chan struct{}) finish := make(chan struct{}) go func() { defer close(done) for { interact(1) select { case <-finish: return default: } } }() setDelegate(noop.NewMeterProvider().Meter("")) close(finish) <-done } func TestAsyncInstrumentSetDelegateConcurrentSafe(t *testing.T) { // Float64 Instruments t.Run("Float64", func(t *testing.T) { t.Run("Counter", func(t *testing.T) { delegate := &afCounter{} f := func(float64) { _ = delegate.Unwrap() } testFloat64ConcurrentSafe(f, delegate.setDelegate) }) t.Run("UpDownCounter", func(t *testing.T) { delegate := &afUpDownCounter{} f := func(float64) { _ = delegate.Unwrap() } testFloat64ConcurrentSafe(f, delegate.setDelegate) }) t.Run("Gauge", func(t *testing.T) { delegate := &afGauge{} f := func(float64) { _ = delegate.Unwrap() } testFloat64ConcurrentSafe(f, delegate.setDelegate) }) }) // Int64 Instruments t.Run("Int64", func(t *testing.T) { t.Run("Counter", func(t *testing.T) { delegate := &aiCounter{} f := func(int64) { _ = delegate.Unwrap() } testInt64ConcurrentSafe(f, delegate.setDelegate) }) t.Run("UpDownCounter", func(t *testing.T) { delegate := &aiUpDownCounter{} f := func(int64) { _ = delegate.Unwrap() } testInt64ConcurrentSafe(f, delegate.setDelegate) }) t.Run("Gauge", func(t *testing.T) { delegate := &aiGauge{} f := func(int64) { _ = delegate.Unwrap() } testInt64ConcurrentSafe(f, delegate.setDelegate) }) }) } func TestSyncInstrumentSetDelegateConcurrentSafe(t *testing.T) { // Float64 Instruments t.Run("Float64", func(t *testing.T) { t.Run("Counter", func(t *testing.T) { delegate := &sfCounter{} f := func(v float64) { delegate.Add(context.Background(), v) } testFloat64ConcurrentSafe(f, delegate.setDelegate) }) t.Run("UpDownCounter", func(t *testing.T) { delegate := &sfUpDownCounter{} f := func(v float64) { delegate.Add(context.Background(), v) } testFloat64ConcurrentSafe(f, delegate.setDelegate) }) t.Run("Histogram", func(t *testing.T) { delegate := &sfHistogram{} f := func(v float64) { delegate.Record(context.Background(), v) } testFloat64ConcurrentSafe(f, delegate.setDelegate) }) }) // Int64 Instruments t.Run("Int64", func(t *testing.T) { t.Run("Counter", func(t *testing.T) { delegate := &siCounter{} f := func(v int64) { delegate.Add(context.Background(), v) } testInt64ConcurrentSafe(f, delegate.setDelegate) }) t.Run("UpDownCounter", func(t *testing.T) { delegate := &siUpDownCounter{} f := func(v int64) { delegate.Add(context.Background(), v) } testInt64ConcurrentSafe(f, delegate.setDelegate) }) t.Run("Histogram", func(t *testing.T) { delegate := &siHistogram{} f := func(v int64) { delegate.Record(context.Background(), v) } testInt64ConcurrentSafe(f, delegate.setDelegate) }) }) } type testCountingFloatInstrument struct { count int metric.Float64Observable embedded.Float64Counter embedded.Float64UpDownCounter embedded.Float64Histogram embedded.Float64ObservableCounter embedded.Float64ObservableUpDownCounter embedded.Float64ObservableGauge } func (i *testCountingFloatInstrument) 
observe() { i.count++ } func (i *testCountingFloatInstrument) Add(context.Context, float64, ...metric.AddOption) { i.count++ } func (i *testCountingFloatInstrument) Record(context.Context, float64, ...metric.RecordOption) { i.count++ } type testCountingIntInstrument struct { count int metric.Int64Observable embedded.Int64Counter embedded.Int64UpDownCounter embedded.Int64Histogram embedded.Int64ObservableCounter embedded.Int64ObservableUpDownCounter embedded.Int64ObservableGauge } func (i *testCountingIntInstrument) observe() { i.count++ } func (i *testCountingIntInstrument) Add(context.Context, int64, ...metric.AddOption) { i.count++ } func (i *testCountingIntInstrument) Record(context.Context, int64, ...metric.RecordOption) { i.count++ } opentelemetry-go-1.21.0/internal/global/internal_logging.go000066400000000000000000000044711452547353200240370ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package global // import "go.opentelemetry.io/otel/internal/global" import ( "log" "os" "sync/atomic" "github.com/go-logr/logr" "github.com/go-logr/stdr" ) // globalLogger is the logging interface used within the otel api and sdk provide details of the internals. // // The default logger uses stdr which is backed by the standard `log.Logger` // interface. This logger will only show messages at the Error Level. var globalLogger atomic.Pointer[logr.Logger] func init() { SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) } // SetLogger overrides the globalLogger with l. // // To see Warn messages use a logger with `l.V(1).Enabled() == true` // To see Info messages use a logger with `l.V(4).Enabled() == true` // To see Debug messages use a logger with `l.V(8).Enabled() == true`. func SetLogger(l logr.Logger) { globalLogger.Store(&l) } func getLogger() logr.Logger { return *globalLogger.Load() } // Info prints messages about the general state of the API or SDK. // This should usually be less than 5 messages a minute. func Info(msg string, keysAndValues ...interface{}) { getLogger().V(4).Info(msg, keysAndValues...) } // Error prints messages about exceptional states of the API or SDK. func Error(err error, msg string, keysAndValues ...interface{}) { getLogger().Error(err, msg, keysAndValues...) } // Debug prints messages about all internal changes in the API or SDK. func Debug(msg string, keysAndValues ...interface{}) { getLogger().V(8).Info(msg, keysAndValues...) } // Warn prints messages about warnings in the API or SDK. // Not an error but is likely more important than an informational event. func Warn(msg string, keysAndValues ...interface{}) { getLogger().V(1).Info(msg, keysAndValues...) } opentelemetry-go-1.21.0/internal/global/internal_logging_test.go000066400000000000000000000043051452547353200250720ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package global import ( "bytes" "errors" "io" "log" "sync" "testing" "github.com/go-logr/logr" "github.com/stretchr/testify/assert" "github.com/go-logr/logr/funcr" "github.com/go-logr/stdr" ) func TestLoggerConcurrentSafe(t *testing.T) { var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() SetLogger(stdr.New(log.New(io.Discard, "", 0))) }() wg.Add(1) go func() { defer wg.Done() Info("") }() wg.Wait() reset() } func TestLogLevel(t *testing.T) { tests := []struct { name string verbosity int logF func() want string }{ { name: "Verbosity 0 should log errors.", verbosity: 0, want: `"msg"="foobar" "error"="foobar"`, logF: func() { Error(errors.New("foobar"), "foobar") }, }, { name: "Verbosity 1 should log warnings", verbosity: 1, want: `"level"=1 "msg"="foo"`, logF: func() { Warn("foo") }, }, { name: "Verbosity 4 should log info", verbosity: 4, want: `"level"=4 "msg"="bar"`, logF: func() { Info("bar") }, }, { name: "Verbosity 8 should log debug", verbosity: 8, want: `"level"=8 "msg"="baz"`, logF: func() { Debug("baz") }, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { var buf bytes.Buffer SetLogger(newBuffLogger(&buf, test.verbosity)) test.logF() assert.Equal(t, test.want, buf.String()) }) } } func newBuffLogger(buf *bytes.Buffer, verbosity int) logr.Logger { return funcr.New(func(prefix, args string) { _, _ = buf.Write([]byte(args)) }, funcr.Options{ Verbosity: verbosity, }) } opentelemetry-go-1.21.0/internal/global/meter.go000066400000000000000000000227021452547353200216260ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package global // import "go.opentelemetry.io/otel/internal/global" import ( "container/list" "sync" "sync/atomic" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/embedded" ) // meterProvider is a placeholder for a configured SDK MeterProvider. // // All MeterProvider functionality is forwarded to a delegate once // configured. type meterProvider struct { embedded.MeterProvider mtx sync.Mutex meters map[il]*meter delegate metric.MeterProvider } // setDelegate configures p to delegate all MeterProvider functionality to // provider. // // All Meters provided prior to this function call are switched out to be // Meters provided by provider. All instruments and callbacks are recreated and // delegated. // // It is guaranteed by the caller that this happens only once. 
func (p *meterProvider) setDelegate(provider metric.MeterProvider) { p.mtx.Lock() defer p.mtx.Unlock() p.delegate = provider if len(p.meters) == 0 { return } for _, meter := range p.meters { meter.setDelegate(provider) } p.meters = nil } // Meter implements MeterProvider. func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter { p.mtx.Lock() defer p.mtx.Unlock() if p.delegate != nil { return p.delegate.Meter(name, opts...) } // At this moment it is guaranteed that no sdk is installed, save the meter in the meters map. c := metric.NewMeterConfig(opts...) key := il{ name: name, version: c.InstrumentationVersion(), } if p.meters == nil { p.meters = make(map[il]*meter) } if val, ok := p.meters[key]; ok { return val } t := &meter{name: name, opts: opts} p.meters[key] = t return t } // meter is a placeholder for a metric.Meter. // // All Meter functionality is forwarded to a delegate once configured. // Otherwise, all functionality is forwarded to a NoopMeter. type meter struct { embedded.Meter name string opts []metric.MeterOption mtx sync.Mutex instruments []delegatedInstrument registry list.List delegate atomic.Value // metric.Meter } type delegatedInstrument interface { setDelegate(metric.Meter) } // setDelegate configures m to delegate all Meter functionality to Meters // created by provider. // // All subsequent calls to the Meter methods will be passed to the delegate. // // It is guaranteed by the caller that this happens only once. func (m *meter) setDelegate(provider metric.MeterProvider) { meter := provider.Meter(m.name, m.opts...) m.delegate.Store(meter) m.mtx.Lock() defer m.mtx.Unlock() for _, inst := range m.instruments { inst.setDelegate(meter) } for e := m.registry.Front(); e != nil; e = e.Next() { r := e.Value.(*registration) r.setDelegate(meter) m.registry.Remove(e) } m.instruments = nil m.registry.Init() } func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { if del, ok := m.delegate.Load().(metric.Meter); ok { return del.Int64Counter(name, options...) } m.mtx.Lock() defer m.mtx.Unlock() i := &siCounter{name: name, opts: options} m.instruments = append(m.instruments, i) return i, nil } func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { if del, ok := m.delegate.Load().(metric.Meter); ok { return del.Int64UpDownCounter(name, options...) } m.mtx.Lock() defer m.mtx.Unlock() i := &siUpDownCounter{name: name, opts: options} m.instruments = append(m.instruments, i) return i, nil } func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { if del, ok := m.delegate.Load().(metric.Meter); ok { return del.Int64Histogram(name, options...) } m.mtx.Lock() defer m.mtx.Unlock() i := &siHistogram{name: name, opts: options} m.instruments = append(m.instruments, i) return i, nil } func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { if del, ok := m.delegate.Load().(metric.Meter); ok { return del.Int64ObservableCounter(name, options...) 
} m.mtx.Lock() defer m.mtx.Unlock() i := &aiCounter{name: name, opts: options} m.instruments = append(m.instruments, i) return i, nil } func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { if del, ok := m.delegate.Load().(metric.Meter); ok { return del.Int64ObservableUpDownCounter(name, options...) } m.mtx.Lock() defer m.mtx.Unlock() i := &aiUpDownCounter{name: name, opts: options} m.instruments = append(m.instruments, i) return i, nil } func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { if del, ok := m.delegate.Load().(metric.Meter); ok { return del.Int64ObservableGauge(name, options...) } m.mtx.Lock() defer m.mtx.Unlock() i := &aiGauge{name: name, opts: options} m.instruments = append(m.instruments, i) return i, nil } func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { if del, ok := m.delegate.Load().(metric.Meter); ok { return del.Float64Counter(name, options...) } m.mtx.Lock() defer m.mtx.Unlock() i := &sfCounter{name: name, opts: options} m.instruments = append(m.instruments, i) return i, nil } func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { if del, ok := m.delegate.Load().(metric.Meter); ok { return del.Float64UpDownCounter(name, options...) } m.mtx.Lock() defer m.mtx.Unlock() i := &sfUpDownCounter{name: name, opts: options} m.instruments = append(m.instruments, i) return i, nil } func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { if del, ok := m.delegate.Load().(metric.Meter); ok { return del.Float64Histogram(name, options...) } m.mtx.Lock() defer m.mtx.Unlock() i := &sfHistogram{name: name, opts: options} m.instruments = append(m.instruments, i) return i, nil } func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { if del, ok := m.delegate.Load().(metric.Meter); ok { return del.Float64ObservableCounter(name, options...) } m.mtx.Lock() defer m.mtx.Unlock() i := &afCounter{name: name, opts: options} m.instruments = append(m.instruments, i) return i, nil } func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { if del, ok := m.delegate.Load().(metric.Meter); ok { return del.Float64ObservableUpDownCounter(name, options...) } m.mtx.Lock() defer m.mtx.Unlock() i := &afUpDownCounter{name: name, opts: options} m.instruments = append(m.instruments, i) return i, nil } func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { if del, ok := m.delegate.Load().(metric.Meter); ok { return del.Float64ObservableGauge(name, options...) } m.mtx.Lock() defer m.mtx.Unlock() i := &afGauge{name: name, opts: options} m.instruments = append(m.instruments, i) return i, nil } // RegisterCallback captures the function that will be called during Collect. func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) { if del, ok := m.delegate.Load().(metric.Meter); ok { insts = unwrapInstruments(insts) return del.RegisterCallback(f, insts...) 
} m.mtx.Lock() defer m.mtx.Unlock() reg := ®istration{instruments: insts, function: f} e := m.registry.PushBack(reg) reg.unreg = func() error { m.mtx.Lock() _ = m.registry.Remove(e) m.mtx.Unlock() return nil } return reg, nil } type wrapped interface { unwrap() metric.Observable } func unwrapInstruments(instruments []metric.Observable) []metric.Observable { out := make([]metric.Observable, 0, len(instruments)) for _, inst := range instruments { if in, ok := inst.(wrapped); ok { out = append(out, in.unwrap()) } else { out = append(out, inst) } } return out } type registration struct { embedded.Registration instruments []metric.Observable function metric.Callback unreg func() error unregMu sync.Mutex } func (c *registration) setDelegate(m metric.Meter) { insts := unwrapInstruments(c.instruments) c.unregMu.Lock() defer c.unregMu.Unlock() if c.unreg == nil { // Unregister already called. return } reg, err := m.RegisterCallback(c.function, insts...) if err != nil { GetErrorHandler().Handle(err) } c.unreg = reg.Unregister } func (c *registration) Unregister() error { c.unregMu.Lock() defer c.unregMu.Unlock() if c.unreg == nil { // Unregister already called. return nil } var err error err, c.unreg = c.unreg(), nil return err } opentelemetry-go-1.21.0/internal/global/meter_test.go000066400000000000000000000260201452547353200226620ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
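// Editor's note: an illustrative sketch (not part of the archived meter_test.go that
// follows) of the callback-registration path handled by meter.RegisterCallback above: a
// registration made before an SDK exists is stored and re-registered on the delegate once
// a provider is installed, and Unregister is safe either way. The meter name, the
// "queue.depth" gauge, and the noop provider are assumptions standing in for an SDK setup.
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/noop"
)

func main() {
	meter := otel.GetMeterProvider().Meter("example")

	gauge, err := meter.Int64ObservableGauge("queue.depth")
	if err != nil {
		otel.Handle(err)
	}

	// Registered before an SDK exists: the global meter stores the registration and
	// replays it on the delegate once a provider is installed.
	reg, err := meter.RegisterCallback(func(ctx context.Context, o metric.Observer) error {
		o.ObserveInt64(gauge, 7)
		return nil
	}, gauge)
	if err != nil {
		otel.Handle(err)
	}

	otel.SetMeterProvider(noop.NewMeterProvider())

	// Unregister works whether or not delegation has already happened.
	if err := reg.Unregister(); err != nil {
		otel.Handle(err)
	}
}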
package global // import "go.opentelemetry.io/otel/internal/global" import ( "context" "fmt" "sync" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/noop" ) func TestMeterProviderConcurrentSafe(t *testing.T) { mp := &meterProvider{} done := make(chan struct{}) finish := make(chan struct{}) go func() { defer close(done) for i := 0; ; i++ { mp.Meter(fmt.Sprintf("a%d", i)) select { case <-finish: return default: } } }() mp.setDelegate(noop.NewMeterProvider()) close(finish) <-done } var zeroCallback metric.Callback = func(ctx context.Context, or metric.Observer) error { return nil } func TestMeterConcurrentSafe(t *testing.T) { mtr := &meter{} wg := &sync.WaitGroup{} wg.Add(1) done := make(chan struct{}) finish := make(chan struct{}) go func() { defer close(done) for i, once := 0, false; ; i++ { name := fmt.Sprintf("a%d", i) _, _ = mtr.Float64ObservableCounter(name) _, _ = mtr.Float64ObservableUpDownCounter(name) _, _ = mtr.Float64ObservableGauge(name) _, _ = mtr.Int64ObservableCounter(name) _, _ = mtr.Int64ObservableUpDownCounter(name) _, _ = mtr.Int64ObservableGauge(name) _, _ = mtr.Float64Counter(name) _, _ = mtr.Float64UpDownCounter(name) _, _ = mtr.Float64Histogram(name) _, _ = mtr.Int64Counter(name) _, _ = mtr.Int64UpDownCounter(name) _, _ = mtr.Int64Histogram(name) _, _ = mtr.RegisterCallback(zeroCallback) if !once { wg.Done() once = true } select { case <-finish: return default: } } }() wg.Wait() mtr.setDelegate(noop.NewMeterProvider()) close(finish) <-done } func TestUnregisterConcurrentSafe(t *testing.T) { mtr := &meter{} reg, err := mtr.RegisterCallback(zeroCallback) require.NoError(t, err) wg := &sync.WaitGroup{} wg.Add(1) done := make(chan struct{}) finish := make(chan struct{}) go func() { defer close(done) for i, once := 0, false; ; i++ { _ = reg.Unregister() if !once { wg.Done() once = true } select { case <-finish: return default: } } }() _ = reg.Unregister() wg.Wait() mtr.setDelegate(noop.NewMeterProvider()) close(finish) <-done } func testSetupAllInstrumentTypes(t *testing.T, m metric.Meter) (metric.Float64Counter, metric.Float64ObservableCounter) { afcounter, err := m.Float64ObservableCounter("test_Async_Counter") require.NoError(t, err) _, err = m.Float64ObservableUpDownCounter("test_Async_UpDownCounter") assert.NoError(t, err) _, err = m.Float64ObservableGauge("test_Async_Gauge") assert.NoError(t, err) _, err = m.Int64ObservableCounter("test_Async_Counter") assert.NoError(t, err) _, err = m.Int64ObservableUpDownCounter("test_Async_UpDownCounter") assert.NoError(t, err) _, err = m.Int64ObservableGauge("test_Async_Gauge") assert.NoError(t, err) _, err = m.RegisterCallback(func(ctx context.Context, obs metric.Observer) error { obs.ObserveFloat64(afcounter, 3) return nil }, afcounter) require.NoError(t, err) sfcounter, err := m.Float64Counter("test_Async_Counter") require.NoError(t, err) _, err = m.Float64UpDownCounter("test_Async_UpDownCounter") assert.NoError(t, err) _, err = m.Float64Histogram("test_Async_Histogram") assert.NoError(t, err) _, err = m.Int64Counter("test_Async_Counter") assert.NoError(t, err) _, err = m.Int64UpDownCounter("test_Async_UpDownCounter") assert.NoError(t, err) _, err = m.Int64Histogram("test_Async_Histogram") assert.NoError(t, err) return sfcounter, afcounter } // This is to emulate a read from an exporter. 
func testCollect(t *testing.T, m metric.Meter) { if tMeter, ok := m.(*meter); ok { m, ok = tMeter.delegate.Load().(metric.Meter) if !ok { t.Error("meter was not delegated") return } } tMeter, ok := m.(*testMeter) if !ok { t.Error("collect called on non-test Meter") return } tMeter.collect() } func TestMeterProviderDelegatesCalls(t *testing.T) { // The global MeterProvider should directly call the underlying MeterProvider // if it is set prior to Meter() being called. // globalMeterProvider := otel.GetMeterProvider globalMeterProvider := &meterProvider{} mp := &testMeterProvider{} // otel.SetMeterProvider(mp) globalMeterProvider.setDelegate(mp) assert.Equal(t, 0, mp.count) meter := globalMeterProvider.Meter("go.opentelemetry.io/otel/metric/internal/global/meter_test") ctr, actr := testSetupAllInstrumentTypes(t, meter) ctr.Add(context.Background(), 5) testCollect(t, meter) // This is a hacky way to emulate a read from an exporter // Calls to Meter() after setDelegate() should be executed by the delegate require.IsType(t, &testMeter{}, meter) tMeter := meter.(*testMeter) assert.Equal(t, 1, tMeter.afCount) assert.Equal(t, 1, tMeter.afUDCount) assert.Equal(t, 1, tMeter.afGauge) assert.Equal(t, 1, tMeter.aiCount) assert.Equal(t, 1, tMeter.aiUDCount) assert.Equal(t, 1, tMeter.aiGauge) assert.Equal(t, 1, tMeter.sfCount) assert.Equal(t, 1, tMeter.sfUDCount) assert.Equal(t, 1, tMeter.sfHist) assert.Equal(t, 1, tMeter.siCount) assert.Equal(t, 1, tMeter.siUDCount) assert.Equal(t, 1, tMeter.siHist) assert.Equal(t, 1, len(tMeter.callbacks)) // Because the Meter was provided by testmeterProvider it should also return our test instrument require.IsType(t, &testCountingFloatInstrument{}, ctr, "the meter did not delegate calls to the meter") assert.Equal(t, 1, ctr.(*testCountingFloatInstrument).count) require.IsType(t, &testCountingFloatInstrument{}, actr, "the meter did not delegate calls to the meter") assert.Equal(t, 1, actr.(*testCountingFloatInstrument).count) assert.Equal(t, 1, mp.count) } func TestMeterDelegatesCalls(t *testing.T) { // The global MeterProvider should directly provide a Meter instance that // can be updated. If the SetMeterProvider is called after a Meter was // obtained, but before instruments only the instrument should be generated // by the delegated type. 
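	// In user-facing terms, this is the ordering being covered (a hedged
	// sketch using the public otel package, with the no-op provider standing
	// in for an SDK):
	//
	//	m := otel.Meter("example")                     // Meter obtained first...
	//	otel.SetMeterProvider(noop.NewMeterProvider()) // ...provider installed afterwards
	//	ctr, _ := m.Float64Counter("requests")         // created post-install
	//	ctr.Add(context.Background(), 1)
	//
	// Instruments created after the provider is set should come from the
	// delegate directly, with no wrapper in between.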
globalMeterProvider := &meterProvider{} mp := &testMeterProvider{} assert.Equal(t, 0, mp.count) m := globalMeterProvider.Meter("go.opentelemetry.io/otel/metric/internal/global/meter_test") globalMeterProvider.setDelegate(mp) ctr, actr := testSetupAllInstrumentTypes(t, m) ctr.Add(context.Background(), 5) testCollect(t, m) // This is a hacky way to emulate a read from an exporter // Calls to Meter methods after setDelegate() should be executed by the delegate require.IsType(t, &meter{}, m) tMeter := m.(*meter).delegate.Load().(*testMeter) require.NotNil(t, tMeter) assert.Equal(t, 1, tMeter.afCount) assert.Equal(t, 1, tMeter.afUDCount) assert.Equal(t, 1, tMeter.afGauge) assert.Equal(t, 1, tMeter.aiCount) assert.Equal(t, 1, tMeter.aiUDCount) assert.Equal(t, 1, tMeter.aiGauge) assert.Equal(t, 1, tMeter.sfCount) assert.Equal(t, 1, tMeter.sfUDCount) assert.Equal(t, 1, tMeter.sfHist) assert.Equal(t, 1, tMeter.siCount) assert.Equal(t, 1, tMeter.siUDCount) assert.Equal(t, 1, tMeter.siHist) // Because the Meter was provided by testmeterProvider it should also return our test instrument require.IsType(t, &testCountingFloatInstrument{}, ctr, "the meter did not delegate calls to the meter") assert.Equal(t, 1, ctr.(*testCountingFloatInstrument).count) // Because the Meter was provided by testmeterProvider it should also return our test instrument require.IsType(t, &testCountingFloatInstrument{}, actr, "the meter did not delegate calls to the meter") assert.Equal(t, 1, actr.(*testCountingFloatInstrument).count) assert.Equal(t, 1, mp.count) } func TestMeterDefersDelegations(t *testing.T) { // If SetMeterProvider is called after instruments are registered, the // instruments should be recreated with the new meter. // globalMeterProvider := otel.GetMeterProvider globalMeterProvider := &meterProvider{} m := globalMeterProvider.Meter("go.opentelemetry.io/otel/metric/internal/global/meter_test") ctr, actr := testSetupAllInstrumentTypes(t, m) ctr.Add(context.Background(), 5) mp := &testMeterProvider{} // otel.SetMeterProvider(mp) globalMeterProvider.setDelegate(mp) testCollect(t, m) // This is a hacky way to emulate a read from an exporter // Calls to Meter() before setDelegate() should be the delegated type require.IsType(t, &meter{}, m) tMeter := m.(*meter).delegate.Load().(*testMeter) require.NotNil(t, tMeter) assert.Equal(t, 1, tMeter.afCount) assert.Equal(t, 1, tMeter.afUDCount) assert.Equal(t, 1, tMeter.afGauge) assert.Equal(t, 1, tMeter.aiCount) assert.Equal(t, 1, tMeter.aiUDCount) assert.Equal(t, 1, tMeter.aiGauge) assert.Equal(t, 1, tMeter.sfCount) assert.Equal(t, 1, tMeter.sfUDCount) assert.Equal(t, 1, tMeter.sfHist) assert.Equal(t, 1, tMeter.siCount) assert.Equal(t, 1, tMeter.siUDCount) assert.Equal(t, 1, tMeter.siHist) // Because the Meter was a delegate it should return a delegated instrument assert.IsType(t, &sfCounter{}, ctr) assert.IsType(t, &afCounter{}, actr) assert.Equal(t, 1, mp.count) } func TestRegistrationDelegation(t *testing.T) { // globalMeterProvider := otel.GetMeterProvider globalMeterProvider := &meterProvider{} m := globalMeterProvider.Meter("go.opentelemetry.io/otel/metric/internal/global/meter_test") require.IsType(t, &meter{}, m) mImpl := m.(*meter) actr, err := m.Float64ObservableCounter("test_Async_Counter") require.NoError(t, err) var called0 bool reg0, err := m.RegisterCallback(func(context.Context, metric.Observer) error { called0 = true return nil }, actr) require.NoError(t, err) require.Equal(t, 1, mImpl.registry.Len(), "callback not registered") // This means reg0 should 
not be delegated. assert.NoError(t, reg0.Unregister()) assert.Equal(t, 0, mImpl.registry.Len(), "callback not unregistered") var called1 bool reg1, err := m.RegisterCallback(func(context.Context, metric.Observer) error { called1 = true return nil }, actr) require.NoError(t, err) require.Equal(t, 1, mImpl.registry.Len(), "second callback not registered") mp := &testMeterProvider{} // otel.SetMeterProvider(mp) globalMeterProvider.setDelegate(mp) testCollect(t, m) // This is a hacky way to emulate a read from an exporter require.False(t, called0, "pre-delegation unregistered callback called") require.True(t, called1, "callback not called") called1 = false assert.NoError(t, reg1.Unregister(), "unregister second callback") testCollect(t, m) // This is a hacky way to emulate a read from an exporter assert.False(t, called1, "unregistered callback called") assert.NotPanics(t, func() { assert.NoError(t, reg1.Unregister(), "duplicate unregister calls") }) } opentelemetry-go-1.21.0/internal/global/meter_types_test.go000066400000000000000000000112501452547353200241050ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package global // import "go.opentelemetry.io/otel/internal/global" import ( "context" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/embedded" ) type testMeterProvider struct { embedded.MeterProvider count int } func (p *testMeterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter { p.count++ return &testMeter{} } type testMeter struct { embedded.Meter afCount int afUDCount int afGauge int aiCount int aiUDCount int aiGauge int sfCount int sfUDCount int sfHist int siCount int siUDCount int siHist int callbacks []metric.Callback } func (m *testMeter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { m.siCount++ return &testCountingIntInstrument{}, nil } func (m *testMeter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { m.siUDCount++ return &testCountingIntInstrument{}, nil } func (m *testMeter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { m.siHist++ return &testCountingIntInstrument{}, nil } func (m *testMeter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { m.aiCount++ return &testCountingIntInstrument{}, nil } func (m *testMeter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { m.aiUDCount++ return &testCountingIntInstrument{}, nil } func (m *testMeter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { m.aiGauge++ return &testCountingIntInstrument{}, nil } func (m *testMeter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { m.sfCount++ 
return &testCountingFloatInstrument{}, nil } func (m *testMeter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { m.sfUDCount++ return &testCountingFloatInstrument{}, nil } func (m *testMeter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { m.sfHist++ return &testCountingFloatInstrument{}, nil } func (m *testMeter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { m.afCount++ return &testCountingFloatInstrument{}, nil } func (m *testMeter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { m.afUDCount++ return &testCountingFloatInstrument{}, nil } func (m *testMeter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { m.afGauge++ return &testCountingFloatInstrument{}, nil } // RegisterCallback captures the function that will be called during Collect. func (m *testMeter) RegisterCallback(f metric.Callback, i ...metric.Observable) (metric.Registration, error) { m.callbacks = append(m.callbacks, f) return testReg{ f: func(idx int) func() { return func() { m.callbacks[idx] = nil } }(len(m.callbacks) - 1), }, nil } type testReg struct { embedded.Registration f func() } func (r testReg) Unregister() error { r.f() return nil } // This enables async collection. func (m *testMeter) collect() { ctx := context.Background() o := observationRecorder{ctx: ctx} for _, f := range m.callbacks { if f == nil { // Unregister. continue } _ = f(ctx, o) } } type observationRecorder struct { embedded.Observer ctx context.Context } func (o observationRecorder) ObserveFloat64(i metric.Float64Observable, value float64, _ ...metric.ObserveOption) { iImpl, ok := i.(*testCountingFloatInstrument) if ok { iImpl.observe() } } func (o observationRecorder) ObserveInt64(i metric.Int64Observable, value int64, _ ...metric.ObserveOption) { iImpl, ok := i.(*testCountingIntInstrument) if ok { iImpl.observe() } } opentelemetry-go-1.21.0/internal/global/propagator.go000066400000000000000000000053061452547353200226710ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package global // import "go.opentelemetry.io/otel/internal/global" import ( "context" "sync" "go.opentelemetry.io/otel/propagation" ) // textMapPropagator is a default TextMapPropagator that delegates calls to a // registered delegate if one is set, otherwise it defaults to delegating the // calls to a the default no-op propagation.TextMapPropagator. type textMapPropagator struct { mtx sync.Mutex once sync.Once delegate propagation.TextMapPropagator noop propagation.TextMapPropagator } // Compile-time guarantee that textMapPropagator implements the // propagation.TextMapPropagator interface. 
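// The declaration that follows is the standard Go compile-time interface
// check: assigning a typed nil pointer to the blank identifier makes the build
// fail if the type ever stops satisfying the interface. In miniature
// (hypothetical names, for illustration only):
//
//	type Greeter interface{ Greet() string }
//
//	type impl struct{}
//
//	func (impl) Greet() string { return "hi" }
//
//	var _ Greeter = (*impl)(nil) // compile error if impl loses Greet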
var _ propagation.TextMapPropagator = (*textMapPropagator)(nil) func newTextMapPropagator() *textMapPropagator { return &textMapPropagator{ noop: propagation.NewCompositeTextMapPropagator(), } } // SetDelegate sets a delegate propagation.TextMapPropagator that all calls are // forwarded to. Delegation can only be performed once, all subsequent calls // perform no delegation. func (p *textMapPropagator) SetDelegate(delegate propagation.TextMapPropagator) { if delegate == nil { return } p.mtx.Lock() p.once.Do(func() { p.delegate = delegate }) p.mtx.Unlock() } // effectiveDelegate returns the current delegate of p if one is set, // otherwise the default noop TextMapPropagator is returned. This method // can be called concurrently. func (p *textMapPropagator) effectiveDelegate() propagation.TextMapPropagator { p.mtx.Lock() defer p.mtx.Unlock() if p.delegate != nil { return p.delegate } return p.noop } // Inject set cross-cutting concerns from the Context into the carrier. func (p *textMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) { p.effectiveDelegate().Inject(ctx, carrier) } // Extract reads cross-cutting concerns from the carrier into a Context. func (p *textMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context { return p.effectiveDelegate().Extract(ctx, carrier) } // Fields returns the keys whose values are set with Inject. func (p *textMapPropagator) Fields() []string { return p.effectiveDelegate().Fields() } opentelemetry-go-1.21.0/internal/global/propagator_test.go000066400000000000000000000060631452547353200237310ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package global import ( "context" "testing" "go.opentelemetry.io/otel/internal/internaltest" ) func TestTextMapPropagatorDelegation(t *testing.T) { ResetForTest(t) ctx := context.Background() carrier := internaltest.NewTextMapCarrier(nil) // The default should be a noop. initial := TextMapPropagator() initial.Inject(ctx, carrier) ctx = initial.Extract(ctx, carrier) if !carrier.GotN(t, 0) || !carrier.SetN(t, 0) { return } // Make sure the delegate woks as expected. delegate := internaltest.NewTextMapPropagator("test") delegate.Inject(ctx, carrier) ctx = delegate.Extract(ctx, carrier) if !delegate.InjectedN(t, carrier, 1) || !delegate.ExtractedN(t, ctx, 1) { return } // The initial propagator should use the delegate after it is set as the // global. SetTextMapPropagator(delegate) initial.Inject(ctx, carrier) ctx = initial.Extract(ctx, carrier) delegate.InjectedN(t, carrier, 2) delegate.ExtractedN(t, ctx, 2) } func TestTextMapPropagatorDelegationNil(t *testing.T) { ResetForTest(t) ctx := context.Background() carrier := internaltest.NewTextMapCarrier(nil) // The default should be a noop. initial := TextMapPropagator() initial.Inject(ctx, carrier) ctx = initial.Extract(ctx, carrier) if !carrier.GotN(t, 0) || !carrier.SetN(t, 0) { return } // Delegation to nil should not make a change. 
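	// SetDelegate in propagator.go ignores nil and guards real delegation with
	// a sync.Once, so only the first non-nil delegate ever takes effect; the
	// call below reaches that nil branch through the package-level setter.
	// Reduced to a sketch (hypothetical type, not the production one):
	//
	//	func (p *delegator) SetDelegate(d Delegate) {
	//		if d == nil {
	//			return // keep the current (noop) behaviour
	//		}
	//		p.mtx.Lock()
	//		p.once.Do(func() { p.delegate = d })
	//		p.mtx.Unlock()
	//	}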
SetTextMapPropagator(nil) initial.Inject(ctx, carrier) initial.Extract(ctx, carrier) if !carrier.GotN(t, 0) || !carrier.SetN(t, 0) { return } } func TestTextMapPropagatorFields(t *testing.T) { ResetForTest(t) initial := TextMapPropagator() delegate := internaltest.NewTextMapPropagator("test") delegateFields := delegate.Fields() // Sanity check on the initial Fields. if got := initial.Fields(); fieldsEqual(got, delegateFields) { t.Fatalf("testing fields (%v) matched Noop fields (%v)", delegateFields, got) } SetTextMapPropagator(delegate) // Check previous returns from global not correctly delegate. if got := initial.Fields(); !fieldsEqual(got, delegateFields) { t.Errorf("global TextMapPropagator.Fields returned %v instead of delegating, want (%v)", got, delegateFields) } // Check new calls to global. if got := TextMapPropagator().Fields(); !fieldsEqual(got, delegateFields) { t.Errorf("global TextMapPropagator.Fields returned %v, want (%v)", got, delegateFields) } } func fieldsEqual(f1, f2 []string) bool { if len(f1) != len(f2) { return false } for i := range f1 { if f1[i] != f2[i] { return false } } return true } opentelemetry-go-1.21.0/internal/global/state.go000066400000000000000000000106771452547353200216420ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package global // import "go.opentelemetry.io/otel/internal/global" import ( "errors" "sync" "sync/atomic" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" ) type ( tracerProviderHolder struct { tp trace.TracerProvider } propagatorsHolder struct { tm propagation.TextMapPropagator } meterProviderHolder struct { mp metric.MeterProvider } ) var ( globalTracer = defaultTracerValue() globalPropagators = defaultPropagatorsValue() globalMeterProvider = defaultMeterProvider() delegateTraceOnce sync.Once delegateTextMapPropagatorOnce sync.Once delegateMeterOnce sync.Once ) // TracerProvider is the internal implementation for global.TracerProvider. func TracerProvider() trace.TracerProvider { return globalTracer.Load().(tracerProviderHolder).tp } // SetTracerProvider is the internal implementation for global.SetTracerProvider. func SetTracerProvider(tp trace.TracerProvider) { current := TracerProvider() if _, cOk := current.(*tracerProvider); cOk { if _, tpOk := tp.(*tracerProvider); tpOk && current == tp { // Do not assign the default delegating TracerProvider to delegate // to itself. Error( errors.New("no delegate configured in tracer provider"), "Setting tracer provider to it's current value. No delegate will be configured", ) return } } delegateTraceOnce.Do(func() { if def, ok := current.(*tracerProvider); ok { def.setDelegate(tp) } }) globalTracer.Store(tracerProviderHolder{tp: tp}) } // TextMapPropagator is the internal implementation for global.TextMapPropagator. 
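// state.go keeps each global in an atomic.Value wrapped in a small holder
// struct (tracerProviderHolder, propagatorsHolder, meterProviderHolder):
// atomic.Value requires every Store to use the same concrete type, so storing
// the interface values directly would panic the moment a different
// implementation was installed. The pattern, reduced to a hedged sketch
// (hypothetical names):
//
//	type holder struct{ w io.Writer }
//
//	var global atomic.Value // always stores a holder value
//
//	func set(w io.Writer) { global.Store(holder{w: w}) }
//	func get() io.Writer  { return global.Load().(holder).w }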
func TextMapPropagator() propagation.TextMapPropagator { return globalPropagators.Load().(propagatorsHolder).tm } // SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator. func SetTextMapPropagator(p propagation.TextMapPropagator) { current := TextMapPropagator() if _, cOk := current.(*textMapPropagator); cOk { if _, pOk := p.(*textMapPropagator); pOk && current == p { // Do not assign the default delegating TextMapPropagator to // delegate to itself. Error( errors.New("no delegate configured in text map propagator"), "Setting text map propagator to it's current value. No delegate will be configured", ) return } } // For the textMapPropagator already returned by TextMapPropagator // delegate to p. delegateTextMapPropagatorOnce.Do(func() { if def, ok := current.(*textMapPropagator); ok { def.SetDelegate(p) } }) // Return p when subsequent calls to TextMapPropagator are made. globalPropagators.Store(propagatorsHolder{tm: p}) } // MeterProvider is the internal implementation for global.MeterProvider. func MeterProvider() metric.MeterProvider { return globalMeterProvider.Load().(meterProviderHolder).mp } // SetMeterProvider is the internal implementation for global.SetMeterProvider. func SetMeterProvider(mp metric.MeterProvider) { current := MeterProvider() if _, cOk := current.(*meterProvider); cOk { if _, mpOk := mp.(*meterProvider); mpOk && current == mp { // Do not assign the default delegating MeterProvider to delegate // to itself. Error( errors.New("no delegate configured in meter provider"), "Setting meter provider to it's current value. No delegate will be configured", ) return } } delegateMeterOnce.Do(func() { if def, ok := current.(*meterProvider); ok { def.setDelegate(mp) } }) globalMeterProvider.Store(meterProviderHolder{mp: mp}) } func defaultTracerValue() *atomic.Value { v := &atomic.Value{} v.Store(tracerProviderHolder{tp: &tracerProvider{}}) return v } func defaultPropagatorsValue() *atomic.Value { v := &atomic.Value{} v.Store(propagatorsHolder{tm: newTextMapPropagator()}) return v } func defaultMeterProvider() *atomic.Value { v := &atomic.Value{} v.Store(meterProviderHolder{mp: &meterProvider{}}) return v } opentelemetry-go-1.21.0/internal/global/state_test.go000066400000000000000000000113321452547353200226660ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package global import ( "testing" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/metric" metricnoop "go.opentelemetry.io/otel/metric/noop" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" tracenoop "go.opentelemetry.io/otel/trace/noop" ) type nonComparableTracerProvider struct { trace.TracerProvider nonComparable func() //nolint:structcheck,unused // This is not called. } type nonComparableMeterProvider struct { metric.MeterProvider nonComparable func() //nolint:structcheck,unused // This is not called. 
} func TestSetTracerProvider(t *testing.T) { t.Run("Set With default is a noop", func(t *testing.T) { ResetForTest(t) SetTracerProvider(TracerProvider()) tp, ok := TracerProvider().(*tracerProvider) if !ok { t.Fatal("Global TracerProvider should be the default tracer provider") } if tp.delegate != nil { t.Fatal("tracer provider should not delegate when setting itself") } }) t.Run("First Set() should replace the delegate", func(t *testing.T) { ResetForTest(t) SetTracerProvider(tracenoop.NewTracerProvider()) _, ok := TracerProvider().(*tracerProvider) if ok { t.Fatal("Global TracerProvider was not changed") } }) t.Run("Set() should delegate existing TracerProviders", func(t *testing.T) { ResetForTest(t) tp := TracerProvider() SetTracerProvider(tracenoop.NewTracerProvider()) ntp := tp.(*tracerProvider) if ntp.delegate == nil { t.Fatal("The delegated tracer providers should have a delegate") } }) t.Run("non-comparable types should not panic", func(t *testing.T) { ResetForTest(t) tp := nonComparableTracerProvider{} SetTracerProvider(tp) assert.NotPanics(t, func() { SetTracerProvider(tp) }) }) } func TestSetTextMapPropagator(t *testing.T) { t.Run("Set With default is a noop", func(t *testing.T) { ResetForTest(t) SetTextMapPropagator(TextMapPropagator()) tmp, ok := TextMapPropagator().(*textMapPropagator) if !ok { t.Fatal("Global TextMapPropagator should be the default propagator") } if tmp.delegate != nil { t.Fatal("TextMapPropagator should not delegate when setting itself") } }) t.Run("First Set() should replace the delegate", func(t *testing.T) { ResetForTest(t) SetTextMapPropagator(propagation.TraceContext{}) _, ok := TextMapPropagator().(*textMapPropagator) if ok { t.Fatal("Global TextMapPropagator was not changed") } }) t.Run("Set() should delegate existing propagators", func(t *testing.T) { ResetForTest(t) p := TextMapPropagator() SetTextMapPropagator(propagation.TraceContext{}) np := p.(*textMapPropagator) if np.delegate == nil { t.Fatal("The delegated TextMapPropagators should have a delegate") } }) t.Run("non-comparable types should not panic", func(t *testing.T) { ResetForTest(t) // A composite TextMapPropagator is not comparable. 
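		// Background for this subtest (general Go behaviour, not code from
		// this repository): comparing two interface values panics at runtime
		// when both hold the same non-comparable dynamic type, for example
		//
		//	type p struct{ fields []string } // the slice field makes p non-comparable
		//	var a, b any = p{}, p{}
		//	_ = a == b // runtime panic: comparing uncomparable type
		//
		// The Set* functions in state.go avoid this by type-asserting both
		// sides to the internal delegating type (a comparable pointer) before
		// ever using ==.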
prop := propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}) SetTextMapPropagator(prop) assert.NotPanics(t, func() { SetTextMapPropagator(prop) }) }) } func TestSetMeterProvider(t *testing.T) { t.Run("Set With default is a noop", func(t *testing.T) { ResetForTest(t) SetMeterProvider(MeterProvider()) mp, ok := MeterProvider().(*meterProvider) if !ok { t.Fatal("Global MeterProvider should be the default meter provider") } if mp.delegate != nil { t.Fatal("meter provider should not delegate when setting itself") } }) t.Run("First Set() should replace the delegate", func(t *testing.T) { ResetForTest(t) SetMeterProvider(metricnoop.NewMeterProvider()) _, ok := MeterProvider().(*meterProvider) if ok { t.Fatal("Global MeterProvider was not changed") } }) t.Run("Set() should delegate existing Meter Providers", func(t *testing.T) { ResetForTest(t) mp := MeterProvider() SetMeterProvider(metricnoop.NewMeterProvider()) dmp := mp.(*meterProvider) if dmp.delegate == nil { t.Fatal("The delegated meter providers should have a delegate") } }) t.Run("non-comparable types should not panic", func(t *testing.T) { ResetForTest(t) mp := nonComparableMeterProvider{} SetMeterProvider(mp) assert.NotPanics(t, func() { SetMeterProvider(mp) }) }) } opentelemetry-go-1.21.0/internal/global/trace.go000066400000000000000000000132541452547353200216120ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package global // import "go.opentelemetry.io/otel/internal/global" /* This file contains the forwarding implementation of the TracerProvider used as the default global instance. Prior to initialization of an SDK, Tracers returned by the global TracerProvider will provide no-op functionality. This means that all Span created prior to initialization are no-op Spans. Once an SDK has been initialized, all provided no-op Tracers are swapped for Tracers provided by the SDK defined TracerProvider. However, any Span started prior to this initialization does not change its behavior. Meaning, the Span remains a no-op Span. The implementation to track and swap Tracers locks all new Tracer creation until the swap is complete. This assumes that this operation is not performance-critical. If that assumption is incorrect, be sure to configure an SDK prior to any Tracer creation. */ import ( "context" "sync" "sync/atomic" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) // tracerProvider is a placeholder for a configured SDK TracerProvider. // // All TracerProvider functionality is forwarded to a delegate once // configured. type tracerProvider struct { embedded.TracerProvider mtx sync.Mutex tracers map[il]*tracer delegate trace.TracerProvider } // Compile-time guarantee that tracerProvider implements the TracerProvider // interface. var _ trace.TracerProvider = &tracerProvider{} // setDelegate configures p to delegate all TracerProvider functionality to // provider. 
// // All Tracers provided prior to this function call are switched out to be // Tracers provided by provider. // // It is guaranteed by the caller that this happens only once. func (p *tracerProvider) setDelegate(provider trace.TracerProvider) { p.mtx.Lock() defer p.mtx.Unlock() p.delegate = provider if len(p.tracers) == 0 { return } for _, t := range p.tracers { t.setDelegate(provider) } p.tracers = nil } // Tracer implements TracerProvider. func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { p.mtx.Lock() defer p.mtx.Unlock() if p.delegate != nil { return p.delegate.Tracer(name, opts...) } // At this moment it is guaranteed that no sdk is installed, save the tracer in the tracers map. c := trace.NewTracerConfig(opts...) key := il{ name: name, version: c.InstrumentationVersion(), } if p.tracers == nil { p.tracers = make(map[il]*tracer) } if val, ok := p.tracers[key]; ok { return val } t := &tracer{name: name, opts: opts, provider: p} p.tracers[key] = t return t } type il struct { name string version string } // tracer is a placeholder for a trace.Tracer. // // All Tracer functionality is forwarded to a delegate once configured. // Otherwise, all functionality is forwarded to a NoopTracer. type tracer struct { embedded.Tracer name string opts []trace.TracerOption provider *tracerProvider delegate atomic.Value } // Compile-time guarantee that tracer implements the trace.Tracer interface. var _ trace.Tracer = &tracer{} // setDelegate configures t to delegate all Tracer functionality to Tracers // created by provider. // // All subsequent calls to the Tracer methods will be passed to the delegate. // // It is guaranteed by the caller that this happens only once. func (t *tracer) setDelegate(provider trace.TracerProvider) { t.delegate.Store(provider.Tracer(t.name, t.opts...)) } // Start implements trace.Tracer by forwarding the call to t.delegate if // set, otherwise it forwards the call to a NoopTracer. func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { delegate := t.delegate.Load() if delegate != nil { return delegate.(trace.Tracer).Start(ctx, name, opts...) } s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t} ctx = trace.ContextWithSpan(ctx, s) return ctx, s } // nonRecordingSpan is a minimal implementation of a Span that wraps a // SpanContext. It performs no operations other than to return the wrapped // SpanContext. type nonRecordingSpan struct { embedded.Span sc trace.SpanContext tracer *tracer } var _ trace.Span = nonRecordingSpan{} // SpanContext returns the wrapped SpanContext. func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc } // IsRecording always returns false. func (nonRecordingSpan) IsRecording() bool { return false } // SetStatus does nothing. func (nonRecordingSpan) SetStatus(codes.Code, string) {} // SetError does nothing. func (nonRecordingSpan) SetError(bool) {} // SetAttributes does nothing. func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {} // End does nothing. func (nonRecordingSpan) End(...trace.SpanEndOption) {} // RecordError does nothing. func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} // AddEvent does nothing. func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} // SetName does nothing. 
func (nonRecordingSpan) SetName(string) {} func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider } opentelemetry-go-1.21.0/internal/global/trace_test.go000066400000000000000000000156521452547353200226550ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package global import ( "context" "sync/atomic" "testing" "time" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" "go.opentelemetry.io/otel/trace/noop" ) type fnTracerProvider struct { embedded.TracerProvider tracer func(string, ...trace.TracerOption) trace.Tracer } func (fn fnTracerProvider) Tracer(instrumentationName string, opts ...trace.TracerOption) trace.Tracer { return fn.tracer(instrumentationName, opts...) } type fnTracer struct { embedded.Tracer start func(ctx context.Context, spanName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) } func (fn fnTracer) Start(ctx context.Context, spanName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { return fn.start(ctx, spanName, opts...) } func TestTraceProviderDelegation(t *testing.T) { ResetForTest(t) // Map of tracers to expected span names. expected := map[string][]string{ "pre": {"span2"}, "post": {"span3"}, "fromSpan": {"span4"}, } ctx := context.Background() gtp := TracerProvider() tracer1 := gtp.Tracer("pre") // This is started before an SDK was registered and should be dropped. _, span1 := tracer1.Start(ctx, "span1") SetTracerProvider(fnTracerProvider{ tracer: func(name string, opts ...trace.TracerOption) trace.Tracer { spans, ok := expected[name] assert.Truef(t, ok, "invalid tracer: %s", name) return fnTracer{ start: func(ctx context.Context, spanName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { if ok { if len(spans) == 0 { t.Errorf("unexpected span: %s", spanName) } else { var want string want, spans = spans[0], spans[1:] assert.Equal(t, want, spanName) } } return noop.NewTracerProvider().Tracer(name).Start(ctx, spanName) }, } }, }) // This span was started before initialization, it is expected to be dropped. span1.End() // The existing Tracer should have been configured to now use the configured SDK. _, span2 := tracer1.Start(ctx, "span2") span2.End() // The global TracerProvider should now create Tracers that also use the newly configured SDK. tracer2 := gtp.Tracer("post") _, span3 := tracer2.Start(ctx, "span3") span3.End() // The noop-span should still provide access to a usable TracerProvider. _, span4 := span1.TracerProvider().Tracer("fromSpan").Start(ctx, "span4") span4.End() } func TestTraceProviderDelegates(t *testing.T) { ResetForTest(t) // Retrieve the placeholder TracerProvider. gtp := TracerProvider() // Configure it with a spy. 
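	// fnTracerProvider and fnTracer (declared at the top of this file) are
	// function-valued test doubles: each interface method simply forwards to a
	// func field, so the spy logic can be written inline at the call site. The
	// same shape in miniature (hypothetical interface and names):
	//
	//	type Doer interface{ Do(string) int }
	//
	//	type fnDoer struct{ do func(string) int }
	//
	//	func (f fnDoer) Do(s string) int { return f.do(s) }
	//
	//	// in a test:
	//	var got string
	//	d := fnDoer{do: func(s string) int { got = s; return 1 }}
	//	_ = d.Do("abc") // the spy records the argument for later assertions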
called := false SetTracerProvider(fnTracerProvider{ tracer: func(name string, opts ...trace.TracerOption) trace.Tracer { called = true assert.Equal(t, "abc", name) return noop.NewTracerProvider().Tracer("") }, }) gtp.Tracer("abc", trace.WithInstrumentationVersion("xyz")) assert.True(t, called, "expected configured TraceProvider to be called") } func TestTraceProviderDelegatesConcurrentSafe(t *testing.T) { ResetForTest(t) // Retrieve the placeholder TracerProvider. gtp := TracerProvider() done := make(chan struct{}) quit := make(chan struct{}) go func() { defer close(done) for { select { case <-time.After(1 * time.Millisecond): gtp.Tracer("abc", trace.WithInstrumentationVersion("xyz")) case <-quit: return } } }() // Wait for the goroutine to make some calls before installing the provider. <-time.After(100 * time.Millisecond) // Configure it with a spy. called := int32(0) SetTracerProvider(fnTracerProvider{ tracer: func(name string, opts ...trace.TracerOption) trace.Tracer { newVal := atomic.AddInt32(&called, 1) assert.Equal(t, "abc", name) if newVal == 10 { // Signal the goroutine to finish. close(quit) } return noop.NewTracerProvider().Tracer("") }, }) // Wait for the go routine to finish <-done assert.LessOrEqual(t, int32(10), atomic.LoadInt32(&called), "expected configured TraceProvider to be called") } func TestTracerDelegatesConcurrentSafe(t *testing.T) { ResetForTest(t) // Retrieve the placeholder TracerProvider. gtp := TracerProvider() tracer := gtp.Tracer("abc", trace.WithInstrumentationVersion("xyz")) done := make(chan struct{}) quit := make(chan struct{}) go func() { defer close(done) for { select { case <-time.After(1 * time.Millisecond): tracer.Start(context.Background(), "name") case <-quit: return } } }() // Wait for the goroutine to make some calls before installing the provider. <-time.After(100 * time.Millisecond) // Configure it with a spy. called := int32(0) SetTracerProvider(fnTracerProvider{ tracer: func(name string, opts ...trace.TracerOption) trace.Tracer { assert.Equal(t, "abc", name) return fnTracer{ start: func(ctx context.Context, spanName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { newVal := atomic.AddInt32(&called, 1) assert.Equal(t, "name", spanName) if newVal == 10 { // Signal the goroutine to finish. close(quit) } return noop.NewTracerProvider().Tracer("").Start(ctx, spanName) }, } }, }) // Wait for the go routine to finish <-done assert.LessOrEqual(t, int32(10), atomic.LoadInt32(&called), "expected configured TraceProvider to be called") } func TestTraceProviderDelegatesSameInstance(t *testing.T) { ResetForTest(t) // Retrieve the placeholder TracerProvider. 
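	// Before a delegate is installed, tracerProvider (trace.go) caches its
	// placeholder tracers in a map keyed by il{name, version}, which is why
	// the repeated look-ups below return the very same instance. setDelegate
	// drops that cache and forwards subsequent Tracer calls to the real
	// provider, so the identity no longer holds afterwards.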
gtp := TracerProvider() tracer := gtp.Tracer("abc", trace.WithInstrumentationVersion("xyz")) assert.Same(t, tracer, gtp.Tracer("abc", trace.WithInstrumentationVersion("xyz"))) assert.Same(t, tracer, gtp.Tracer("abc", trace.WithInstrumentationVersion("xyz"))) SetTracerProvider(fnTracerProvider{ tracer: func(name string, opts ...trace.TracerOption) trace.Tracer { return noop.NewTracerProvider().Tracer("") }, }) assert.NotSame(t, tracer, gtp.Tracer("abc", trace.WithInstrumentationVersion("xyz"))) } func TestSpanContextPropagatedWithNonRecordingSpan(t *testing.T) { ResetForTest(t) sc := trace.NewSpanContext(trace.SpanContextConfig{ TraceID: [16]byte{0x01}, SpanID: [8]byte{0x01}, TraceFlags: trace.FlagsSampled, Remote: true, }) ctx := trace.ContextWithSpanContext(context.Background(), sc) _, span := TracerProvider().Tracer("test").Start(ctx, "test") assert.Equal(t, sc, span.SpanContext()) assert.False(t, span.IsRecording()) } opentelemetry-go-1.21.0/internal/global/util_test.go000066400000000000000000000020351452547353200225230ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package global import ( "sync" "testing" ) // ResetForTest configures the test to restores the initial global state during // its Cleanup step. func ResetForTest(t testing.TB) { t.Cleanup(func() { globalTracer = defaultTracerValue() globalPropagators = defaultPropagatorsValue() globalMeterProvider = defaultMeterProvider() delegateTraceOnce = sync.Once{} delegateTextMapPropagatorOnce = sync.Once{} delegateMeterOnce = sync.Once{} }) } opentelemetry-go-1.21.0/internal/internaltest/000077500000000000000000000000001452547353200214345ustar00rootroot00000000000000opentelemetry-go-1.21.0/internal/internaltest/alignment.go000066400000000000000000000045151452547353200237460ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/alignment.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest // import "go.opentelemetry.io/otel/internal/internaltest" /* This file contains common utilities and objects to validate memory alignment of Go types. The primary use of this functionality is intended to ensure `struct` fields that need to be 64-bit aligned so they can be passed as arguments to 64-bit atomic operations. 
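For illustration only (not a type from this module): on 32-bit platforms the
64-bit words handed to sync/atomic must be 8-byte aligned, and the first word
of an allocated struct is the one the runtime guarantees to be aligned, so such
fields are conventionally placed first:

	type counter struct {
		n    int64 // updated with atomic.AddInt64; kept first for 8-byte alignment
		name string
	}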
The common workflow is to define a slice of `FieldOffset` and pass them to the `Aligned8Byte` function from within a `TestMain` function from a package's tests. It is important to make this call from the `TestMain` function prior to running the rest of the test suit as it can provide useful diagnostics about field alignment instead of ambiguous nil pointer dereference and runtime panic. For more information: https://github.com/open-telemetry/opentelemetry-go/issues/341 */ import ( "fmt" "io" ) // FieldOffset is a preprocessor representation of a struct field alignment. type FieldOffset struct { // Name of the field. Name string // Offset of the field in bytes. // // To compute this at compile time use unsafe.Offsetof. Offset uintptr } // Aligned8Byte returns if all fields are aligned modulo 8-bytes. // // Error messaging is printed to out for any field determined misaligned. func Aligned8Byte(fields []FieldOffset, out io.Writer) bool { misaligned := make([]FieldOffset, 0) for _, f := range fields { if f.Offset%8 != 0 { misaligned = append(misaligned, f) } } if len(misaligned) == 0 { return true } fmt.Fprintln(out, "struct fields not aligned for 64-bit atomic operations:") for _, f := range misaligned { fmt.Fprintf(out, " %s: %d-byte offset\n", f.Name, f.Offset) } return false } opentelemetry-go-1.21.0/internal/internaltest/env.go000066400000000000000000000041141452547353200225530ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/env.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest // import "go.opentelemetry.io/otel/internal/internaltest" import ( "os" ) type Env struct { Name string Value string Exists bool } // EnvStore stores and recovers environment variables. type EnvStore interface { // Records the environment variable into the store. Record(key string) // Restore recovers the environment variables in the store. 
Restore() error } var _ EnvStore = (*envStore)(nil) type envStore struct { store map[string]Env } func (s *envStore) add(env Env) { s.store[env.Name] = env } func (s *envStore) Restore() error { var err error for _, v := range s.store { if v.Exists { err = os.Setenv(v.Name, v.Value) } else { err = os.Unsetenv(v.Name) } if err != nil { return err } } return nil } func (s *envStore) setEnv(key, value string) error { s.Record(key) err := os.Setenv(key, value) if err != nil { return err } return nil } func (s *envStore) Record(key string) { originValue, exists := os.LookupEnv(key) s.add(Env{ Name: key, Value: originValue, Exists: exists, }) } func NewEnvStore() EnvStore { return newEnvStore() } func newEnvStore() *envStore { return &envStore{store: make(map[string]Env)} } func SetEnvVariables(env map[string]string) (EnvStore, error) { envStore := newEnvStore() for k, v := range env { err := envStore.setEnv(k, v) if err != nil { return nil, err } } return envStore, nil } opentelemetry-go-1.21.0/internal/internaltest/env_test.go000066400000000000000000000114731452547353200236200ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/env_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
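// A hedged usage sketch for the EnvStore helper defined in env.go (the
// environment variable name below is made up purely for illustration):
//
//	func TestWithEnv(t *testing.T) {
//		store, err := SetEnvVariables(map[string]string{"SOME_VAR": "value"})
//		require.NoError(t, err)
//		t.Cleanup(func() { require.NoError(t, store.Restore()) })
//		// ... code under test that reads SOME_VAR ...
//	}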
package internaltest import ( "os" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" ) type EnvStoreTestSuite struct { suite.Suite } func (s *EnvStoreTestSuite) Test_add() { envStore := newEnvStore() e := Env{ Name: "name", Value: "value", Exists: true, } envStore.add(e) envStore.add(e) s.Assert().Len(envStore.store, 1) } func (s *EnvStoreTestSuite) TestRecord() { testCases := []struct { name string env Env expectedEnvStore *envStore }{ { name: "record exists env", env: Env{ Name: "name", Value: "value", Exists: true, }, expectedEnvStore: &envStore{store: map[string]Env{ "name": { Name: "name", Value: "value", Exists: true, }, }}, }, { name: "record exists env, but its value is empty", env: Env{ Name: "name", Value: "", Exists: true, }, expectedEnvStore: &envStore{store: map[string]Env{ "name": { Name: "name", Value: "", Exists: true, }, }}, }, { name: "record not exists env", env: Env{ Name: "name", Exists: false, }, expectedEnvStore: &envStore{store: map[string]Env{ "name": { Name: "name", Exists: false, }, }}, }, } for _, tc := range testCases { s.Run(tc.name, func() { if tc.env.Exists { s.Assert().NoError(os.Setenv(tc.env.Name, tc.env.Value)) } envStore := newEnvStore() envStore.Record(tc.env.Name) s.Assert().Equal(tc.expectedEnvStore, envStore) if tc.env.Exists { s.Assert().NoError(os.Unsetenv(tc.env.Name)) } }) } } func (s *EnvStoreTestSuite) TestRestore() { testCases := []struct { name string env Env expectedEnvValue string expectedEnvExists bool }{ { name: "exists env", env: Env{ Name: "name", Value: "value", Exists: true, }, expectedEnvValue: "value", expectedEnvExists: true, }, { name: "no exists env", env: Env{ Name: "name", Exists: false, }, expectedEnvExists: false, }, } for _, tc := range testCases { s.Run(tc.name, func() { envStore := newEnvStore() envStore.add(tc.env) // Backup backup := newEnvStore() backup.Record(tc.env.Name) s.Require().NoError(os.Unsetenv(tc.env.Name)) s.Assert().NoError(envStore.Restore()) v, exists := os.LookupEnv(tc.env.Name) s.Assert().Equal(tc.expectedEnvValue, v) s.Assert().Equal(tc.expectedEnvExists, exists) // Restore s.Require().NoError(backup.Restore()) }) } } func (s *EnvStoreTestSuite) Test_setEnv() { testCases := []struct { name string key string value string expectedEnvStore *envStore expectedEnvValue string expectedEnvExists bool }{ { name: "normal", key: "name", value: "value", expectedEnvStore: &envStore{store: map[string]Env{ "name": { Name: "name", Value: "other value", Exists: true, }, }}, expectedEnvValue: "value", expectedEnvExists: true, }, } for _, tc := range testCases { s.Run(tc.name, func() { envStore := newEnvStore() // Backup backup := newEnvStore() backup.Record(tc.key) s.Require().NoError(os.Setenv(tc.key, "other value")) s.Assert().NoError(envStore.setEnv(tc.key, tc.value)) s.Assert().Equal(tc.expectedEnvStore, envStore) v, exists := os.LookupEnv(tc.key) s.Assert().Equal(tc.expectedEnvValue, v) s.Assert().Equal(tc.expectedEnvExists, exists) // Restore s.Require().NoError(backup.Restore()) }) } } func TestEnvStoreTestSuite(t *testing.T) { suite.Run(t, new(EnvStoreTestSuite)) } func TestSetEnvVariables(t *testing.T) { envs := map[string]string{ "name1": "value1", "name2": "value2", } // Backup backup := newEnvStore() for k := range envs { backup.Record(k) } defer func() { require.NoError(t, backup.Restore()) }() store, err := SetEnvVariables(envs) assert.NoError(t, err) require.IsType(t, &envStore{}, store) concreteStore := store.(*envStore) 
assert.Len(t, concreteStore.store, 2) assert.Equal(t, backup, concreteStore) } opentelemetry-go-1.21.0/internal/internaltest/errors.go000066400000000000000000000016611452547353200233030ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/errors.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest // import "go.opentelemetry.io/otel/internal/internaltest" type TestError string var _ error = TestError("") func NewTestError(s string) error { return TestError(s) } func (e TestError) Error() string { return string(e) } opentelemetry-go-1.21.0/internal/internaltest/harness.go000066400000000000000000000216511452547353200234330ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/harness.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest // import "go.opentelemetry.io/otel/internal/internaltest" import ( "context" "fmt" "sync" "testing" "time" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/internal/matchers" "go.opentelemetry.io/otel/trace" ) // Harness is a testing harness used to test implementations of the // OpenTelemetry API. type Harness struct { t *testing.T } // NewHarness returns an instantiated *Harness using t. func NewHarness(t *testing.T) *Harness { return &Harness{ t: t, } } // TestTracerProvider runs validation tests for an implementation of the OpenTelemetry // TracerProvider API. 
func (h *Harness) TestTracerProvider(subjectFactory func() trace.TracerProvider) { h.t.Run("#Start", func(t *testing.T) { t.Run("allow creating an arbitrary number of TracerProvider instances", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) tp1 := subjectFactory() tp2 := subjectFactory() e.Expect(tp1).NotToEqual(tp2) }) t.Run("all methods are safe to be called concurrently", func(t *testing.T) { t.Parallel() runner := func(tp trace.TracerProvider) <-chan struct{} { done := make(chan struct{}) go func(tp trace.TracerProvider) { var wg sync.WaitGroup for i := 0; i < 20; i++ { wg.Add(1) go func(name, version string) { _ = tp.Tracer(name, trace.WithInstrumentationVersion(version)) wg.Done() }(fmt.Sprintf("tracer %d", i%5), fmt.Sprintf("%d", i)) } wg.Wait() done <- struct{}{} }(tp) return done } matchers.NewExpecter(t).Expect(func() { // Run with multiple TracerProvider to ensure they encapsulate // their own Tracers. tp1 := subjectFactory() tp2 := subjectFactory() done1 := runner(tp1) done2 := runner(tp2) <-done1 <-done2 }).NotToPanic() }) }) } // TestTracer runs validation tests for an implementation of the OpenTelemetry // Tracer API. func (h *Harness) TestTracer(subjectFactory func() trace.Tracer) { h.t.Run("#Start", func(t *testing.T) { t.Run("propagates the original context", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() ctxKey := testCtxKey{} ctxValue := "ctx value" ctx := context.WithValue(context.Background(), ctxKey, ctxValue) ctx, _ = subject.Start(ctx, "test") e.Expect(ctx.Value(ctxKey)).ToEqual(ctxValue) }) t.Run("returns a span containing the expected properties", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() _, span := subject.Start(context.Background(), "test") e.Expect(span).NotToBeNil() e.Expect(span.SpanContext().IsValid()).ToBeTrue() }) t.Run("stores the span on the provided context", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() ctx, span := subject.Start(context.Background(), "test") e.Expect(span).NotToBeNil() e.Expect(span.SpanContext()).NotToEqual(trace.SpanContext{}) e.Expect(trace.SpanFromContext(ctx)).ToEqual(span) }) t.Run("starts spans with unique trace and span IDs", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() _, span1 := subject.Start(context.Background(), "span1") _, span2 := subject.Start(context.Background(), "span2") sc1 := span1.SpanContext() sc2 := span2.SpanContext() e.Expect(sc1.TraceID()).NotToEqual(sc2.TraceID()) e.Expect(sc1.SpanID()).NotToEqual(sc2.SpanID()) }) t.Run("propagates a parent's trace ID through the context", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() ctx, parent := subject.Start(context.Background(), "parent") _, child := subject.Start(ctx, "child") psc := parent.SpanContext() csc := child.SpanContext() e.Expect(csc.TraceID()).ToEqual(psc.TraceID()) e.Expect(csc.SpanID()).NotToEqual(psc.SpanID()) }) t.Run("ignores parent's trace ID when new root is requested", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() ctx, parent := subject.Start(context.Background(), "parent") _, child := subject.Start(ctx, "child", trace.WithNewRoot()) psc := parent.SpanContext() csc := child.SpanContext() e.Expect(csc.TraceID()).NotToEqual(psc.TraceID()) e.Expect(csc.SpanID()).NotToEqual(psc.SpanID()) }) t.Run("propagates remote parent's trace ID through the context", func(t 
*testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() _, remoteParent := subject.Start(context.Background(), "remote parent") parentCtx := trace.ContextWithRemoteSpanContext(context.Background(), remoteParent.SpanContext()) _, child := subject.Start(parentCtx, "child") psc := remoteParent.SpanContext() csc := child.SpanContext() e.Expect(csc.TraceID()).ToEqual(psc.TraceID()) e.Expect(csc.SpanID()).NotToEqual(psc.SpanID()) }) t.Run("ignores remote parent's trace ID when new root is requested", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() _, remoteParent := subject.Start(context.Background(), "remote parent") parentCtx := trace.ContextWithRemoteSpanContext(context.Background(), remoteParent.SpanContext()) _, child := subject.Start(parentCtx, "child", trace.WithNewRoot()) psc := remoteParent.SpanContext() csc := child.SpanContext() e.Expect(csc.TraceID()).NotToEqual(psc.TraceID()) e.Expect(csc.SpanID()).NotToEqual(psc.SpanID()) }) t.Run("all methods are safe to be called concurrently", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) tracer := subjectFactory() ctx, parent := tracer.Start(context.Background(), "span") runner := func(tp trace.Tracer) <-chan struct{} { done := make(chan struct{}) go func(tp trace.Tracer) { var wg sync.WaitGroup for i := 0; i < 20; i++ { wg.Add(1) go func(name string) { defer wg.Done() _, child := tp.Start(ctx, name) psc := parent.SpanContext() csc := child.SpanContext() e.Expect(csc.TraceID()).ToEqual(psc.TraceID()) e.Expect(csc.SpanID()).NotToEqual(psc.SpanID()) }(fmt.Sprintf("span %d", i)) } wg.Wait() done <- struct{}{} }(tp) return done } e.Expect(func() { done := runner(tracer) <-done }).NotToPanic() }) }) h.testSpan(subjectFactory) } func (h *Harness) testSpan(tracerFactory func() trace.Tracer) { methods := map[string]func(span trace.Span){ "#End": func(span trace.Span) { span.End() }, "#AddEvent": func(span trace.Span) { span.AddEvent("test event") }, "#AddEventWithTimestamp": func(span trace.Span) { span.AddEvent("test event", trace.WithTimestamp(time.Now().Add(1*time.Second))) }, "#SetStatus": func(span trace.Span) { span.SetStatus(codes.Error, "internal") }, "#SetName": func(span trace.Span) { span.SetName("new name") }, "#SetAttributes": func(span trace.Span) { span.SetAttributes(attribute.String("key1", "value"), attribute.Int("key2", 123)) }, } mechanisms := map[string]func() trace.Span{ "Span created via Tracer#Start": func() trace.Span { tracer := tracerFactory() _, subject := tracer.Start(context.Background(), "test") return subject }, "Span created via span.TracerProvider()": func() trace.Span { ctx, spanA := tracerFactory().Start(context.Background(), "span1") _, spanB := spanA.TracerProvider().Tracer("second").Start(ctx, "span2") return spanB }, } for mechanismName, mechanism := range mechanisms { h.t.Run(mechanismName, func(t *testing.T) { for methodName, method := range methods { t.Run(methodName, func(t *testing.T) { t.Run("is thread-safe", func(t *testing.T) { t.Parallel() span := mechanism() wg := &sync.WaitGroup{} wg.Add(2) go func() { defer wg.Done() method(span) }() go func() { defer wg.Done() method(span) }() wg.Wait() }) }) } t.Run("#End", func(t *testing.T) { t.Run("can be called multiple times", func(t *testing.T) { t.Parallel() span := mechanism() span.End() span.End() }) }) }) } } type testCtxKey struct{} 
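// A hedged sketch of how an implementation package might drive this harness;
// newTracerProviderUnderTest is a hypothetical factory for whatever
// implementation is being verified:
//
//	func TestConformance(t *testing.T) {
//		h := internaltest.NewHarness(t)
//		h.TestTracerProvider(func() trace.TracerProvider {
//			return newTracerProviderUnderTest()
//		})
//		h.TestTracer(func() trace.Tracer {
//			return newTracerProviderUnderTest().Tracer("conformance")
//		})
//	}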
opentelemetry-go-1.21.0/internal/internaltest/text_map_carrier.go000066400000000000000000000073011452547353200253140ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/text_map_carrier.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest // import "go.opentelemetry.io/otel/internal/internaltest" import ( "sync" "testing" "go.opentelemetry.io/otel/propagation" ) // TextMapCarrier is a storage medium for a TextMapPropagator used in testing. // The methods of a TextMapCarrier are concurrent safe. type TextMapCarrier struct { mtx sync.Mutex gets []string sets [][2]string data map[string]string } var _ propagation.TextMapCarrier = (*TextMapCarrier)(nil) // NewTextMapCarrier returns a new *TextMapCarrier populated with data. func NewTextMapCarrier(data map[string]string) *TextMapCarrier { copied := make(map[string]string, len(data)) for k, v := range data { copied[k] = v } return &TextMapCarrier{data: copied} } // Keys returns the keys for which this carrier has a value. func (c *TextMapCarrier) Keys() []string { c.mtx.Lock() defer c.mtx.Unlock() result := make([]string, 0, len(c.data)) for k := range c.data { result = append(result, k) } return result } // Get returns the value associated with the passed key. func (c *TextMapCarrier) Get(key string) string { c.mtx.Lock() defer c.mtx.Unlock() c.gets = append(c.gets, key) return c.data[key] } // GotKey tests if c.Get has been called for key. func (c *TextMapCarrier) GotKey(t *testing.T, key string) bool { c.mtx.Lock() defer c.mtx.Unlock() for _, k := range c.gets { if k == key { return true } } t.Errorf("TextMapCarrier.Get(%q) has not been called", key) return false } // GotN tests if n calls to c.Get have been made. func (c *TextMapCarrier) GotN(t *testing.T, n int) bool { c.mtx.Lock() defer c.mtx.Unlock() if len(c.gets) != n { t.Errorf("TextMapCarrier.Get was called %d times, not %d", len(c.gets), n) return false } return true } // Set stores the key-value pair. func (c *TextMapCarrier) Set(key, value string) { c.mtx.Lock() defer c.mtx.Unlock() c.sets = append(c.sets, [2]string{key, value}) c.data[key] = value } // SetKeyValue tests if c.Set has been called for the key-value pair. func (c *TextMapCarrier) SetKeyValue(t *testing.T, key, value string) bool { c.mtx.Lock() defer c.mtx.Unlock() var vals []string for _, pair := range c.sets { if key == pair[0] { if value == pair[1] { return true } vals = append(vals, pair[1]) } } if len(vals) > 0 { t.Errorf("TextMapCarrier.Set called with %q and %v values, but not %s", key, vals, value) } t.Errorf("TextMapCarrier.Set(%q,%q) has not been called", key, value) return false } // SetN tests if n calls to c.Set have been made. 
func (c *TextMapCarrier) SetN(t *testing.T, n int) bool { c.mtx.Lock() defer c.mtx.Unlock() if len(c.sets) != n { t.Errorf("TextMapCarrier.Set was called %d times, not %d", len(c.sets), n) return false } return true } // Reset zeros out the recording state and sets the carried values to data. func (c *TextMapCarrier) Reset(data map[string]string) { copied := make(map[string]string, len(data)) for k, v := range data { copied[k] = v } c.mtx.Lock() defer c.mtx.Unlock() c.gets = nil c.sets = nil c.data = copied } opentelemetry-go-1.21.0/internal/internaltest/text_map_carrier_test.go000066400000000000000000000046051452547353200263570ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/text_map_carrier_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest import ( "reflect" "testing" ) var key, value = "test", "true" func TestTextMapCarrierKeys(t *testing.T) { tmc := NewTextMapCarrier(map[string]string{key: value}) expected, actual := []string{key}, tmc.Keys() if !reflect.DeepEqual(actual, expected) { t.Errorf("expected tmc.Keys() to be %v but it was %v", expected, actual) } } func TestTextMapCarrierGet(t *testing.T) { tmc := NewTextMapCarrier(map[string]string{key: value}) tmc.GotN(t, 0) if got := tmc.Get("empty"); got != "" { t.Errorf("TextMapCarrier.Get returned %q for an empty key", got) } tmc.GotKey(t, "empty") tmc.GotN(t, 1) if got := tmc.Get(key); got != value { t.Errorf("TextMapCarrier.Get(%q) returned %q, want %q", key, got, value) } tmc.GotKey(t, key) tmc.GotN(t, 2) } func TestTextMapCarrierSet(t *testing.T) { tmc := NewTextMapCarrier(nil) tmc.SetN(t, 0) tmc.Set(key, value) if got, ok := tmc.data[key]; !ok { t.Errorf("TextMapCarrier.Set(%q,%q) failed to store pair", key, value) } else if got != value { t.Errorf("TextMapCarrier.Set(%q,%q) stored (%q,%q), not (%q,%q)", key, value, key, got, key, value) } tmc.SetKeyValue(t, key, value) tmc.SetN(t, 1) } func TestTextMapCarrierReset(t *testing.T) { tmc := NewTextMapCarrier(map[string]string{key: value}) tmc.GotN(t, 0) tmc.SetN(t, 0) tmc.Reset(nil) tmc.GotN(t, 0) tmc.SetN(t, 0) if got := tmc.Get(key); got != "" { t.Error("TextMapCarrier.Reset() failed to clear initial data") } tmc.GotN(t, 1) tmc.GotKey(t, key) tmc.Set(key, value) tmc.SetKeyValue(t, key, value) tmc.SetN(t, 1) tmc.Reset(nil) tmc.GotN(t, 0) tmc.SetN(t, 0) if got := tmc.Get(key); got != "" { t.Error("TextMapCarrier.Reset() failed to clear data") } } opentelemetry-go-1.21.0/internal/internaltest/text_map_propagator.go000066400000000000000000000067431452547353200260540ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/text_map_propagator.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest // import "go.opentelemetry.io/otel/internal/internaltest" import ( "context" "fmt" "strconv" "strings" "testing" "go.opentelemetry.io/otel/propagation" ) type ctxKeyType string type state struct { Injections uint64 Extractions uint64 } func newState(encoded string) state { if encoded == "" { return state{} } s0, s1, _ := strings.Cut(encoded, ",") injects, _ := strconv.ParseUint(s0, 10, 64) extracts, _ := strconv.ParseUint(s1, 10, 64) return state{ Injections: injects, Extractions: extracts, } } func (s state) String() string { return fmt.Sprintf("%d,%d", s.Injections, s.Extractions) } // TextMapPropagator is a propagation.TextMapPropagator used for testing. type TextMapPropagator struct { name string ctxKey ctxKeyType } var _ propagation.TextMapPropagator = (*TextMapPropagator)(nil) // NewTextMapPropagator returns a new TextMapPropagator for testing. It will // use name as the key it injects into a TextMapCarrier when Inject is called. func NewTextMapPropagator(name string) *TextMapPropagator { return &TextMapPropagator{name: name, ctxKey: ctxKeyType(name)} } func (p *TextMapPropagator) stateFromContext(ctx context.Context) state { if v := ctx.Value(p.ctxKey); v != nil { if s, ok := v.(state); ok { return s } } return state{} } func (p *TextMapPropagator) stateFromCarrier(carrier propagation.TextMapCarrier) state { return newState(carrier.Get(p.name)) } // Inject sets cross-cutting concerns for p from ctx into carrier. func (p *TextMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) { s := p.stateFromContext(ctx) s.Injections++ carrier.Set(p.name, s.String()) } // InjectedN tests if p has made n injections to carrier. func (p *TextMapPropagator) InjectedN(t *testing.T, carrier *TextMapCarrier, n int) bool { if actual := p.stateFromCarrier(carrier).Injections; actual != uint64(n) { t.Errorf("TextMapPropagator{%q} injected %d times, not %d", p.name, actual, n) return false } return true } // Extract reads cross-cutting concerns for p from carrier into ctx. func (p *TextMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context { s := p.stateFromCarrier(carrier) s.Extractions++ return context.WithValue(ctx, p.ctxKey, s) } // ExtractedN tests if p has made n extractions from the lineage of ctx. // nolint (context is not first arg) func (p *TextMapPropagator) ExtractedN(t *testing.T, ctx context.Context, n int) bool { if actual := p.stateFromContext(ctx).Extractions; actual != uint64(n) { t.Errorf("TextMapPropagator{%q} extracted %d time, not %d", p.name, actual, n) return false } return true } // Fields returns the name of p as the key who's value is set with Inject. func (p *TextMapPropagator) Fields() []string { return []string{p.name} } opentelemetry-go-1.21.0/internal/internaltest/text_map_propagator_test.go000066400000000000000000000044141452547353200271040ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. 
// source: internal/shared/internaltest/text_map_propagator_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest import ( "context" "testing" ) func TestTextMapPropagatorInjectExtract(t *testing.T) { name := "testing" ctx := context.Background() carrier := NewTextMapCarrier(map[string]string{name: value}) propagator := NewTextMapPropagator(name) propagator.Inject(ctx, carrier) // Carrier value overridden with state. if carrier.SetKeyValue(t, name, "1,0") { // Ensure nothing has been extracted yet. propagator.ExtractedN(t, ctx, 0) // Test the injection was counted. propagator.InjectedN(t, carrier, 1) } ctx = propagator.Extract(ctx, carrier) v := ctx.Value(ctxKeyType(name)) if v == nil { t.Error("TextMapPropagator.Extract failed to extract state") } if s, ok := v.(state); !ok { t.Error("TextMapPropagator.Extract did not extract proper state") } else if s.Extractions != 1 { t.Error("TextMapPropagator.Extract did not increment state.Extractions") } if carrier.GotKey(t, name) { // Test the extraction was counted. propagator.ExtractedN(t, ctx, 1) // Ensure no additional injection was recorded. propagator.InjectedN(t, carrier, 1) } } func TestTextMapPropagatorFields(t *testing.T) { name := "testing" propagator := NewTextMapPropagator(name) if got := propagator.Fields(); len(got) != 1 { t.Errorf("TextMapPropagator.Fields returned %d fields, want 1", len(got)) } else if got[0] != name { t.Errorf("TextMapPropagator.Fields returned %q, want %q", got[0], name) } } func TestNewStateEmpty(t *testing.T) { if want, got := (state{}), newState(""); got != want { t.Errorf("newState(\"\") returned %v, want %v", got, want) } } opentelemetry-go-1.21.0/internal/matchers/000077500000000000000000000000001452547353200205265ustar00rootroot00000000000000opentelemetry-go-1.21.0/internal/matchers/expectation.go000066400000000000000000000175521452547353200234120ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/matchers/expectation.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package matchers // import "go.opentelemetry.io/otel/internal/matchers" import ( "fmt" "reflect" "regexp" "runtime/debug" "strings" "testing" "time" ) var stackTracePruneRE = regexp.MustCompile(`runtime\/debug|testing|internal\/matchers`) type Expectation struct { t *testing.T actual interface{} } func (e *Expectation) ToEqual(expected interface{}) { e.verifyExpectedNotNil(expected) if !reflect.DeepEqual(e.actual, expected) { e.fail(fmt.Sprintf("Expected\n\t%v\nto equal\n\t%v", e.actual, expected)) } } func (e *Expectation) NotToEqual(expected interface{}) { e.verifyExpectedNotNil(expected) if reflect.DeepEqual(e.actual, expected) { e.fail(fmt.Sprintf("Expected\n\t%v\nnot to equal\n\t%v", e.actual, expected)) } } func (e *Expectation) ToBeNil() { if e.actual != nil { e.fail(fmt.Sprintf("Expected\n\t%v\nto be nil", e.actual)) } } func (e *Expectation) NotToBeNil() { if e.actual == nil { e.fail(fmt.Sprintf("Expected\n\t%v\nnot to be nil", e.actual)) } } func (e *Expectation) ToBeTrue() { switch a := e.actual.(type) { case bool: if !a { e.fail(fmt.Sprintf("Expected\n\t%v\nto be true", e.actual)) } default: e.fail(fmt.Sprintf("Cannot check if non-bool value\n\t%v\nis truthy", a)) } } func (e *Expectation) ToBeFalse() { switch a := e.actual.(type) { case bool: if a { e.fail(fmt.Sprintf("Expected\n\t%v\nto be false", e.actual)) } default: e.fail(fmt.Sprintf("Cannot check if non-bool value\n\t%v\nis truthy", a)) } } func (e *Expectation) NotToPanic() { switch a := e.actual.(type) { case func(): func() { defer func() { if recovered := recover(); recovered != nil { e.fail(fmt.Sprintf("Expected panic\n\t%v\nto have not been raised", recovered)) } }() a() }() default: e.fail(fmt.Sprintf("Cannot check if non-func value\n\t%v\nis truthy", a)) } } func (e *Expectation) ToSucceed() { switch actual := e.actual.(type) { case error: if actual != nil { e.fail(fmt.Sprintf("Expected error\n\t%v\nto have succeeded", actual)) } default: e.fail(fmt.Sprintf("Cannot check if non-error value\n\t%v\nsucceeded", actual)) } } func (e *Expectation) ToMatchError(expected interface{}) { e.verifyExpectedNotNil(expected) actual, ok := e.actual.(error) if !ok { e.fail(fmt.Sprintf("Cannot check if non-error value\n\t%v\nmatches error", e.actual)) } switch expected := expected.(type) { case error: if !reflect.DeepEqual(actual, expected) { e.fail(fmt.Sprintf("Expected\n\t%v\nto match error\n\t%v", actual, expected)) } case string: if actual.Error() != expected { e.fail(fmt.Sprintf("Expected\n\t%v\nto match error\n\t%v", actual, expected)) } default: e.fail(fmt.Sprintf("Cannot match\n\t%v\nagainst non-error\n\t%v", actual, expected)) } } func (e *Expectation) ToContain(expected interface{}) { actualValue := reflect.ValueOf(e.actual) actualKind := actualValue.Kind() switch actualKind { case reflect.Array, reflect.Slice: default: e.fail(fmt.Sprintf("Expected\n\t%v\nto be an array", e.actual)) return } expectedValue := reflect.ValueOf(expected) expectedKind := expectedValue.Kind() switch expectedKind { case reflect.Array, reflect.Slice: default: expectedValue = reflect.ValueOf([]interface{}{expected}) } for i := 0; i < expectedValue.Len(); i++ { var contained bool expectedElem := expectedValue.Index(i).Interface() for j := 0; j < actualValue.Len(); j++ { if reflect.DeepEqual(actualValue.Index(j).Interface(), expectedElem) { contained = true break } } if !contained { e.fail(fmt.Sprintf("Expected\n\t%v\nto contain\n\t%v", e.actual, expectedElem)) return } } } func (e *Expectation) NotToContain(expected interface{}) { actualValue := 
reflect.ValueOf(e.actual) actualKind := actualValue.Kind() switch actualKind { case reflect.Array, reflect.Slice: default: e.fail(fmt.Sprintf("Expected\n\t%v\nto be an array", e.actual)) return } expectedValue := reflect.ValueOf(expected) expectedKind := expectedValue.Kind() switch expectedKind { case reflect.Array, reflect.Slice: default: expectedValue = reflect.ValueOf([]interface{}{expected}) } for i := 0; i < expectedValue.Len(); i++ { expectedElem := expectedValue.Index(i).Interface() for j := 0; j < actualValue.Len(); j++ { if reflect.DeepEqual(actualValue.Index(j).Interface(), expectedElem) { e.fail(fmt.Sprintf("Expected\n\t%v\nnot to contain\n\t%v", e.actual, expectedElem)) return } } } } func (e *Expectation) ToMatchInAnyOrder(expected interface{}) { expectedValue := reflect.ValueOf(expected) expectedKind := expectedValue.Kind() switch expectedKind { case reflect.Array, reflect.Slice: default: e.fail(fmt.Sprintf("Expected\n\t%v\nto be an array", expected)) return } actualValue := reflect.ValueOf(e.actual) actualKind := actualValue.Kind() if actualKind != expectedKind { e.fail(fmt.Sprintf("Expected\n\t%v\nto be the same type as\n\t%v", e.actual, expected)) return } if actualValue.Len() != expectedValue.Len() { e.fail(fmt.Sprintf("Expected\n\t%v\nto have the same length as\n\t%v", e.actual, expected)) return } var unmatched []interface{} for i := 0; i < expectedValue.Len(); i++ { unmatched = append(unmatched, expectedValue.Index(i).Interface()) } for i := 0; i < actualValue.Len(); i++ { var found bool for j, elem := range unmatched { if reflect.DeepEqual(actualValue.Index(i).Interface(), elem) { found = true unmatched = append(unmatched[:j], unmatched[j+1:]...) break } } if !found { e.fail(fmt.Sprintf("Expected\n\t%v\nto contain the same elements as\n\t%v", e.actual, expected)) } } } func (e *Expectation) ToBeTemporally(matcher TemporalMatcher, compareTo interface{}) { if actual, ok := e.actual.(time.Time); ok { ct, ok := compareTo.(time.Time) if !ok { e.fail(fmt.Sprintf("Cannot compare to non-temporal value\n\t%v", compareTo)) return } switch matcher { case Before: if !actual.Before(ct) { e.fail(fmt.Sprintf("Expected\n\t%v\nto be temporally before\n\t%v", e.actual, compareTo)) } case BeforeOrSameTime: if actual.After(ct) { e.fail(fmt.Sprintf("Expected\n\t%v\nto be temporally before or at the same time as\n\t%v", e.actual, compareTo)) } case After: if !actual.After(ct) { e.fail(fmt.Sprintf("Expected\n\t%v\nto be temporally after\n\t%v", e.actual, compareTo)) } case AfterOrSameTime: if actual.Before(ct) { e.fail(fmt.Sprintf("Expected\n\t%v\nto be temporally after or at the same time as\n\t%v", e.actual, compareTo)) } default: e.fail("Cannot compare times with unexpected temporal matcher") } return } e.fail(fmt.Sprintf("Cannot compare non-temporal value\n\t%v", e.actual)) } func (e *Expectation) verifyExpectedNotNil(expected interface{}) { if expected == nil { e.fail("Refusing to compare with . Use `ToBeNil` or `NotToBeNil` instead.") } } func (e *Expectation) fail(msg string) { // Prune the stack trace so that it's easier to see relevant lines stack := strings.Split(string(debug.Stack()), "\n") var prunedStack []string for _, line := range stack { if !stackTracePruneRE.MatchString(line) { prunedStack = append(prunedStack, line) } } e.t.Fatalf("\n%s\n%s\n", strings.Join(prunedStack, "\n"), msg) } opentelemetry-go-1.21.0/internal/matchers/expecter.go000066400000000000000000000020031452547353200226670ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. 
// source: internal/shared/matchers/expecter.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package matchers // import "go.opentelemetry.io/otel/internal/matchers" import ( "testing" ) type Expecter struct { t *testing.T } func NewExpecter(t *testing.T) *Expecter { return &Expecter{ t: t, } } func (a *Expecter) Expect(actual interface{}) *Expectation { return &Expectation{ t: a.t, actual: actual, } } opentelemetry-go-1.21.0/internal/matchers/package.go000066400000000000000000000012361452547353200224520ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package matchers // import "go.opentelemetry.io/otel/internal/matchers" opentelemetry-go-1.21.0/internal/matchers/temporal_matcher.go000066400000000000000000000017221452547353200244050ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/matchers/temporal_matcher.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package matchers // import "go.opentelemetry.io/otel/internal/matchers" type TemporalMatcher byte //nolint:revive // ignoring missing comments for unexported constants in an internal package const ( Before TemporalMatcher = iota BeforeOrSameTime After AfterOrSameTime ) opentelemetry-go-1.21.0/internal/rawhelpers.go000066400000000000000000000023711452547353200214260ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package internal // import "go.opentelemetry.io/otel/internal" import ( "math" "unsafe" ) func BoolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag. if b { return 1 } return 0 } func RawToBool(r uint64) bool { return r != 0 } func Int64ToRaw(i int64) uint64 { return uint64(i) } func RawToInt64(r uint64) int64 { return int64(r) } func Float64ToRaw(f float64) uint64 { return math.Float64bits(f) } func RawToFloat64(r uint64) float64 { return math.Float64frombits(r) } func RawPtrToFloat64Ptr(r *uint64) *float64 { return (*float64)(unsafe.Pointer(r)) } func RawPtrToInt64Ptr(r *uint64) *int64 { return (*int64)(unsafe.Pointer(r)) } opentelemetry-go-1.21.0/internal/shared/000077500000000000000000000000001452547353200201665ustar00rootroot00000000000000opentelemetry-go-1.21.0/internal/shared/README.md000066400000000000000000000002311452547353200214410ustar00rootroot00000000000000# Shared Code under this directory contains reusable internal code which is distributed across packages using `//go:generate gotmpl` in `gen.go` files. opentelemetry-go-1.21.0/internal/shared/internaltest/000077500000000000000000000000001452547353200227025ustar00rootroot00000000000000opentelemetry-go-1.21.0/internal/shared/internaltest/alignment.go.tmpl000066400000000000000000000044221452547353200261640ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/alignment.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest /* This file contains common utilities and objects to validate memory alignment of Go types. The primary use of this functionality is intended to ensure `struct` fields that need to be 64-bit aligned so they can be passed as arguments to 64-bit atomic operations. The common workflow is to define a slice of `FieldOffset` and pass them to the `Aligned8Byte` function from within a `TestMain` function from a package's tests. It is important to make this call from the `TestMain` function prior to running the rest of the test suit as it can provide useful diagnostics about field alignment instead of ambiguous nil pointer dereference and runtime panic. For more information: https://github.com/open-telemetry/opentelemetry-go/issues/341 */ import ( "fmt" "io" ) // FieldOffset is a preprocessor representation of a struct field alignment. type FieldOffset struct { // Name of the field. Name string // Offset of the field in bytes. // // To compute this at compile time use unsafe.Offsetof. Offset uintptr } // Aligned8Byte returns if all fields are aligned modulo 8-bytes. // // Error messaging is printed to out for any field determined misaligned. 
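//
// A minimal sketch of the workflow described above (editor's addition; the
// struct `record` and its int64 field `count` are hypothetical, and the
// snippet assumes the os, testing, and unsafe packages are imported):
//
//	func TestMain(m *testing.M) {
//		fields := []FieldOffset{
//			{Name: "record.count", Offset: unsafe.Offsetof(record{}.count)},
//		}
//		if !Aligned8Byte(fields, os.Stderr) {
//			os.Exit(1)
//		}
//		os.Exit(m.Run())
//	}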
func Aligned8Byte(fields []FieldOffset, out io.Writer) bool { misaligned := make([]FieldOffset, 0) for _, f := range fields { if f.Offset%8 != 0 { misaligned = append(misaligned, f) } } if len(misaligned) == 0 { return true } fmt.Fprintln(out, "struct fields not aligned for 64-bit atomic operations:") for _, f := range misaligned { fmt.Fprintf(out, " %s: %d-byte offset\n", f.Name, f.Offset) } return false } opentelemetry-go-1.21.0/internal/shared/internaltest/env.go.tmpl000066400000000000000000000040211452547353200247710ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/env.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest import ( "os" ) type Env struct { Name string Value string Exists bool } // EnvStore stores and recovers environment variables. type EnvStore interface { // Records the environment variable into the store. Record(key string) // Restore recovers the environment variables in the store. Restore() error } var _ EnvStore = (*envStore)(nil) type envStore struct { store map[string]Env } func (s *envStore) add(env Env) { s.store[env.Name] = env } func (s *envStore) Restore() error { var err error for _, v := range s.store { if v.Exists { err = os.Setenv(v.Name, v.Value) } else { err = os.Unsetenv(v.Name) } if err != nil { return err } } return nil } func (s *envStore) setEnv(key, value string) error { s.Record(key) err := os.Setenv(key, value) if err != nil { return err } return nil } func (s *envStore) Record(key string) { originValue, exists := os.LookupEnv(key) s.add(Env{ Name: key, Value: originValue, Exists: exists, }) } func NewEnvStore() EnvStore { return newEnvStore() } func newEnvStore() *envStore { return &envStore{store: make(map[string]Env)} } func SetEnvVariables(env map[string]string) (EnvStore, error) { envStore := newEnvStore() for k, v := range env { err := envStore.setEnv(k, v) if err != nil { return nil, err } } return envStore, nil } opentelemetry-go-1.21.0/internal/shared/internaltest/env_test.go.tmpl000066400000000000000000000114731452547353200260410ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/env_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package internaltest import ( "os" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" ) type EnvStoreTestSuite struct { suite.Suite } func (s *EnvStoreTestSuite) Test_add() { envStore := newEnvStore() e := Env{ Name: "name", Value: "value", Exists: true, } envStore.add(e) envStore.add(e) s.Assert().Len(envStore.store, 1) } func (s *EnvStoreTestSuite) TestRecord() { testCases := []struct { name string env Env expectedEnvStore *envStore }{ { name: "record exists env", env: Env{ Name: "name", Value: "value", Exists: true, }, expectedEnvStore: &envStore{store: map[string]Env{ "name": { Name: "name", Value: "value", Exists: true, }, }}, }, { name: "record exists env, but its value is empty", env: Env{ Name: "name", Value: "", Exists: true, }, expectedEnvStore: &envStore{store: map[string]Env{ "name": { Name: "name", Value: "", Exists: true, }, }}, }, { name: "record not exists env", env: Env{ Name: "name", Exists: false, }, expectedEnvStore: &envStore{store: map[string]Env{ "name": { Name: "name", Exists: false, }, }}, }, } for _, tc := range testCases { s.Run(tc.name, func() { if tc.env.Exists { s.Assert().NoError(os.Setenv(tc.env.Name, tc.env.Value)) } envStore := newEnvStore() envStore.Record(tc.env.Name) s.Assert().Equal(tc.expectedEnvStore, envStore) if tc.env.Exists { s.Assert().NoError(os.Unsetenv(tc.env.Name)) } }) } } func (s *EnvStoreTestSuite) TestRestore() { testCases := []struct { name string env Env expectedEnvValue string expectedEnvExists bool }{ { name: "exists env", env: Env{ Name: "name", Value: "value", Exists: true, }, expectedEnvValue: "value", expectedEnvExists: true, }, { name: "no exists env", env: Env{ Name: "name", Exists: false, }, expectedEnvExists: false, }, } for _, tc := range testCases { s.Run(tc.name, func() { envStore := newEnvStore() envStore.add(tc.env) // Backup backup := newEnvStore() backup.Record(tc.env.Name) s.Require().NoError(os.Unsetenv(tc.env.Name)) s.Assert().NoError(envStore.Restore()) v, exists := os.LookupEnv(tc.env.Name) s.Assert().Equal(tc.expectedEnvValue, v) s.Assert().Equal(tc.expectedEnvExists, exists) // Restore s.Require().NoError(backup.Restore()) }) } } func (s *EnvStoreTestSuite) Test_setEnv() { testCases := []struct { name string key string value string expectedEnvStore *envStore expectedEnvValue string expectedEnvExists bool }{ { name: "normal", key: "name", value: "value", expectedEnvStore: &envStore{store: map[string]Env{ "name": { Name: "name", Value: "other value", Exists: true, }, }}, expectedEnvValue: "value", expectedEnvExists: true, }, } for _, tc := range testCases { s.Run(tc.name, func() { envStore := newEnvStore() // Backup backup := newEnvStore() backup.Record(tc.key) s.Require().NoError(os.Setenv(tc.key, "other value")) s.Assert().NoError(envStore.setEnv(tc.key, tc.value)) s.Assert().Equal(tc.expectedEnvStore, envStore) v, exists := os.LookupEnv(tc.key) s.Assert().Equal(tc.expectedEnvValue, v) s.Assert().Equal(tc.expectedEnvExists, exists) // Restore s.Require().NoError(backup.Restore()) }) } } func TestEnvStoreTestSuite(t *testing.T) { suite.Run(t, new(EnvStoreTestSuite)) } func TestSetEnvVariables(t *testing.T) { envs := map[string]string{ "name1": "value1", "name2": "value2", } // Backup backup := newEnvStore() for k := range envs { backup.Record(k) } defer func() { require.NoError(t, backup.Restore()) }() store, err := SetEnvVariables(envs) assert.NoError(t, err) require.IsType(t, &envStore{}, store) concreteStore := store.(*envStore) 
assert.Len(t, concreteStore.store, 2) assert.Equal(t, backup, concreteStore) } opentelemetry-go-1.21.0/internal/shared/internaltest/errors.go.tmpl000066400000000000000000000015661452547353200255300ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/errors.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest type TestError string var _ error = TestError("") func NewTestError(s string) error { return TestError(s) } func (e TestError) Error() string { return string(e) } opentelemetry-go-1.21.0/internal/shared/internaltest/harness.go.tmpl000066400000000000000000000215351452547353200256550ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/harness.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest import ( "context" "fmt" "sync" "testing" "time" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "{{ .matchersImportPath }}" "go.opentelemetry.io/otel/trace" ) // Harness is a testing harness used to test implementations of the // OpenTelemetry API. type Harness struct { t *testing.T } // NewHarness returns an instantiated *Harness using t. func NewHarness(t *testing.T) *Harness { return &Harness{ t: t, } } // TestTracerProvider runs validation tests for an implementation of the OpenTelemetry // TracerProvider API. func (h *Harness) TestTracerProvider(subjectFactory func() trace.TracerProvider) { h.t.Run("#Start", func(t *testing.T) { t.Run("allow creating an arbitrary number of TracerProvider instances", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) tp1 := subjectFactory() tp2 := subjectFactory() e.Expect(tp1).NotToEqual(tp2) }) t.Run("all methods are safe to be called concurrently", func(t *testing.T) { t.Parallel() runner := func(tp trace.TracerProvider) <-chan struct{} { done := make(chan struct{}) go func(tp trace.TracerProvider) { var wg sync.WaitGroup for i := 0; i < 20; i++ { wg.Add(1) go func(name, version string) { _ = tp.Tracer(name, trace.WithInstrumentationVersion(version)) wg.Done() }(fmt.Sprintf("tracer %d", i%5), fmt.Sprintf("%d", i)) } wg.Wait() done <- struct{}{} }(tp) return done } matchers.NewExpecter(t).Expect(func() { // Run with multiple TracerProvider to ensure they encapsulate // their own Tracers. 
tp1 := subjectFactory() tp2 := subjectFactory() done1 := runner(tp1) done2 := runner(tp2) <-done1 <-done2 }).NotToPanic() }) }) } // TestTracer runs validation tests for an implementation of the OpenTelemetry // Tracer API. func (h *Harness) TestTracer(subjectFactory func() trace.Tracer) { h.t.Run("#Start", func(t *testing.T) { t.Run("propagates the original context", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() ctxKey := testCtxKey{} ctxValue := "ctx value" ctx := context.WithValue(context.Background(), ctxKey, ctxValue) ctx, _ = subject.Start(ctx, "test") e.Expect(ctx.Value(ctxKey)).ToEqual(ctxValue) }) t.Run("returns a span containing the expected properties", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() _, span := subject.Start(context.Background(), "test") e.Expect(span).NotToBeNil() e.Expect(span.SpanContext().IsValid()).ToBeTrue() }) t.Run("stores the span on the provided context", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() ctx, span := subject.Start(context.Background(), "test") e.Expect(span).NotToBeNil() e.Expect(span.SpanContext()).NotToEqual(trace.SpanContext{}) e.Expect(trace.SpanFromContext(ctx)).ToEqual(span) }) t.Run("starts spans with unique trace and span IDs", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() _, span1 := subject.Start(context.Background(), "span1") _, span2 := subject.Start(context.Background(), "span2") sc1 := span1.SpanContext() sc2 := span2.SpanContext() e.Expect(sc1.TraceID()).NotToEqual(sc2.TraceID()) e.Expect(sc1.SpanID()).NotToEqual(sc2.SpanID()) }) t.Run("propagates a parent's trace ID through the context", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() ctx, parent := subject.Start(context.Background(), "parent") _, child := subject.Start(ctx, "child") psc := parent.SpanContext() csc := child.SpanContext() e.Expect(csc.TraceID()).ToEqual(psc.TraceID()) e.Expect(csc.SpanID()).NotToEqual(psc.SpanID()) }) t.Run("ignores parent's trace ID when new root is requested", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() ctx, parent := subject.Start(context.Background(), "parent") _, child := subject.Start(ctx, "child", trace.WithNewRoot()) psc := parent.SpanContext() csc := child.SpanContext() e.Expect(csc.TraceID()).NotToEqual(psc.TraceID()) e.Expect(csc.SpanID()).NotToEqual(psc.SpanID()) }) t.Run("propagates remote parent's trace ID through the context", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() _, remoteParent := subject.Start(context.Background(), "remote parent") parentCtx := trace.ContextWithRemoteSpanContext(context.Background(), remoteParent.SpanContext()) _, child := subject.Start(parentCtx, "child") psc := remoteParent.SpanContext() csc := child.SpanContext() e.Expect(csc.TraceID()).ToEqual(psc.TraceID()) e.Expect(csc.SpanID()).NotToEqual(psc.SpanID()) }) t.Run("ignores remote parent's trace ID when new root is requested", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() _, remoteParent := subject.Start(context.Background(), "remote parent") parentCtx := trace.ContextWithRemoteSpanContext(context.Background(), remoteParent.SpanContext()) _, child := subject.Start(parentCtx, "child", trace.WithNewRoot()) psc := remoteParent.SpanContext() csc := child.SpanContext() 
e.Expect(csc.TraceID()).NotToEqual(psc.TraceID()) e.Expect(csc.SpanID()).NotToEqual(psc.SpanID()) }) t.Run("all methods are safe to be called concurrently", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) tracer := subjectFactory() ctx, parent := tracer.Start(context.Background(), "span") runner := func(tp trace.Tracer) <-chan struct{} { done := make(chan struct{}) go func(tp trace.Tracer) { var wg sync.WaitGroup for i := 0; i < 20; i++ { wg.Add(1) go func(name string) { defer wg.Done() _, child := tp.Start(ctx, name) psc := parent.SpanContext() csc := child.SpanContext() e.Expect(csc.TraceID()).ToEqual(psc.TraceID()) e.Expect(csc.SpanID()).NotToEqual(psc.SpanID()) }(fmt.Sprintf("span %d", i)) } wg.Wait() done <- struct{}{} }(tp) return done } e.Expect(func() { done := runner(tracer) <-done }).NotToPanic() }) }) h.testSpan(subjectFactory) } func (h *Harness) testSpan(tracerFactory func() trace.Tracer) { methods := map[string]func(span trace.Span){ "#End": func(span trace.Span) { span.End() }, "#AddEvent": func(span trace.Span) { span.AddEvent("test event") }, "#AddEventWithTimestamp": func(span trace.Span) { span.AddEvent("test event", trace.WithTimestamp(time.Now().Add(1*time.Second))) }, "#SetStatus": func(span trace.Span) { span.SetStatus(codes.Error, "internal") }, "#SetName": func(span trace.Span) { span.SetName("new name") }, "#SetAttributes": func(span trace.Span) { span.SetAttributes(attribute.String("key1", "value"), attribute.Int("key2", 123)) }, } mechanisms := map[string]func() trace.Span{ "Span created via Tracer#Start": func() trace.Span { tracer := tracerFactory() _, subject := tracer.Start(context.Background(), "test") return subject }, "Span created via span.TracerProvider()": func() trace.Span { ctx, spanA := tracerFactory().Start(context.Background(), "span1") _, spanB := spanA.TracerProvider().Tracer("second").Start(ctx, "span2") return spanB }, } for mechanismName, mechanism := range mechanisms { h.t.Run(mechanismName, func(t *testing.T) { for methodName, method := range methods { t.Run(methodName, func(t *testing.T) { t.Run("is thread-safe", func(t *testing.T) { t.Parallel() span := mechanism() wg := &sync.WaitGroup{} wg.Add(2) go func() { defer wg.Done() method(span) }() go func() { defer wg.Done() method(span) }() wg.Wait() }) }) } t.Run("#End", func(t *testing.T) { t.Run("can be called multiple times", func(t *testing.T) { t.Parallel() span := mechanism() span.End() span.End() }) }) }) } } type testCtxKey struct{} opentelemetry-go-1.21.0/internal/shared/internaltest/text_map_carrier.go.tmpl000066400000000000000000000072061452547353200275410ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/text_map_carrier.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest import ( "sync" "testing" "go.opentelemetry.io/otel/propagation" ) // TextMapCarrier is a storage medium for a TextMapPropagator used in testing. 
// The methods of a TextMapCarrier are concurrent safe. type TextMapCarrier struct { mtx sync.Mutex gets []string sets [][2]string data map[string]string } var _ propagation.TextMapCarrier = (*TextMapCarrier)(nil) // NewTextMapCarrier returns a new *TextMapCarrier populated with data. func NewTextMapCarrier(data map[string]string) *TextMapCarrier { copied := make(map[string]string, len(data)) for k, v := range data { copied[k] = v } return &TextMapCarrier{data: copied} } // Keys returns the keys for which this carrier has a value. func (c *TextMapCarrier) Keys() []string { c.mtx.Lock() defer c.mtx.Unlock() result := make([]string, 0, len(c.data)) for k := range c.data { result = append(result, k) } return result } // Get returns the value associated with the passed key. func (c *TextMapCarrier) Get(key string) string { c.mtx.Lock() defer c.mtx.Unlock() c.gets = append(c.gets, key) return c.data[key] } // GotKey tests if c.Get has been called for key. func (c *TextMapCarrier) GotKey(t *testing.T, key string) bool { c.mtx.Lock() defer c.mtx.Unlock() for _, k := range c.gets { if k == key { return true } } t.Errorf("TextMapCarrier.Get(%q) has not been called", key) return false } // GotN tests if n calls to c.Get have been made. func (c *TextMapCarrier) GotN(t *testing.T, n int) bool { c.mtx.Lock() defer c.mtx.Unlock() if len(c.gets) != n { t.Errorf("TextMapCarrier.Get was called %d times, not %d", len(c.gets), n) return false } return true } // Set stores the key-value pair. func (c *TextMapCarrier) Set(key, value string) { c.mtx.Lock() defer c.mtx.Unlock() c.sets = append(c.sets, [2]string{key, value}) c.data[key] = value } // SetKeyValue tests if c.Set has been called for the key-value pair. func (c *TextMapCarrier) SetKeyValue(t *testing.T, key, value string) bool { c.mtx.Lock() defer c.mtx.Unlock() var vals []string for _, pair := range c.sets { if key == pair[0] { if value == pair[1] { return true } vals = append(vals, pair[1]) } } if len(vals) > 0 { t.Errorf("TextMapCarrier.Set called with %q and %v values, but not %s", key, vals, value) } t.Errorf("TextMapCarrier.Set(%q,%q) has not been called", key, value) return false } // SetN tests if n calls to c.Set have been made. func (c *TextMapCarrier) SetN(t *testing.T, n int) bool { c.mtx.Lock() defer c.mtx.Unlock() if len(c.sets) != n { t.Errorf("TextMapCarrier.Set was called %d times, not %d", len(c.sets), n) return false } return true } // Reset zeros out the recording state and sets the carried values to data. func (c *TextMapCarrier) Reset(data map[string]string) { copied := make(map[string]string, len(data)) for k, v := range data { copied[k] = v } c.mtx.Lock() defer c.mtx.Unlock() c.gets = nil c.sets = nil c.data = copied } opentelemetry-go-1.21.0/internal/shared/internaltest/text_map_carrier_test.go.tmpl000066400000000000000000000046051452547353200306000ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/text_map_carrier_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package internaltest import ( "reflect" "testing" ) var key, value = "test", "true" func TestTextMapCarrierKeys(t *testing.T) { tmc := NewTextMapCarrier(map[string]string{key: value}) expected, actual := []string{key}, tmc.Keys() if !reflect.DeepEqual(actual, expected) { t.Errorf("expected tmc.Keys() to be %v but it was %v", expected, actual) } } func TestTextMapCarrierGet(t *testing.T) { tmc := NewTextMapCarrier(map[string]string{key: value}) tmc.GotN(t, 0) if got := tmc.Get("empty"); got != "" { t.Errorf("TextMapCarrier.Get returned %q for an empty key", got) } tmc.GotKey(t, "empty") tmc.GotN(t, 1) if got := tmc.Get(key); got != value { t.Errorf("TextMapCarrier.Get(%q) returned %q, want %q", key, got, value) } tmc.GotKey(t, key) tmc.GotN(t, 2) } func TestTextMapCarrierSet(t *testing.T) { tmc := NewTextMapCarrier(nil) tmc.SetN(t, 0) tmc.Set(key, value) if got, ok := tmc.data[key]; !ok { t.Errorf("TextMapCarrier.Set(%q,%q) failed to store pair", key, value) } else if got != value { t.Errorf("TextMapCarrier.Set(%q,%q) stored (%q,%q), not (%q,%q)", key, value, key, got, key, value) } tmc.SetKeyValue(t, key, value) tmc.SetN(t, 1) } func TestTextMapCarrierReset(t *testing.T) { tmc := NewTextMapCarrier(map[string]string{key: value}) tmc.GotN(t, 0) tmc.SetN(t, 0) tmc.Reset(nil) tmc.GotN(t, 0) tmc.SetN(t, 0) if got := tmc.Get(key); got != "" { t.Error("TextMapCarrier.Reset() failed to clear initial data") } tmc.GotN(t, 1) tmc.GotKey(t, key) tmc.Set(key, value) tmc.SetKeyValue(t, key, value) tmc.SetN(t, 1) tmc.Reset(nil) tmc.GotN(t, 0) tmc.SetN(t, 0) if got := tmc.Get(key); got != "" { t.Error("TextMapCarrier.Reset() failed to clear data") } } opentelemetry-go-1.21.0/internal/shared/internaltest/text_map_propagator.go.tmpl000066400000000000000000000066501452547353200302720ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/text_map_propagator.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest import ( "context" "fmt" "strconv" "strings" "testing" "go.opentelemetry.io/otel/propagation" ) type ctxKeyType string type state struct { Injections uint64 Extractions uint64 } func newState(encoded string) state { if encoded == "" { return state{} } s0, s1, _ := strings.Cut(encoded, ",") injects, _ := strconv.ParseUint(s0, 10, 64) extracts, _ := strconv.ParseUint(s1, 10, 64) return state{ Injections: injects, Extractions: extracts, } } func (s state) String() string { return fmt.Sprintf("%d,%d", s.Injections, s.Extractions) } // TextMapPropagator is a propagation.TextMapPropagator used for testing. type TextMapPropagator struct { name string ctxKey ctxKeyType } var _ propagation.TextMapPropagator = (*TextMapPropagator)(nil) // NewTextMapPropagator returns a new TextMapPropagator for testing. It will // use name as the key it injects into a TextMapCarrier when Inject is called. 
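//
// A short usage sketch (editor's addition; the variable names are
// hypothetical and the calls are assumed to run inside a test that has
// access to t *testing.T):
//
//	prop := NewTextMapPropagator("example")
//	carrier := NewTextMapCarrier(nil)
//	prop.Inject(context.Background(), carrier) // carrier now carries "1,0"
//	prop.InjectedN(t, carrier, 1)              // one injection recorded
//	ctx := prop.Extract(context.Background(), carrier)
//	prop.ExtractedN(t, ctx, 1)                 // one extraction recorded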
func NewTextMapPropagator(name string) *TextMapPropagator { return &TextMapPropagator{name: name, ctxKey: ctxKeyType(name)} } func (p *TextMapPropagator) stateFromContext(ctx context.Context) state { if v := ctx.Value(p.ctxKey); v != nil { if s, ok := v.(state); ok { return s } } return state{} } func (p *TextMapPropagator) stateFromCarrier(carrier propagation.TextMapCarrier) state { return newState(carrier.Get(p.name)) } // Inject sets cross-cutting concerns for p from ctx into carrier. func (p *TextMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) { s := p.stateFromContext(ctx) s.Injections++ carrier.Set(p.name, s.String()) } // InjectedN tests if p has made n injections to carrier. func (p *TextMapPropagator) InjectedN(t *testing.T, carrier *TextMapCarrier, n int) bool { if actual := p.stateFromCarrier(carrier).Injections; actual != uint64(n) { t.Errorf("TextMapPropagator{%q} injected %d times, not %d", p.name, actual, n) return false } return true } // Extract reads cross-cutting concerns for p from carrier into ctx. func (p *TextMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context { s := p.stateFromCarrier(carrier) s.Extractions++ return context.WithValue(ctx, p.ctxKey, s) } // ExtractedN tests if p has made n extractions from the lineage of ctx. // nolint (context is not first arg) func (p *TextMapPropagator) ExtractedN(t *testing.T, ctx context.Context, n int) bool { if actual := p.stateFromContext(ctx).Extractions; actual != uint64(n) { t.Errorf("TextMapPropagator{%q} extracted %d time, not %d", p.name, actual, n) return false } return true } // Fields returns the name of p as the key who's value is set with Inject. func (p *TextMapPropagator) Fields() []string { return []string{p.name} } opentelemetry-go-1.21.0/internal/shared/internaltest/text_map_propagator_test.go.tmpl000066400000000000000000000044141452547353200313250ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/text_map_propagator_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest import ( "context" "testing" ) func TestTextMapPropagatorInjectExtract(t *testing.T) { name := "testing" ctx := context.Background() carrier := NewTextMapCarrier(map[string]string{name: value}) propagator := NewTextMapPropagator(name) propagator.Inject(ctx, carrier) // Carrier value overridden with state. if carrier.SetKeyValue(t, name, "1,0") { // Ensure nothing has been extracted yet. propagator.ExtractedN(t, ctx, 0) // Test the injection was counted. 
propagator.InjectedN(t, carrier, 1) } ctx = propagator.Extract(ctx, carrier) v := ctx.Value(ctxKeyType(name)) if v == nil { t.Error("TextMapPropagator.Extract failed to extract state") } if s, ok := v.(state); !ok { t.Error("TextMapPropagator.Extract did not extract proper state") } else if s.Extractions != 1 { t.Error("TextMapPropagator.Extract did not increment state.Extractions") } if carrier.GotKey(t, name) { // Test the extraction was counted. propagator.ExtractedN(t, ctx, 1) // Ensure no additional injection was recorded. propagator.InjectedN(t, carrier, 1) } } func TestTextMapPropagatorFields(t *testing.T) { name := "testing" propagator := NewTextMapPropagator(name) if got := propagator.Fields(); len(got) != 1 { t.Errorf("TextMapPropagator.Fields returned %d fields, want 1", len(got)) } else if got[0] != name { t.Errorf("TextMapPropagator.Fields returned %q, want %q", got[0], name) } } func TestNewStateEmpty(t *testing.T) { if want, got := (state{}), newState(""); got != want { t.Errorf("newState(\"\") returned %v, want %v", got, want) } } opentelemetry-go-1.21.0/internal/shared/matchers/000077500000000000000000000000001452547353200217745ustar00rootroot00000000000000opentelemetry-go-1.21.0/internal/shared/matchers/expectation.go.tmpl000066400000000000000000000174631452547353200256340ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/matchers/expectation.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package matchers import ( "fmt" "reflect" "regexp" "runtime/debug" "strings" "testing" "time" ) var stackTracePruneRE = regexp.MustCompile(`runtime\/debug|testing|internal\/matchers`) type Expectation struct { t *testing.T actual interface{} } func (e *Expectation) ToEqual(expected interface{}) { e.verifyExpectedNotNil(expected) if !reflect.DeepEqual(e.actual, expected) { e.fail(fmt.Sprintf("Expected\n\t%v\nto equal\n\t%v", e.actual, expected)) } } func (e *Expectation) NotToEqual(expected interface{}) { e.verifyExpectedNotNil(expected) if reflect.DeepEqual(e.actual, expected) { e.fail(fmt.Sprintf("Expected\n\t%v\nnot to equal\n\t%v", e.actual, expected)) } } func (e *Expectation) ToBeNil() { if e.actual != nil { e.fail(fmt.Sprintf("Expected\n\t%v\nto be nil", e.actual)) } } func (e *Expectation) NotToBeNil() { if e.actual == nil { e.fail(fmt.Sprintf("Expected\n\t%v\nnot to be nil", e.actual)) } } func (e *Expectation) ToBeTrue() { switch a := e.actual.(type) { case bool: if !a { e.fail(fmt.Sprintf("Expected\n\t%v\nto be true", e.actual)) } default: e.fail(fmt.Sprintf("Cannot check if non-bool value\n\t%v\nis truthy", a)) } } func (e *Expectation) ToBeFalse() { switch a := e.actual.(type) { case bool: if a { e.fail(fmt.Sprintf("Expected\n\t%v\nto be false", e.actual)) } default: e.fail(fmt.Sprintf("Cannot check if non-bool value\n\t%v\nis truthy", a)) } } func (e *Expectation) NotToPanic() { switch a := e.actual.(type) { case func(): func() { defer func() { if recovered := recover(); recovered != nil { e.fail(fmt.Sprintf("Expected panic\n\t%v\nto have not been raised", recovered)) } }() a() }() default: e.fail(fmt.Sprintf("Cannot check if non-func value\n\t%v\nis truthy", a)) } } func (e *Expectation) ToSucceed() { switch actual := e.actual.(type) { case error: if actual != nil { e.fail(fmt.Sprintf("Expected error\n\t%v\nto have succeeded", actual)) } default: e.fail(fmt.Sprintf("Cannot check if non-error value\n\t%v\nsucceeded", actual)) } } func (e *Expectation) ToMatchError(expected interface{}) { e.verifyExpectedNotNil(expected) actual, ok := e.actual.(error) if !ok { e.fail(fmt.Sprintf("Cannot check if non-error value\n\t%v\nmatches error", e.actual)) } switch expected := expected.(type) { case error: if !reflect.DeepEqual(actual, expected) { e.fail(fmt.Sprintf("Expected\n\t%v\nto match error\n\t%v", actual, expected)) } case string: if actual.Error() != expected { e.fail(fmt.Sprintf("Expected\n\t%v\nto match error\n\t%v", actual, expected)) } default: e.fail(fmt.Sprintf("Cannot match\n\t%v\nagainst non-error\n\t%v", actual, expected)) } } func (e *Expectation) ToContain(expected interface{}) { actualValue := reflect.ValueOf(e.actual) actualKind := actualValue.Kind() switch actualKind { case reflect.Array, reflect.Slice: default: e.fail(fmt.Sprintf("Expected\n\t%v\nto be an array", e.actual)) return } expectedValue := reflect.ValueOf(expected) expectedKind := expectedValue.Kind() switch expectedKind { case reflect.Array, reflect.Slice: default: expectedValue = reflect.ValueOf([]interface{}{expected}) } for i := 0; i < expectedValue.Len(); i++ { var contained bool expectedElem := expectedValue.Index(i).Interface() for j := 0; j < actualValue.Len(); j++ { if reflect.DeepEqual(actualValue.Index(j).Interface(), expectedElem) { contained = true break } } if !contained { e.fail(fmt.Sprintf("Expected\n\t%v\nto contain\n\t%v", e.actual, expectedElem)) return } } } func (e *Expectation) NotToContain(expected interface{}) { actualValue := reflect.ValueOf(e.actual) actualKind := 
actualValue.Kind() switch actualKind { case reflect.Array, reflect.Slice: default: e.fail(fmt.Sprintf("Expected\n\t%v\nto be an array", e.actual)) return } expectedValue := reflect.ValueOf(expected) expectedKind := expectedValue.Kind() switch expectedKind { case reflect.Array, reflect.Slice: default: expectedValue = reflect.ValueOf([]interface{}{expected}) } for i := 0; i < expectedValue.Len(); i++ { expectedElem := expectedValue.Index(i).Interface() for j := 0; j < actualValue.Len(); j++ { if reflect.DeepEqual(actualValue.Index(j).Interface(), expectedElem) { e.fail(fmt.Sprintf("Expected\n\t%v\nnot to contain\n\t%v", e.actual, expectedElem)) return } } } } func (e *Expectation) ToMatchInAnyOrder(expected interface{}) { expectedValue := reflect.ValueOf(expected) expectedKind := expectedValue.Kind() switch expectedKind { case reflect.Array, reflect.Slice: default: e.fail(fmt.Sprintf("Expected\n\t%v\nto be an array", expected)) return } actualValue := reflect.ValueOf(e.actual) actualKind := actualValue.Kind() if actualKind != expectedKind { e.fail(fmt.Sprintf("Expected\n\t%v\nto be the same type as\n\t%v", e.actual, expected)) return } if actualValue.Len() != expectedValue.Len() { e.fail(fmt.Sprintf("Expected\n\t%v\nto have the same length as\n\t%v", e.actual, expected)) return } var unmatched []interface{} for i := 0; i < expectedValue.Len(); i++ { unmatched = append(unmatched, expectedValue.Index(i).Interface()) } for i := 0; i < actualValue.Len(); i++ { var found bool for j, elem := range unmatched { if reflect.DeepEqual(actualValue.Index(i).Interface(), elem) { found = true unmatched = append(unmatched[:j], unmatched[j+1:]...) break } } if !found { e.fail(fmt.Sprintf("Expected\n\t%v\nto contain the same elements as\n\t%v", e.actual, expected)) } } } func (e *Expectation) ToBeTemporally(matcher TemporalMatcher, compareTo interface{}) { if actual, ok := e.actual.(time.Time); ok { ct, ok := compareTo.(time.Time) if !ok { e.fail(fmt.Sprintf("Cannot compare to non-temporal value\n\t%v", compareTo)) return } switch matcher { case Before: if !actual.Before(ct) { e.fail(fmt.Sprintf("Expected\n\t%v\nto be temporally before\n\t%v", e.actual, compareTo)) } case BeforeOrSameTime: if actual.After(ct) { e.fail(fmt.Sprintf("Expected\n\t%v\nto be temporally before or at the same time as\n\t%v", e.actual, compareTo)) } case After: if !actual.After(ct) { e.fail(fmt.Sprintf("Expected\n\t%v\nto be temporally after\n\t%v", e.actual, compareTo)) } case AfterOrSameTime: if actual.Before(ct) { e.fail(fmt.Sprintf("Expected\n\t%v\nto be temporally after or at the same time as\n\t%v", e.actual, compareTo)) } default: e.fail("Cannot compare times with unexpected temporal matcher") } return } e.fail(fmt.Sprintf("Cannot compare non-temporal value\n\t%v", e.actual)) } func (e *Expectation) verifyExpectedNotNil(expected interface{}) { if expected == nil { e.fail("Refusing to compare with <nil>. Use `ToBeNil` or `NotToBeNil` instead.") } } func (e *Expectation) fail(msg string) { // Prune the stack trace so that it's easier to see relevant lines stack := strings.Split(string(debug.Stack()), "\n") var prunedStack []string for _, line := range stack { if !stackTracePruneRE.MatchString(line) { prunedStack = append(prunedStack, line) } } e.t.Fatalf("\n%s\n%s\n", strings.Join(prunedStack, "\n"), msg) } opentelemetry-go-1.21.0/internal/shared/matchers/expecter.go.tmpl000066400000000000000000000017141452547353200251200ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. 
// source: internal/shared/matchers/expecter.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package matchers import ( "testing" ) type Expecter struct { t *testing.T } func NewExpecter(t *testing.T) *Expecter { return &Expecter{ t: t, } } func (a *Expecter) Expect(actual interface{}) *Expectation { return &Expectation{ t: a.t, actual: actual, } } opentelemetry-go-1.21.0/internal/shared/matchers/temporal_matcher.go.tmpl000066400000000000000000000016331452547353200266270ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/matchers/temporal_matcher.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package matchers type TemporalMatcher byte //nolint:revive // ignoring missing comments for unexported constants in an internal package const ( Before TemporalMatcher = iota BeforeOrSameTime After AfterOrSameTime ) opentelemetry-go-1.21.0/internal/shared/otlp/000077500000000000000000000000001452547353200211445ustar00rootroot00000000000000opentelemetry-go-1.21.0/internal/shared/otlp/envconfig/000077500000000000000000000000001452547353200231225ustar00rootroot00000000000000opentelemetry-go-1.21.0/internal/shared/otlp/envconfig/envconfig.go.tmpl000066400000000000000000000130311452547353200264000ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/envconfig/envconfig.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package envconfig import ( "crypto/tls" "crypto/x509" "errors" "fmt" "net/url" "strconv" "strings" "time" "go.opentelemetry.io/otel/internal/global" ) // ConfigFn is the generic function used to set a config. type ConfigFn func(*EnvOptionsReader) // EnvOptionsReader reads the required environment variables. type EnvOptionsReader struct { GetEnv func(string) string ReadFile func(string) ([]byte, error) Namespace string } // Apply runs every ConfigFn. 
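// Each ConfigFn receives the reader itself, so helpers such as WithString,
// WithHeaders, or WithCertPool can resolve values through GetEnvValue and
// ReadFile.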
func (e *EnvOptionsReader) Apply(opts ...ConfigFn) { for _, o := range opts { o(e) } } // GetEnvValue gets an OTLP environment variable value of the specified key // using the GetEnv function. // This function prepends the OTLP specified namespace to all key lookups. func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) { v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key))) return v, v != "" } // WithString retrieves the specified config and passes it to ConfigFn as a string. func WithString(n string, fn func(string)) func(e *EnvOptionsReader) { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { fn(v) } } } // WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn. func WithBool(n string, fn func(bool)) ConfigFn { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { b := strings.ToLower(v) == "true" fn(b) } } } // WithDuration retrieves the specified config and passes it to ConfigFn as a duration. func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { d, err := strconv.Atoi(v) if err != nil { global.Error(err, "parse duration", "input", v) return } fn(time.Duration(d) * time.Millisecond) } } } // WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers. func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { fn(stringToHeader(v)) } } } // WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL. func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { u, err := url.Parse(v) if err != nil { global.Error(err, "parse url", "input", v) return } fn(u) } } } // WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn. func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn { return func(e *EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { b, err := e.ReadFile(v) if err != nil { global.Error(err, "read tls ca cert file", "file", v) return } c, err := createCertPool(b) if err != nil { global.Error(err, "create tls cert pool") return } fn(c) } } } // WithClientCert returns a ConfigFn that reads the environment variable nc and nk as filepaths to a client certificate and key pair. If they exist, they are parsed as a crypto/tls.Certificate and it is passed to fn. 
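// For example (illustrative), with the reader's Namespace set to
// "OTEL_EXPORTER_OTLP", nc="CLIENT_CERTIFICATE" and nk="CLIENT_KEY" resolve to
// the OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE and OTEL_EXPORTER_OTLP_CLIENT_KEY
// environment variables; both referenced files are loaded with ReadFile and
// combined into the tls.Certificate handed to fn.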
func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn { return func(e *EnvOptionsReader) { vc, okc := e.GetEnvValue(nc) vk, okk := e.GetEnvValue(nk) if !okc || !okk { return } cert, err := e.ReadFile(vc) if err != nil { global.Error(err, "read tls client cert", "file", vc) return } key, err := e.ReadFile(vk) if err != nil { global.Error(err, "read tls client key", "file", vk) return } crt, err := tls.X509KeyPair(cert, key) if err != nil { global.Error(err, "create tls client key pair") return } fn(crt) } } func keyWithNamespace(ns, key string) string { if ns == "" { return key } return fmt.Sprintf("%s_%s", ns, key) } func stringToHeader(value string) map[string]string { headersPairs := strings.Split(value, ",") headers := make(map[string]string) for _, header := range headersPairs { n, v, found := strings.Cut(header, "=") if !found { global.Error(errors.New("missing '="), "parse headers", "input", header) continue } name, err := url.PathUnescape(n) if err != nil { global.Error(err, "escape header key", "key", n) continue } trimmedName := strings.TrimSpace(name) value, err := url.PathUnescape(v) if err != nil { global.Error(err, "escape header value", "value", v) continue } trimmedValue := strings.TrimSpace(value) headers[trimmedName] = trimmedValue } return headers } func createCertPool(certBytes []byte) (*x509.CertPool, error) { cp := x509.NewCertPool() if ok := cp.AppendCertsFromPEM(certBytes); !ok { return nil, errors.New("failed to append certificate to the cert pool") } return cp, nil } opentelemetry-go-1.21.0/internal/shared/otlp/envconfig/envconfig_test.go.tmpl000066400000000000000000000260371452547353200274510ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/envconfig/envconfig_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
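//
// The tests below drive the reader with injected GetEnv and ReadFile
// functions. A minimal sketch of the same wiring outside of tests
// (illustrative only; the callbacks merely print what they receive):
//
//	reader := envconfig.EnvOptionsReader{
//		GetEnv:    os.Getenv,
//		ReadFile:  os.ReadFile,
//		Namespace: "OTEL_EXPORTER_OTLP",
//	}
//	reader.Apply(
//		envconfig.WithString("ENDPOINT", func(v string) { fmt.Println("endpoint:", v) }),
//		envconfig.WithHeaders("HEADERS", func(h map[string]string) { fmt.Println("headers:", h) }),
//	)
//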
package envconfig import ( "crypto/tls" "crypto/x509" "errors" "net/url" "testing" "time" "github.com/stretchr/testify/assert" ) const WeakKey = ` -----BEGIN EC PRIVATE KEY----- MHcCAQEEIEbrSPmnlSOXvVzxCyv+VR3a0HDeUTvOcqrdssZ2k4gFoAoGCCqGSM49 AwEHoUQDQgAEDMTfv75J315C3K9faptS9iythKOMEeV/Eep73nWX531YAkmmwBSB 2dXRD/brsgLnfG57WEpxZuY7dPRbxu33BA== -----END EC PRIVATE KEY----- ` const WeakCertificate = ` -----BEGIN CERTIFICATE----- MIIBjjCCATWgAwIBAgIUKQSMC66MUw+kPp954ZYOcyKAQDswCgYIKoZIzj0EAwIw EjEQMA4GA1UECgwHb3RlbC1nbzAeFw0yMjEwMTkwMDA5MTlaFw0yMzEwMTkwMDA5 MTlaMBIxEDAOBgNVBAoMB290ZWwtZ28wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNC AAQMxN+/vknfXkLcr19qm1L2LK2Eo4wR5X8R6nvedZfnfVgCSabAFIHZ1dEP9uuy Aud8bntYSnFm5jt09FvG7fcEo2kwZzAdBgNVHQ4EFgQUicGuhnTTkYLZwofXMNLK SHFeCWgwHwYDVR0jBBgwFoAUicGuhnTTkYLZwofXMNLKSHFeCWgwDwYDVR0TAQH/ BAUwAwEB/zAUBgNVHREEDTALgglsb2NhbGhvc3QwCgYIKoZIzj0EAwIDRwAwRAIg Lfma8FnnxeSOi6223AsFfYwsNZ2RderNsQrS0PjEHb0CIBkrWacqARUAu7uT4cGu jVcIxYQqhId5L8p/mAv2PWZS -----END CERTIFICATE----- ` type testOption struct { TestString string TestBool bool TestDuration time.Duration TestHeaders map[string]string TestURL *url.URL TestTLS *tls.Config } func TestEnvConfig(t *testing.T) { parsedURL, err := url.Parse("https://example.com") assert.NoError(t, err) options := []testOption{} for _, testcase := range []struct { name string reader EnvOptionsReader configs []ConfigFn expectedOptions []testOption }{ { name: "with no namespace and a matching key", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithString("HELLO", func(v string) { options = append(options, testOption{TestString: v}) }), }, expectedOptions: []testOption{ { TestString: "world", }, }, }, { name: "with no namespace and a non-matching key", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithString("HOLA", func(v string) { options = append(options, testOption{TestString: v}) }), }, expectedOptions: []testOption{}, }, { name: "with a namespace and a matching key", reader: EnvOptionsReader{ Namespace: "MY_NAMESPACE", GetEnv: func(n string) string { if n == "MY_NAMESPACE_HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithString("HELLO", func(v string) { options = append(options, testOption{TestString: v}) }), }, expectedOptions: []testOption{ { TestString: "world", }, }, }, { name: "with no namespace and a non-matching key", reader: EnvOptionsReader{ Namespace: "MY_NAMESPACE", GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithString("HELLO", func(v string) { options = append(options, testOption{TestString: v}) }), }, expectedOptions: []testOption{}, }, { name: "with a bool config", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "true" } else if n == "WORLD" { return "false" } return "" }, }, configs: []ConfigFn{ WithBool("HELLO", func(b bool) { options = append(options, testOption{TestBool: b}) }), WithBool("WORLD", func(b bool) { options = append(options, testOption{TestBool: b}) }), }, expectedOptions: []testOption{ { TestBool: true, }, { TestBool: false, }, }, }, { name: "with an invalid bool config", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithBool("HELLO", func(b bool) { options = append(options, testOption{TestBool: b}) }), }, expectedOptions: []testOption{ { TestBool: false, 
}, }, }, { name: "with a duration config", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "60" } return "" }, }, configs: []ConfigFn{ WithDuration("HELLO", func(v time.Duration) { options = append(options, testOption{TestDuration: v}) }), }, expectedOptions: []testOption{ { TestDuration: 60_000_000, // 60 milliseconds }, }, }, { name: "with an invalid duration config", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithDuration("HELLO", func(v time.Duration) { options = append(options, testOption{TestDuration: v}) }), }, expectedOptions: []testOption{}, }, { name: "with headers", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "userId=42,userName=alice" } return "" }, }, configs: []ConfigFn{ WithHeaders("HELLO", func(v map[string]string) { options = append(options, testOption{TestHeaders: v}) }), }, expectedOptions: []testOption{ { TestHeaders: map[string]string{ "userId": "42", "userName": "alice", }, }, }, }, { name: "with invalid headers", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "world" } return "" }, }, configs: []ConfigFn{ WithHeaders("HELLO", func(v map[string]string) { options = append(options, testOption{TestHeaders: v}) }), }, expectedOptions: []testOption{ { TestHeaders: map[string]string{}, }, }, }, { name: "with URL", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "https://example.com" } return "" }, }, configs: []ConfigFn{ WithURL("HELLO", func(v *url.URL) { options = append(options, testOption{TestURL: v}) }), }, expectedOptions: []testOption{ { TestURL: parsedURL, }, }, }, { name: "with invalid URL", reader: EnvOptionsReader{ GetEnv: func(n string) string { if n == "HELLO" { return "i nvalid://url" } return "" }, }, configs: []ConfigFn{ WithURL("HELLO", func(v *url.URL) { options = append(options, testOption{TestURL: v}) }), }, expectedOptions: []testOption{}, }, } { t.Run(testcase.name, func(t *testing.T) { testcase.reader.Apply(testcase.configs...) assert.Equal(t, testcase.expectedOptions, options) options = []testOption{} }) } } func TestWithTLSConfig(t *testing.T) { pool, err := createCertPool([]byte(WeakCertificate)) assert.NoError(t, err) reader := EnvOptionsReader{ GetEnv: func(n string) string { if n == "CERTIFICATE" { return "/path/cert.pem" } return "" }, ReadFile: func(p string) ([]byte, error) { if p == "/path/cert.pem" { return []byte(WeakCertificate), nil } return []byte{}, nil }, } var option testOption reader.Apply( WithCertPool("CERTIFICATE", func(cp *x509.CertPool) { option = testOption{TestTLS: &tls.Config{RootCAs: cp}} }), ) // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. 
assert.Equal(t, pool.Subjects(), option.TestTLS.RootCAs.Subjects()) } func TestWithClientCert(t *testing.T) { cert, err := tls.X509KeyPair([]byte(WeakCertificate), []byte(WeakKey)) assert.NoError(t, err) reader := EnvOptionsReader{ GetEnv: func(n string) string { switch n { case "CLIENT_CERTIFICATE": return "/path/tls.crt" case "CLIENT_KEY": return "/path/tls.key" } return "" }, ReadFile: func(n string) ([]byte, error) { switch n { case "/path/tls.crt": return []byte(WeakCertificate), nil case "/path/tls.key": return []byte(WeakKey), nil } return []byte{}, nil }, } var option testOption reader.Apply( WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { option = testOption{TestTLS: &tls.Config{Certificates: []tls.Certificate{c}}} }), ) assert.Equal(t, cert, option.TestTLS.Certificates[0]) reader.ReadFile = func(s string) ([]byte, error) { return nil, errors.New("oops") } option.TestTLS = nil reader.Apply( WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { option = testOption{TestTLS: &tls.Config{Certificates: []tls.Certificate{c}}} }), ) assert.Nil(t, option.TestTLS) reader.GetEnv = func(s string) string { return "" } option.TestTLS = nil reader.Apply( WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { option = testOption{TestTLS: &tls.Config{Certificates: []tls.Certificate{c}}} }), ) assert.Nil(t, option.TestTLS) } func TestStringToHeader(t *testing.T) { tests := []struct { name string value string want map[string]string }{ { name: "simple test", value: "userId=alice", want: map[string]string{"userId": "alice"}, }, { name: "simple test with spaces", value: " userId = alice ", want: map[string]string{"userId": "alice"}, }, { name: "simple header conforms to RFC 3986 spec", value: " userId = alice+test ", want: map[string]string{"userId": "alice+test"}, }, { name: "multiple headers encoded", value: "userId=alice,serverNode=DF%3A28,isProduction=false", want: map[string]string{ "userId": "alice", "serverNode": "DF:28", "isProduction": "false", }, }, { name: "multiple headers encoded per RFC 3986 spec", value: "userId=alice+test,serverNode=DF%3A28,isProduction=false,namespace=localhost/test", want: map[string]string{ "userId": "alice+test", "serverNode": "DF:28", "isProduction": "false", "namespace": "localhost/test", }, }, { name: "invalid headers format", value: "userId:alice", want: map[string]string{}, }, { name: "invalid key", value: "%XX=missing,userId=alice", want: map[string]string{ "userId": "alice", }, }, { name: "invalid value", value: "missing=%XX,userId=alice", want: map[string]string{ "userId": "alice", }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { assert.Equal(t, tt.want, stringToHeader(tt.value)) }) } } opentelemetry-go-1.21.0/internal/shared/otlp/otlpmetric/000077500000000000000000000000001452547353200233265ustar00rootroot00000000000000opentelemetry-go-1.21.0/internal/shared/otlp/otlpmetric/oconf/000077500000000000000000000000001452547353200244325ustar00rootroot00000000000000opentelemetry-go-1.21.0/internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl000066400000000000000000000167631452547353200277270ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/oconf/envconfig.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package oconf import ( "crypto/tls" "crypto/x509" "net/url" "os" "path" "strings" "time" "{{ .envconfigImportPath }}" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) // DefaultEnvOptionsReader is the default environments reader. var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{ GetEnv: os.Getenv, ReadFile: os.ReadFile, Namespace: "OTEL_EXPORTER_OTLP", } // ApplyGRPCEnvConfigs applies the env configurations for gRPC. func ApplyGRPCEnvConfigs(cfg Config) Config { opts := getOptionsFromEnv() for _, opt := range opts { cfg = opt.ApplyGRPCOption(cfg) } return cfg } // ApplyHTTPEnvConfigs applies the env configurations for HTTP. func ApplyHTTPEnvConfigs(cfg Config) Config { opts := getOptionsFromEnv() for _, opt := range opts { cfg = opt.ApplyHTTPOption(cfg) } return cfg } func getOptionsFromEnv() []GenericOption { opts := []GenericOption{} tlsConf := &tls.Config{} DefaultEnvOptionsReader.Apply( envconfig.WithURL("ENDPOINT", func(u *url.URL) { opts = append(opts, withEndpointScheme(u)) opts = append(opts, newSplitOption(func(cfg Config) Config { cfg.Metrics.Endpoint = u.Host // For OTLP/HTTP endpoint URLs without a per-signal // configuration, the passed endpoint is used as a base URL // and the signals are sent to these paths relative to that. cfg.Metrics.URLPath = path.Join(u.Path, DefaultMetricsPath) return cfg }, withEndpointForGRPC(u))) }), envconfig.WithURL("METRICS_ENDPOINT", func(u *url.URL) { opts = append(opts, withEndpointScheme(u)) opts = append(opts, newSplitOption(func(cfg Config) Config { cfg.Metrics.Endpoint = u.Host // For endpoint URLs for OTLP/HTTP per-signal variables, the // URL MUST be used as-is without any modification. The only // exception is that if an URL contains no path part, the root // path / MUST be used. 
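// For example, a per-signal value of "https://collector.example:4318"
// (an illustrative host) yields URLPath "/", while
// "https://collector.example:4318/prefix/v1/metrics" keeps
// "/prefix/v1/metrics" unchanged.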
path := u.Path if path == "" { path = "/" } cfg.Metrics.URLPath = path return cfg }, withEndpointForGRPC(u))) }), envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), envconfig.WithHeaders("METRICS_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }), withEnvAggPreference("METRICS_DEFAULT_HISTOGRAM_AGGREGATION", func(a metric.AggregationSelector) { opts = append(opts, WithAggregationSelector(a)) }), ) return opts } func withEndpointForGRPC(u *url.URL) func(cfg Config) Config { return func(cfg Config) Config { // For OTLP/gRPC endpoints, this is the target to which the // exporter is going to send telemetry. cfg.Metrics.Endpoint = path.Join(u.Host, u.Path) return cfg } } // WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression. 
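// For example, a value of "gzip" selects GzipCompression; any other non-empty
// value falls back to NoCompression.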
func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) { return func(e *envconfig.EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { cp := NoCompression if v == "gzip" { cp = GzipCompression } fn(cp) } } } func withEndpointScheme(u *url.URL) GenericOption { switch strings.ToLower(u.Scheme) { case "http", "unix": return WithInsecure() default: return WithSecure() } } // revive:disable-next-line:flag-parameter func withInsecure(b bool) GenericOption { if b { return WithInsecure() } return WithSecure() } func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) { return func(e *envconfig.EnvOptionsReader) { if c.RootCAs != nil || len(c.Certificates) > 0 { fn(c) } } } func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) func(e *envconfig.EnvOptionsReader) { return func(e *envconfig.EnvOptionsReader) { if s, ok := e.GetEnvValue(n); ok { switch strings.ToLower(s) { case "cumulative": fn(cumulativeTemporality) case "delta": fn(deltaTemporality) case "lowmemory": fn(lowMemory) default: global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s) } } } } func cumulativeTemporality(metric.InstrumentKind) metricdata.Temporality { return metricdata.CumulativeTemporality } func deltaTemporality(ik metric.InstrumentKind) metricdata.Temporality { switch ik { case metric.InstrumentKindCounter, metric.InstrumentKindHistogram, metric.InstrumentKindObservableCounter: return metricdata.DeltaTemporality default: return metricdata.CumulativeTemporality } } func lowMemory(ik metric.InstrumentKind) metricdata.Temporality { switch ik { case metric.InstrumentKindCounter, metric.InstrumentKindHistogram: return metricdata.DeltaTemporality default: return metricdata.CumulativeTemporality } } func withEnvAggPreference(n string, fn func(metric.AggregationSelector)) func(e *envconfig.EnvOptionsReader) { return func(e *envconfig.EnvOptionsReader) { if s, ok := e.GetEnvValue(n); ok { switch strings.ToLower(s) { case "explicit_bucket_histogram": fn(metric.DefaultAggregationSelector) case "base2_exponential_bucket_histogram": fn(func(kind metric.InstrumentKind) metric.Aggregation { if kind == metric.InstrumentKindHistogram { return metric.AggregationBase2ExponentialHistogram{ MaxSize: 160, MaxScale: 20, NoMinMax: false, } } return metric.DefaultAggregationSelector(kind) }) default: global.Warn("OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION is set to an invalid value, ignoring.", "value", s) } } } } opentelemetry-go-1.21.0/internal/shared/otlp/otlpmetric/oconf/envconfig_test.go.tmpl000066400000000000000000000150701452547353200307540ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/oconf/envconfig_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
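//
// A sketch of the behavior exercised by the tests below (illustrative only):
// setting OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE=delta makes the
// selector installed by ApplyGRPCEnvConfigs report DeltaTemporality for
// counters, histograms, and observable counters, and CumulativeTemporality
// for the remaining instrument kinds, mirroring deltaTemporality above.
//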
package oconf import ( "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) func TestWithEnvTemporalityPreference(t *testing.T) { origReader := DefaultEnvOptionsReader.GetEnv tests := []struct { name string envValue string want map[metric.InstrumentKind]metricdata.Temporality }{ { name: "default do not set the selector", envValue: "", }, { name: "non-normative do not set the selector", envValue: "non-normative", }, { name: "cumulative", envValue: "cumulative", want: map[metric.InstrumentKind]metricdata.Temporality{ metric.InstrumentKindCounter: metricdata.CumulativeTemporality, metric.InstrumentKindHistogram: metricdata.CumulativeTemporality, metric.InstrumentKindUpDownCounter: metricdata.CumulativeTemporality, metric.InstrumentKindObservableCounter: metricdata.CumulativeTemporality, metric.InstrumentKindObservableUpDownCounter: metricdata.CumulativeTemporality, metric.InstrumentKindObservableGauge: metricdata.CumulativeTemporality, }, }, { name: "delta", envValue: "delta", want: map[metric.InstrumentKind]metricdata.Temporality{ metric.InstrumentKindCounter: metricdata.DeltaTemporality, metric.InstrumentKindHistogram: metricdata.DeltaTemporality, metric.InstrumentKindUpDownCounter: metricdata.CumulativeTemporality, metric.InstrumentKindObservableCounter: metricdata.DeltaTemporality, metric.InstrumentKindObservableUpDownCounter: metricdata.CumulativeTemporality, metric.InstrumentKindObservableGauge: metricdata.CumulativeTemporality, }, }, { name: "lowmemory", envValue: "lowmemory", want: map[metric.InstrumentKind]metricdata.Temporality{ metric.InstrumentKindCounter: metricdata.DeltaTemporality, metric.InstrumentKindHistogram: metricdata.DeltaTemporality, metric.InstrumentKindUpDownCounter: metricdata.CumulativeTemporality, metric.InstrumentKindObservableCounter: metricdata.CumulativeTemporality, metric.InstrumentKindObservableUpDownCounter: metricdata.CumulativeTemporality, metric.InstrumentKindObservableGauge: metricdata.CumulativeTemporality, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { DefaultEnvOptionsReader.GetEnv = func(key string) string { if key == "OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE" { return tt.envValue } return origReader(key) } cfg := Config{} cfg = ApplyGRPCEnvConfigs(cfg) if tt.want == nil { // There is no function set, the SDK's default is used. 
assert.Nil(t, cfg.Metrics.TemporalitySelector) return } require.NotNil(t, cfg.Metrics.TemporalitySelector) for ik, want := range tt.want { assert.Equal(t, want, cfg.Metrics.TemporalitySelector(ik)) } }) } DefaultEnvOptionsReader.GetEnv = origReader } func TestWithEnvAggPreference(t *testing.T) { origReader := DefaultEnvOptionsReader.GetEnv tests := []struct { name string envValue string want map[metric.InstrumentKind]metric.Aggregation }{ { name: "default do not set the selector", envValue: "", }, { name: "non-normative do not set the selector", envValue: "non-normative", }, { name: "explicit_bucket_histogram", envValue: "explicit_bucket_histogram", want: map[metric.InstrumentKind]metric.Aggregation{ metric.InstrumentKindCounter: metric.DefaultAggregationSelector(metric.InstrumentKindCounter), metric.InstrumentKindHistogram: metric.DefaultAggregationSelector(metric.InstrumentKindHistogram), metric.InstrumentKindUpDownCounter: metric.DefaultAggregationSelector(metric.InstrumentKindUpDownCounter), metric.InstrumentKindObservableCounter: metric.DefaultAggregationSelector(metric.InstrumentKindObservableCounter), metric.InstrumentKindObservableUpDownCounter: metric.DefaultAggregationSelector(metric.InstrumentKindObservableUpDownCounter), metric.InstrumentKindObservableGauge: metric.DefaultAggregationSelector(metric.InstrumentKindObservableGauge), }, }, { name: "base2_exponential_bucket_histogram", envValue: "base2_exponential_bucket_histogram", want: map[metric.InstrumentKind]metric.Aggregation{ metric.InstrumentKindCounter: metric.DefaultAggregationSelector(metric.InstrumentKindCounter), metric.InstrumentKindHistogram: metric.AggregationBase2ExponentialHistogram{ MaxSize: 160, MaxScale: 20, NoMinMax: false, }, metric.InstrumentKindUpDownCounter: metric.DefaultAggregationSelector(metric.InstrumentKindUpDownCounter), metric.InstrumentKindObservableCounter: metric.DefaultAggregationSelector(metric.InstrumentKindObservableCounter), metric.InstrumentKindObservableUpDownCounter: metric.DefaultAggregationSelector(metric.InstrumentKindObservableUpDownCounter), metric.InstrumentKindObservableGauge: metric.DefaultAggregationSelector(metric.InstrumentKindObservableGauge), }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { DefaultEnvOptionsReader.GetEnv = func(key string) string { if key == "OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION" { return tt.envValue } return origReader(key) } cfg := Config{} cfg = ApplyGRPCEnvConfigs(cfg) if tt.want == nil { // There is no function set, the SDK's default is used. assert.Nil(t, cfg.Metrics.AggregationSelector) return } require.NotNil(t, cfg.Metrics.AggregationSelector) for ik, want := range tt.want { assert.Equal(t, want, cfg.Metrics.AggregationSelector(ik)) } }) } DefaultEnvOptionsReader.GetEnv = origReader } opentelemetry-go-1.21.0/internal/shared/otlp/otlpmetric/oconf/options.go.tmpl000066400000000000000000000225721452547353200274370ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/oconf/options.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package oconf import ( "crypto/tls" "fmt" "path" "strings" "time" "google.golang.org/grpc" "google.golang.org/grpc/backoff" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/encoding/gzip" "{{ .retryImportPath }}" "go.opentelemetry.io/otel/sdk/metric" ) const ( // DefaultMaxAttempts describes how many times the driver // should retry the sending of the payload in case of a // retryable error. DefaultMaxAttempts int = 5 // DefaultMetricsPath is a default URL path for endpoint that // receives metrics. DefaultMetricsPath string = "/v1/metrics" // DefaultBackoff is a default base backoff time used in the // exponential backoff strategy. DefaultBackoff time.Duration = 300 * time.Millisecond // DefaultTimeout is a default max waiting time for the backend to process // each span or metrics batch. DefaultTimeout time.Duration = 10 * time.Second ) type ( SignalConfig struct { Endpoint string Insecure bool TLSCfg *tls.Config Headers map[string]string Compression Compression Timeout time.Duration URLPath string // gRPC configurations GRPCCredentials credentials.TransportCredentials TemporalitySelector metric.TemporalitySelector AggregationSelector metric.AggregationSelector } Config struct { // Signal specific configurations Metrics SignalConfig RetryConfig retry.Config // gRPC configurations ReconnectionPeriod time.Duration ServiceConfig string DialOptions []grpc.DialOption GRPCConn *grpc.ClientConn } ) // NewHTTPConfig returns a new Config with all settings applied from opts and // any unset setting using the default HTTP config values. func NewHTTPConfig(opts ...HTTPOption) Config { cfg := Config{ Metrics: SignalConfig{ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort), URLPath: DefaultMetricsPath, Compression: NoCompression, Timeout: DefaultTimeout, TemporalitySelector: metric.DefaultTemporalitySelector, AggregationSelector: metric.DefaultAggregationSelector, }, RetryConfig: retry.DefaultConfig, } cfg = ApplyHTTPEnvConfigs(cfg) for _, opt := range opts { cfg = opt.ApplyHTTPOption(cfg) } cfg.Metrics.URLPath = cleanPath(cfg.Metrics.URLPath, DefaultMetricsPath) return cfg } // cleanPath returns a path with all spaces trimmed and all redundancies // removed. If urlPath is empty or cleaning it results in an empty string, // defaultPath is returned instead. func cleanPath(urlPath string, defaultPath string) string { tmp := path.Clean(strings.TrimSpace(urlPath)) if tmp == "." { return defaultPath } if !path.IsAbs(tmp) { tmp = fmt.Sprintf("/%s", tmp) } return tmp } // NewGRPCConfig returns a new Config with all settings applied from opts and // any unset setting using the default gRPC config values. 
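// For example (illustrative values),
//
//	cfg := NewGRPCConfig(WithEndpoint("localhost:4317"), WithInsecure(), WithTimeout(5*time.Second))
//
// starts from the gRPC defaults, applies any OTEL_EXPORTER_OTLP_* environment
// overrides, and then applies the passed options in order.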
func NewGRPCConfig(opts ...GRPCOption) Config { cfg := Config{ Metrics: SignalConfig{ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort), URLPath: DefaultMetricsPath, Compression: NoCompression, Timeout: DefaultTimeout, TemporalitySelector: metric.DefaultTemporalitySelector, AggregationSelector: metric.DefaultAggregationSelector, }, RetryConfig: retry.DefaultConfig, } cfg = ApplyGRPCEnvConfigs(cfg) for _, opt := range opts { cfg = opt.ApplyGRPCOption(cfg) } if cfg.ServiceConfig != "" { cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig)) } // Prioritize GRPCCredentials over Insecure (passing both is an error). if cfg.Metrics.GRPCCredentials != nil { cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials)) } else if cfg.Metrics.Insecure { cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials())) } else { // Default to using the host's root CA. creds := credentials.NewTLS(nil) cfg.Metrics.GRPCCredentials = creds cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds)) } if cfg.Metrics.Compression == GzipCompression { cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name))) } if cfg.ReconnectionPeriod != 0 { p := grpc.ConnectParams{ Backoff: backoff.DefaultConfig, MinConnectTimeout: cfg.ReconnectionPeriod, } cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p)) } return cfg } type ( // GenericOption applies an option to the HTTP or gRPC driver. GenericOption interface { ApplyHTTPOption(Config) Config ApplyGRPCOption(Config) Config // A private method to prevent users implementing the // interface and so future additions to it will not // violate compatibility. private() } // HTTPOption applies an option to the HTTP driver. HTTPOption interface { ApplyHTTPOption(Config) Config // A private method to prevent users implementing the // interface and so future additions to it will not // violate compatibility. private() } // GRPCOption applies an option to the gRPC driver. GRPCOption interface { ApplyGRPCOption(Config) Config // A private method to prevent users implementing the // interface and so future additions to it will not // violate compatibility. private() } ) // genericOption is an option that applies the same logic // for both gRPC and HTTP. type genericOption struct { fn func(Config) Config } func (g *genericOption) ApplyGRPCOption(cfg Config) Config { return g.fn(cfg) } func (g *genericOption) ApplyHTTPOption(cfg Config) Config { return g.fn(cfg) } func (genericOption) private() {} func newGenericOption(fn func(cfg Config) Config) GenericOption { return &genericOption{fn: fn} } // splitOption is an option that applies different logic // for gRPC and HTTP. type splitOption struct { httpFn func(Config) Config grpcFn func(Config) Config } func (g *splitOption) ApplyGRPCOption(cfg Config) Config { return g.grpcFn(cfg) } func (g *splitOption) ApplyHTTPOption(cfg Config) Config { return g.httpFn(cfg) } func (splitOption) private() {} func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption { return &splitOption{httpFn: httpFn, grpcFn: grpcFn} } // httpOption is an option that is only applied to the HTTP driver. 
type httpOption struct { fn func(Config) Config } func (h *httpOption) ApplyHTTPOption(cfg Config) Config { return h.fn(cfg) } func (httpOption) private() {} func NewHTTPOption(fn func(cfg Config) Config) HTTPOption { return &httpOption{fn: fn} } // grpcOption is an option that is only applied to the gRPC driver. type grpcOption struct { fn func(Config) Config } func (h *grpcOption) ApplyGRPCOption(cfg Config) Config { return h.fn(cfg) } func (grpcOption) private() {} func NewGRPCOption(fn func(cfg Config) Config) GRPCOption { return &grpcOption{fn: fn} } // Generic Options func WithEndpoint(endpoint string) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Metrics.Endpoint = endpoint return cfg }) } func WithCompression(compression Compression) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Metrics.Compression = compression return cfg }) } func WithURLPath(urlPath string) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Metrics.URLPath = urlPath return cfg }) } func WithRetry(rc retry.Config) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.RetryConfig = rc return cfg }) } func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption { return newSplitOption(func(cfg Config) Config { cfg.Metrics.TLSCfg = tlsCfg.Clone() return cfg }, func(cfg Config) Config { cfg.Metrics.GRPCCredentials = credentials.NewTLS(tlsCfg) return cfg }) } func WithInsecure() GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Metrics.Insecure = true return cfg }) } func WithSecure() GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Metrics.Insecure = false return cfg }) } func WithHeaders(headers map[string]string) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Metrics.Headers = headers return cfg }) } func WithTimeout(duration time.Duration) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Metrics.Timeout = duration return cfg }) } func WithTemporalitySelector(selector metric.TemporalitySelector) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Metrics.TemporalitySelector = selector return cfg }) } func WithAggregationSelector(selector metric.AggregationSelector) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Metrics.AggregationSelector = selector return cfg }) } opentelemetry-go-1.21.0/internal/shared/otlp/otlpmetric/oconf/options_test.go.tmpl000066400000000000000000000365031452547353200304750ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/oconf/options_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
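//
// The tests below apply each GenericOption to both transports. A minimal
// sketch of that pattern (illustrative only):
//
//	opt := WithHeaders(map[string]string{"h1": "v1"})
//	httpCfg := NewHTTPConfig(NewHTTPOption(opt.ApplyHTTPOption))
//	grpcCfg := NewGRPCConfig(NewGRPCOption(opt.ApplyGRPCOption))
//	// Both httpCfg.Metrics.Headers and grpcCfg.Metrics.Headers now carry h1=v1.
//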
package oconf import ( "errors" "testing" "time" "github.com/stretchr/testify/assert" "{{ .envconfigImportPath }}" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) const ( WeakCertificate = ` -----BEGIN CERTIFICATE----- MIIBhzCCASygAwIBAgIRANHpHgAWeTnLZpTSxCKs0ggwCgYIKoZIzj0EAwIwEjEQ MA4GA1UEChMHb3RlbC1nbzAeFw0yMTA0MDExMzU5MDNaFw0yMTA0MDExNDU5MDNa MBIxEDAOBgNVBAoTB290ZWwtZ28wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAS9 nWSkmPCxShxnp43F+PrOtbGV7sNfkbQ/kxzi9Ego0ZJdiXxkmv/C05QFddCW7Y0Z sJCLHGogQsYnWJBXUZOVo2MwYTAOBgNVHQ8BAf8EBAMCB4AwEwYDVR0lBAwwCgYI KwYBBQUHAwEwDAYDVR0TAQH/BAIwADAsBgNVHREEJTAjgglsb2NhbGhvc3SHEAAA AAAAAAAAAAAAAAAAAAGHBH8AAAEwCgYIKoZIzj0EAwIDSQAwRgIhANwZVVKvfvQ/ 1HXsTvgH+xTQswOwSSKYJ1cVHQhqK7ZbAiEAus8NxpTRnp5DiTMuyVmhVNPB+bVH Lhnm4N/QDk5rek0= -----END CERTIFICATE----- ` WeakPrivateKey = ` -----BEGIN PRIVATE KEY----- MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgN8HEXiXhvByrJ1zK SFT6Y2l2KqDWwWzKf+t4CyWrNKehRANCAAS9nWSkmPCxShxnp43F+PrOtbGV7sNf kbQ/kxzi9Ego0ZJdiXxkmv/C05QFddCW7Y0ZsJCLHGogQsYnWJBXUZOV -----END PRIVATE KEY----- ` ) type env map[string]string func (e *env) getEnv(env string) string { return (*e)[env] } type fileReader map[string][]byte func (f *fileReader) readFile(filename string) ([]byte, error) { if b, ok := (*f)[filename]; ok { return b, nil } return nil, errors.New("file not found") } func TestConfigs(t *testing.T) { tlsCert, err := CreateTLSConfig([]byte(WeakCertificate)) assert.NoError(t, err) tests := []struct { name string opts []GenericOption env env fileReader fileReader asserts func(t *testing.T, c *Config, grpcOption bool) }{ { name: "Test default configs", asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.Equal(t, "localhost:4317", c.Metrics.Endpoint) } else { assert.Equal(t, "localhost:4318", c.Metrics.Endpoint) } assert.Equal(t, NoCompression, c.Metrics.Compression) assert.Equal(t, map[string]string(nil), c.Metrics.Headers) assert.Equal(t, 10*time.Second, c.Metrics.Timeout) }, }, // Endpoint Tests { name: "Test With Endpoint", opts: []GenericOption{ WithEndpoint("someendpoint"), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "someendpoint", c.Metrics.Endpoint) }, }, { name: "Test Environment Endpoint", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "https://env.endpoint/prefix", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.False(t, c.Metrics.Insecure) if grpcOption { assert.Equal(t, "env.endpoint/prefix", c.Metrics.Endpoint) } else { assert.Equal(t, "env.endpoint", c.Metrics.Endpoint) assert.Equal(t, "/prefix/v1/metrics", c.Metrics.URLPath) } }, }, { name: "Test Environment Signal Specific Endpoint", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "https://overrode.by.signal.specific/env/var", "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "http://env.metrics.endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.True(t, c.Metrics.Insecure) assert.Equal(t, "env.metrics.endpoint", c.Metrics.Endpoint) if !grpcOption { assert.Equal(t, "/", c.Metrics.URLPath) } }, }, { name: "Test Mixed Environment and With Endpoint", opts: []GenericOption{ WithEndpoint("metrics_endpoint"), }, env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "env_endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "metrics_endpoint", c.Metrics.Endpoint) }, }, { name: "Test Environment Endpoint with HTTP scheme", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "http://env_endpoint", }, asserts: 
func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "env_endpoint", c.Metrics.Endpoint) assert.Equal(t, true, c.Metrics.Insecure) }, }, { name: "Test Environment Endpoint with HTTP scheme and leading & trailing spaces", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": " http://env_endpoint ", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "env_endpoint", c.Metrics.Endpoint) assert.Equal(t, true, c.Metrics.Insecure) }, }, { name: "Test Environment Endpoint with HTTPS scheme", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "https://env_endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "env_endpoint", c.Metrics.Endpoint) assert.Equal(t, false, c.Metrics.Insecure) }, }, { name: "Test Environment Signal Specific Endpoint with uppercase scheme", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "HTTPS://overrode_by_signal_specific", "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "HtTp://env_metrics_endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "env_metrics_endpoint", c.Metrics.Endpoint) assert.Equal(t, true, c.Metrics.Insecure) }, }, // Certificate tests { name: "Test Default Certificate", asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.NotNil(t, c.Metrics.GRPCCredentials) } else { assert.Nil(t, c.Metrics.TLSCfg) } }, }, { name: "Test With Certificate", opts: []GenericOption{ WithTLSClientConfig(tlsCert), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { // TODO: make sure gRPC's credentials actually work assert.NotNil(t, c.Metrics.GRPCCredentials) } else { // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Metrics.TLSCfg.RootCAs.Subjects()) } }, }, { name: "Test Environment Certificate", env: map[string]string{ "OTEL_EXPORTER_OTLP_CERTIFICATE": "cert_path", }, fileReader: fileReader{ "cert_path": []byte(WeakCertificate), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.NotNil(t, c.Metrics.GRPCCredentials) } else { // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Metrics.TLSCfg.RootCAs.Subjects()) } }, }, { name: "Test Environment Signal Specific Certificate", env: map[string]string{ "OTEL_EXPORTER_OTLP_CERTIFICATE": "overrode_by_signal_specific", "OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE": "cert_path", }, fileReader: fileReader{ "cert_path": []byte(WeakCertificate), "invalid_cert": []byte("invalid certificate file."), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.NotNil(t, c.Metrics.GRPCCredentials) } else { // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Metrics.TLSCfg.RootCAs.Subjects()) } }, }, { name: "Test Mixed Environment and With Certificate", opts: []GenericOption{}, env: map[string]string{ "OTEL_EXPORTER_OTLP_CERTIFICATE": "cert_path", }, fileReader: fileReader{ "cert_path": []byte(WeakCertificate), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.NotNil(t, c.Metrics.GRPCCredentials) } else { // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. 
assert.Equal(t, 1, len(c.Metrics.TLSCfg.RootCAs.Subjects())) } }, }, // Headers tests { name: "Test With Headers", opts: []GenericOption{ WithHeaders(map[string]string{"h1": "v1"}), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, map[string]string{"h1": "v1"}, c.Metrics.Headers) }, }, { name: "Test Environment Headers", env: map[string]string{"OTEL_EXPORTER_OTLP_HEADERS": "h1=v1,h2=v2"}, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Metrics.Headers) }, }, { name: "Test Environment Signal Specific Headers", env: map[string]string{ "OTEL_EXPORTER_OTLP_HEADERS": "overrode_by_signal_specific", "OTEL_EXPORTER_OTLP_METRICS_HEADERS": "h1=v1,h2=v2", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Metrics.Headers) }, }, { name: "Test Mixed Environment and With Headers", env: map[string]string{"OTEL_EXPORTER_OTLP_HEADERS": "h1=v1,h2=v2"}, opts: []GenericOption{ WithHeaders(map[string]string{"m1": "mv1"}), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, map[string]string{"m1": "mv1"}, c.Metrics.Headers) }, }, // Compression Tests { name: "Test With Compression", opts: []GenericOption{ WithCompression(GzipCompression), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, GzipCompression, c.Metrics.Compression) }, }, { name: "Test Environment Compression", env: map[string]string{ "OTEL_EXPORTER_OTLP_COMPRESSION": "gzip", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, GzipCompression, c.Metrics.Compression) }, }, { name: "Test Environment Signal Specific Compression", env: map[string]string{ "OTEL_EXPORTER_OTLP_METRICS_COMPRESSION": "gzip", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, GzipCompression, c.Metrics.Compression) }, }, { name: "Test Mixed Environment and With Compression", opts: []GenericOption{ WithCompression(NoCompression), }, env: map[string]string{ "OTEL_EXPORTER_OTLP_METRICS_COMPRESSION": "gzip", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, NoCompression, c.Metrics.Compression) }, }, // Timeout Tests { name: "Test With Timeout", opts: []GenericOption{ WithTimeout(time.Duration(5 * time.Second)), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, 5*time.Second, c.Metrics.Timeout) }, }, { name: "Test Environment Timeout", env: map[string]string{ "OTEL_EXPORTER_OTLP_TIMEOUT": "15000", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, c.Metrics.Timeout, 15*time.Second) }, }, { name: "Test Environment Signal Specific Timeout", env: map[string]string{ "OTEL_EXPORTER_OTLP_TIMEOUT": "15000", "OTEL_EXPORTER_OTLP_METRICS_TIMEOUT": "28000", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, c.Metrics.Timeout, 28*time.Second) }, }, { name: "Test Mixed Environment and With Timeout", env: map[string]string{ "OTEL_EXPORTER_OTLP_TIMEOUT": "15000", "OTEL_EXPORTER_OTLP_METRICS_TIMEOUT": "28000", }, opts: []GenericOption{ WithTimeout(5 * time.Second), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, c.Metrics.Timeout, 5*time.Second) }, }, // Temporality Selector Tests { name: "WithTemporalitySelector", opts: []GenericOption{ WithTemporalitySelector(deltaSelector), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { // Function value comparisons are disallowed, test 
non-default // behavior of a TemporalitySelector here to ensure our "catch // all" was set. var undefinedKind metric.InstrumentKind got := c.Metrics.TemporalitySelector assert.Equal(t, metricdata.DeltaTemporality, got(undefinedKind)) }, }, // Aggregation Selector Tests { name: "WithAggregationSelector", opts: []GenericOption{ WithAggregationSelector(dropSelector), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { // Function value comparisons are disallowed, test non-default // behavior of a AggregationSelector here to ensure our "catch // all" was set. var undefinedKind metric.InstrumentKind got := c.Metrics.AggregationSelector assert.Equal(t, metric.AggregationDrop{}, got(undefinedKind)) }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { origEOR := DefaultEnvOptionsReader DefaultEnvOptionsReader = envconfig.EnvOptionsReader{ GetEnv: tt.env.getEnv, ReadFile: tt.fileReader.readFile, Namespace: "OTEL_EXPORTER_OTLP", } t.Cleanup(func() { DefaultEnvOptionsReader = origEOR }) // Tests Generic options as HTTP Options cfg := NewHTTPConfig(asHTTPOptions(tt.opts)...) tt.asserts(t, &cfg, false) // Tests Generic options as gRPC Options cfg = NewGRPCConfig(asGRPCOptions(tt.opts)...) tt.asserts(t, &cfg, true) }) } } func dropSelector(metric.InstrumentKind) metric.Aggregation { return metric.AggregationDrop{} } func deltaSelector(metric.InstrumentKind) metricdata.Temporality { return metricdata.DeltaTemporality } func asHTTPOptions(opts []GenericOption) []HTTPOption { converted := make([]HTTPOption, len(opts)) for i, o := range opts { converted[i] = NewHTTPOption(o.ApplyHTTPOption) } return converted } func asGRPCOptions(opts []GenericOption) []GRPCOption { converted := make([]GRPCOption, len(opts)) for i, o := range opts { converted[i] = NewGRPCOption(o.ApplyGRPCOption) } return converted } func TestCleanPath(t *testing.T) { type args struct { urlPath string defaultPath string } tests := []struct { name string args args want string }{ { name: "clean empty path", args: args{ urlPath: "", defaultPath: "DefaultPath", }, want: "DefaultPath", }, { name: "clean metrics path", args: args{ urlPath: "/prefix/v1/metrics", defaultPath: "DefaultMetricsPath", }, want: "/prefix/v1/metrics", }, { name: "clean traces path", args: args{ urlPath: "https://env_endpoint", defaultPath: "DefaultTracesPath", }, want: "/https:/env_endpoint", }, { name: "spaces trimmed", args: args{ urlPath: " /dir", }, want: "/dir", }, { name: "clean path empty", args: args{ urlPath: "dir/..", defaultPath: "DefaultTracesPath", }, want: "DefaultTracesPath", }, { name: "make absolute", args: args{ urlPath: "dir/a", }, want: "/dir/a", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := cleanPath(tt.args.urlPath, tt.args.defaultPath); got != tt.want { t.Errorf("CleanPath() = %v, want %v", got, tt.want) } }) } } opentelemetry-go-1.21.0/internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl000066400000000000000000000042431452547353200303340ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/oconf/optiontypes.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package oconf import "time" const ( // DefaultCollectorGRPCPort is the default gRPC port of the collector. DefaultCollectorGRPCPort uint16 = 4317 // DefaultCollectorHTTPPort is the default HTTP port of the collector. DefaultCollectorHTTPPort uint16 = 4318 // DefaultCollectorHost is the host address the Exporter will attempt // connect to if no collector address is provided. DefaultCollectorHost string = "localhost" ) // Compression describes the compression used for payloads sent to the // collector. type Compression int const ( // NoCompression tells the driver to send payloads without // compression. NoCompression Compression = iota // GzipCompression tells the driver to send payloads after // compressing them with gzip. GzipCompression ) // RetrySettings defines configuration for retrying batches in case of export failure // using an exponential backoff. type RetrySettings struct { // Enabled indicates whether to not retry sending batches in case of export failure. Enabled bool // InitialInterval the time to wait after the first failure before retrying. InitialInterval time.Duration // MaxInterval is the upper bound on backoff interval. Once this value is reached the delay between // consecutive retries will always be `MaxInterval`. MaxInterval time.Duration // MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request/batch. // Once this value is reached, the data is discarded. MaxElapsedTime time.Duration } opentelemetry-go-1.21.0/internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl000066400000000000000000000026441452547353200265440ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/oconf/tls.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package oconf import ( "crypto/tls" "crypto/x509" "errors" "os" ) // ReadTLSConfigFromFile reads a PEM certificate file and creates // a tls.Config that will use this certifate to verify a server certificate. func ReadTLSConfigFromFile(path string) (*tls.Config, error) { b, err := os.ReadFile(path) if err != nil { return nil, err } return CreateTLSConfig(b) } // CreateTLSConfig creates a tls.Config from a raw certificate bytes // to verify a server certificate. 
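//
// Illustrative usage sketch (certPEM is an assumed []byte holding a
// PEM-encoded CA certificate, e.g. read from a file or configuration):
//
//	tlsCfg, err := CreateTLSConfig(certPEM)
//	if err != nil {
//		return err // the bytes could not be added to a certificate pool
//	}
//	// tlsCfg.RootCAs now holds the certificate and can be used by an HTTP
//	// or gRPC client to verify the collector's server certificate.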
func CreateTLSConfig(certBytes []byte) (*tls.Config, error) { cp := x509.NewCertPool() if ok := cp.AppendCertsFromPEM(certBytes); !ok { return nil, errors.New("failed to append certificate to the cert pool") } return &tls.Config{ RootCAs: cp, }, nil } opentelemetry-go-1.21.0/internal/shared/otlp/otlpmetric/otest/000077500000000000000000000000001452547353200244645ustar00rootroot00000000000000opentelemetry-go-1.21.0/internal/shared/otlp/otlpmetric/otest/client.go.tmpl000066400000000000000000000222441452547353200272500ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/otest/client.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otest import ( "context" "fmt" "testing" "time" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" "go.opentelemetry.io/otel" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" collpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" cpb "go.opentelemetry.io/proto/otlp/common/v1" mpb "go.opentelemetry.io/proto/otlp/metrics/v1" rpb "go.opentelemetry.io/proto/otlp/resource/v1" ) var ( // Sat Jan 01 2000 00:00:00 GMT+0000. start = time.Date(2000, time.January, 0o1, 0, 0, 0, 0, time.FixedZone("GMT", 0)) end = start.Add(30 * time.Second) kvAlice = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{StringValue: "alice"}, }} kvBob = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{StringValue: "bob"}, }} kvSrvName = &cpb.KeyValue{Key: "service.name", Value: &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{StringValue: "test server"}, }} kvSrvVer = &cpb.KeyValue{Key: "service.version", Value: &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{StringValue: "v0.1.0"}, }} min, max, sum = 2.0, 4.0, 90.0 hdp = []*mpb.HistogramDataPoint{ { Attributes: []*cpb.KeyValue{kvAlice}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Count: 30, Sum: &sum, ExplicitBounds: []float64{1, 5}, BucketCounts: []uint64{0, 30, 0}, Min: &min, Max: &max, }, } hist = &mpb.Histogram{ AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, DataPoints: hdp, } dPtsInt64 = []*mpb.NumberDataPoint{ { Attributes: []*cpb.KeyValue{kvAlice}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Value: &mpb.NumberDataPoint_AsInt{AsInt: 1}, }, { Attributes: []*cpb.KeyValue{kvBob}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Value: &mpb.NumberDataPoint_AsInt{AsInt: 2}, }, } dPtsFloat64 = []*mpb.NumberDataPoint{ { Attributes: []*cpb.KeyValue{kvAlice}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Value: &mpb.NumberDataPoint_AsDouble{AsDouble: 1.0}, }, { Attributes: []*cpb.KeyValue{kvBob}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Value: 
&mpb.NumberDataPoint_AsDouble{AsDouble: 2.0}, }, } sumInt64 = &mpb.Sum{ AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, IsMonotonic: true, DataPoints: dPtsInt64, } sumFloat64 = &mpb.Sum{ AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, IsMonotonic: false, DataPoints: dPtsFloat64, } gaugeInt64 = &mpb.Gauge{DataPoints: dPtsInt64} gaugeFloat64 = &mpb.Gauge{DataPoints: dPtsFloat64} metrics = []*mpb.Metric{ { Name: "int64-gauge", Description: "Gauge with int64 values", Unit: "1", Data: &mpb.Metric_Gauge{Gauge: gaugeInt64}, }, { Name: "float64-gauge", Description: "Gauge with float64 values", Unit: "1", Data: &mpb.Metric_Gauge{Gauge: gaugeFloat64}, }, { Name: "int64-sum", Description: "Sum with int64 values", Unit: "1", Data: &mpb.Metric_Sum{Sum: sumInt64}, }, { Name: "float64-sum", Description: "Sum with float64 values", Unit: "1", Data: &mpb.Metric_Sum{Sum: sumFloat64}, }, { Name: "histogram", Description: "Histogram", Unit: "1", Data: &mpb.Metric_Histogram{Histogram: hist}, }, } scope = &cpb.InstrumentationScope{ Name: "test/code/path", Version: "v0.1.0", } scopeMetrics = []*mpb.ScopeMetrics{ { Scope: scope, Metrics: metrics, SchemaUrl: semconv.SchemaURL, }, } res = &rpb.Resource{ Attributes: []*cpb.KeyValue{kvSrvName, kvSrvVer}, } resourceMetrics = &mpb.ResourceMetrics{ Resource: res, ScopeMetrics: scopeMetrics, SchemaUrl: semconv.SchemaURL, } ) type Client interface { UploadMetrics(context.Context, *mpb.ResourceMetrics) error ForceFlush(context.Context) error Shutdown(context.Context) error } // ClientFactory is a function that when called returns a // Client implementation that is connected to also returned // Collector implementation. The Client is ready to upload metric data to the // Collector which is ready to store that data. // // If resultCh is not nil, the returned Collector needs to use the responses // from that channel to send back to the client for every export request. type ClientFactory func(resultCh <-chan ExportResult) (Client, Collector) // RunClientTests runs a suite of Client integration tests. For example: // // t.Run("Integration", RunClientTests(factory)) func RunClientTests(f ClientFactory) func(*testing.T) { return func(t *testing.T) { t.Run("ClientHonorsContextErrors", func(t *testing.T) { t.Run("Shutdown", testCtxErrs(func() func(context.Context) error { c, _ := f(nil) return c.Shutdown })) t.Run("ForceFlush", testCtxErrs(func() func(context.Context) error { c, _ := f(nil) return c.ForceFlush })) t.Run("UploadMetrics", testCtxErrs(func() func(context.Context) error { c, _ := f(nil) return func(ctx context.Context) error { return c.UploadMetrics(ctx, nil) } })) }) t.Run("ForceFlushFlushes", func(t *testing.T) { ctx := context.Background() client, collector := f(nil) require.NoError(t, client.UploadMetrics(ctx, resourceMetrics)) require.NoError(t, client.ForceFlush(ctx)) rm := collector.Collect().Dump() // Data correctness is not important, just it was received. 
require.Greater(t, len(rm), 0, "no data uploaded") require.NoError(t, client.Shutdown(ctx)) rm = collector.Collect().Dump() assert.Len(t, rm, 0, "client did not flush all data") }) t.Run("UploadMetrics", func(t *testing.T) { ctx := context.Background() client, coll := f(nil) require.NoError(t, client.UploadMetrics(ctx, resourceMetrics)) require.NoError(t, client.Shutdown(ctx)) got := coll.Collect().Dump() require.Len(t, got, 1, "upload of one ResourceMetrics") diff := cmp.Diff(got[0], resourceMetrics, cmp.Comparer(proto.Equal)) if diff != "" { t.Fatalf("unexpected ResourceMetrics:\n%s", diff) } }) t.Run("PartialSuccess", func(t *testing.T) { const n, msg = 2, "bad data" rCh := make(chan ExportResult, 3) rCh <- ExportResult{ Response: &collpb.ExportMetricsServiceResponse{ PartialSuccess: &collpb.ExportMetricsPartialSuccess{ RejectedDataPoints: n, ErrorMessage: msg, }, }, } rCh <- ExportResult{ Response: &collpb.ExportMetricsServiceResponse{ PartialSuccess: &collpb.ExportMetricsPartialSuccess{ // Should not be logged. RejectedDataPoints: 0, ErrorMessage: "", }, }, } rCh <- ExportResult{ Response: &collpb.ExportMetricsServiceResponse{}, } ctx := context.Background() client, _ := f(rCh) defer func(orig otel.ErrorHandler) { otel.SetErrorHandler(orig) }(otel.GetErrorHandler()) errs := []error{} eh := otel.ErrorHandlerFunc(func(e error) { errs = append(errs, e) }) otel.SetErrorHandler(eh) require.NoError(t, client.UploadMetrics(ctx, resourceMetrics)) require.NoError(t, client.UploadMetrics(ctx, resourceMetrics)) require.NoError(t, client.UploadMetrics(ctx, resourceMetrics)) require.NoError(t, client.Shutdown(ctx)) require.Equal(t, 1, len(errs)) want := fmt.Sprintf("%s (%d metric data points rejected)", msg, n) assert.ErrorContains(t, errs[0], want) }) } } func testCtxErrs(factory func() func(context.Context) error) func(t *testing.T) { return func(t *testing.T) { t.Helper() ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) t.Run("DeadlineExceeded", func(t *testing.T) { innerCtx, innerCancel := context.WithTimeout(ctx, time.Nanosecond) t.Cleanup(innerCancel) <-innerCtx.Done() f := factory() assert.ErrorIs(t, f(innerCtx), context.DeadlineExceeded) }) t.Run("Canceled", func(t *testing.T) { innerCtx, innerCancel := context.WithCancel(ctx) innerCancel() f := factory() assert.ErrorIs(t, f(innerCtx), context.Canceled) }) } } opentelemetry-go-1.21.0/internal/shared/otlp/otlpmetric/otest/client_test.go.tmpl000066400000000000000000000044011452547353200303020ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/otest/client_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package otest import ( "context" "testing" "go.opentelemetry.io/otel" "{{ .internalImportPath }}" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" cpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" mpb "go.opentelemetry.io/proto/otlp/metrics/v1" ) type client struct { rCh <-chan ExportResult storage *Storage } func (c *client) Temporality(k metric.InstrumentKind) metricdata.Temporality { return metric.DefaultTemporalitySelector(k) } func (c *client) Aggregation(k metric.InstrumentKind) metric.Aggregation { return metric.DefaultAggregationSelector(k) } func (c *client) Collect() *Storage { return c.storage } func (c *client) UploadMetrics(ctx context.Context, rm *mpb.ResourceMetrics) error { c.storage.Add(&cpb.ExportMetricsServiceRequest{ ResourceMetrics: []*mpb.ResourceMetrics{rm}, }) if c.rCh != nil { r := <-c.rCh if r.Response != nil && r.Response.GetPartialSuccess() != nil { msg := r.Response.GetPartialSuccess().GetErrorMessage() n := r.Response.GetPartialSuccess().GetRejectedDataPoints() if msg != "" || n > 0 { otel.Handle(internal.MetricPartialSuccessError(n, msg)) } } return r.Err } return ctx.Err() } func (c *client) ForceFlush(ctx context.Context) error { return ctx.Err() } func (c *client) Shutdown(ctx context.Context) error { return ctx.Err() } func TestClientTests(t *testing.T) { factory := func(rCh <-chan ExportResult) (Client, Collector) { c := &client{rCh: rCh, storage: NewStorage()} return c, c } t.Run("Integration", RunClientTests(factory)) } opentelemetry-go-1.21.0/internal/shared/otlp/otlpmetric/otest/collector.go.tmpl000066400000000000000000000274761452547353200277740ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/otest/collector.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otest import ( "bytes" "compress/gzip" "context" "crypto/ecdsa" "crypto/elliptic" "crypto/rand" "crypto/tls" "crypto/x509" "crypto/x509/pkix" // nolint:depguard // This is for testing. "encoding/pem" "errors" "fmt" "io" "math/big" "net" "net/http" "net/url" "sync" "time" "google.golang.org/grpc" "google.golang.org/grpc/metadata" "google.golang.org/protobuf/proto" "{{ .oconfImportPath }}" collpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" mpb "go.opentelemetry.io/proto/otlp/metrics/v1" ) // Collector is the collection target a Client sends metric uploads to. type Collector interface { Collect() *Storage } type ExportResult struct { Response *collpb.ExportMetricsServiceResponse Err error } // Storage stores uploaded OTLP metric data in their proto form. type Storage struct { dataMu sync.Mutex data []*mpb.ResourceMetrics } // NewStorage returns a configure storage ready to store received requests. func NewStorage() *Storage { return &Storage{} } // Add adds the request to the Storage. 
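//
// Minimal usage sketch (rm is an assumed *mpb.ResourceMetrics, e.g. test
// data built elsewhere):
//
//	s := NewStorage()
//	s.Add(&collpb.ExportMetricsServiceRequest{
//		ResourceMetrics: []*mpb.ResourceMetrics{rm},
//	})
//	got := s.Dump() // returns everything added so far and clears s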
func (s *Storage) Add(request *collpb.ExportMetricsServiceRequest) { s.dataMu.Lock() defer s.dataMu.Unlock() s.data = append(s.data, request.ResourceMetrics...) } // Dump returns all added ResourceMetrics and clears the storage. func (s *Storage) Dump() []*mpb.ResourceMetrics { s.dataMu.Lock() defer s.dataMu.Unlock() var data []*mpb.ResourceMetrics data, s.data = s.data, []*mpb.ResourceMetrics{} return data } // GRPCCollector is an OTLP gRPC server that collects all requests it receives. type GRPCCollector struct { collpb.UnimplementedMetricsServiceServer headersMu sync.Mutex headers metadata.MD storage *Storage resultCh <-chan ExportResult listener net.Listener srv *grpc.Server } // NewGRPCCollector returns a *GRPCCollector that is listening at the provided // endpoint. // // If endpoint is an empty string, the returned collector will be listening on // the localhost interface at an OS chosen port. // // If errCh is not nil, the collector will respond to Export calls with errors // sent on that channel. This means that if errCh is not nil Export calls will // block until an error is received. func NewGRPCCollector(endpoint string, resultCh <-chan ExportResult) (*GRPCCollector, error) { if endpoint == "" { endpoint = "localhost:0" } c := &GRPCCollector{ storage: NewStorage(), resultCh: resultCh, } var err error c.listener, err = net.Listen("tcp", endpoint) if err != nil { return nil, err } c.srv = grpc.NewServer() collpb.RegisterMetricsServiceServer(c.srv, c) go func() { _ = c.srv.Serve(c.listener) }() return c, nil } // Shutdown shuts down the gRPC server closing all open connections and // listeners immediately. func (c *GRPCCollector) Shutdown() { c.srv.Stop() } // Addr returns the net.Addr c is listening at. func (c *GRPCCollector) Addr() net.Addr { return c.listener.Addr() } // Collect returns the Storage holding all collected requests. func (c *GRPCCollector) Collect() *Storage { return c.storage } // Headers returns the headers received for all requests. func (c *GRPCCollector) Headers() map[string][]string { // Makes a copy. c.headersMu.Lock() defer c.headersMu.Unlock() return metadata.Join(c.headers) } // Export handles the export req. func (c *GRPCCollector) Export(ctx context.Context, req *collpb.ExportMetricsServiceRequest) (*collpb.ExportMetricsServiceResponse, error) { c.storage.Add(req) if h, ok := metadata.FromIncomingContext(ctx); ok { c.headersMu.Lock() c.headers = metadata.Join(c.headers, h) c.headersMu.Unlock() } if c.resultCh != nil { r := <-c.resultCh if r.Response == nil { return &collpb.ExportMetricsServiceResponse{}, r.Err } return r.Response, r.Err } return &collpb.ExportMetricsServiceResponse{}, nil } var emptyExportMetricsServiceResponse = func() []byte { body := collpb.ExportMetricsServiceResponse{} r, err := proto.Marshal(&body) if err != nil { panic(err) } return r }() type HTTPResponseError struct { Err error Status int Header http.Header } func (e *HTTPResponseError) Error() string { return fmt.Sprintf("%d: %s", e.Status, e.Err) } func (e *HTTPResponseError) Unwrap() error { return e.Err } // HTTPCollector is an OTLP HTTP server that collects all requests it receives. type HTTPCollector struct { plainTextResponse bool headersMu sync.Mutex headers http.Header storage *Storage resultCh <-chan ExportResult listener net.Listener srv *http.Server } // NewHTTPCollector returns a *HTTPCollector that is listening at the provided // endpoint. 
// // If endpoint is an empty string, the returned collector will be listening on // the localhost interface at an OS chosen port, not use TLS, and listen at the // default OTLP metric endpoint path ("/v1/metrics"). If the endpoint contains // a prefix of "https" the server will generate weak self-signed TLS // certificates and use them to server data. If the endpoint contains a path, // that path will be used instead of the default OTLP metric endpoint path. // // If errCh is not nil, the collector will respond to HTTP requests with errors // sent on that channel. This means that if errCh is not nil Export calls will // block until an error is received. func NewHTTPCollector(endpoint string, resultCh <-chan ExportResult, opts ...func(*HTTPCollector)) (*HTTPCollector, error) { u, err := url.Parse(endpoint) if err != nil { return nil, err } if u.Host == "" { u.Host = "localhost:0" } if u.Path == "" { u.Path = oconf.DefaultMetricsPath } c := &HTTPCollector{ headers: http.Header{}, storage: NewStorage(), resultCh: resultCh, } for _, opt := range opts { opt(c) } c.listener, err = net.Listen("tcp", u.Host) if err != nil { return nil, err } mux := http.NewServeMux() mux.Handle(u.Path, http.HandlerFunc(c.handler)) c.srv = &http.Server{ Handler: mux, ReadTimeout: 10 * time.Second, WriteTimeout: 10 * time.Second, } if u.Scheme == "https" { cert, err := weakCertificate() if err != nil { return nil, err } c.srv.TLSConfig = &tls.Config{ Certificates: []tls.Certificate{cert}, } go func() { _ = c.srv.ServeTLS(c.listener, "", "") }() } else { go func() { _ = c.srv.Serve(c.listener) }() } return c, nil } // WithHTTPCollectorRespondingPlainText makes the HTTPCollector return // a plaintext, instead of protobuf, response. func WithHTTPCollectorRespondingPlainText() func(*HTTPCollector) { return func(s *HTTPCollector) { s.plainTextResponse = true } } // Shutdown shuts down the HTTP server closing all open connections and // listeners. func (c *HTTPCollector) Shutdown(ctx context.Context) error { return c.srv.Shutdown(ctx) } // Addr returns the net.Addr c is listening at. func (c *HTTPCollector) Addr() net.Addr { return c.listener.Addr() } // Collect returns the Storage holding all collected requests. func (c *HTTPCollector) Collect() *Storage { return c.storage } // Headers returns the headers received for all requests. func (c *HTTPCollector) Headers() map[string][]string { // Makes a copy. c.headersMu.Lock() defer c.headersMu.Unlock() return c.headers.Clone() } func (c *HTTPCollector) handler(w http.ResponseWriter, r *http.Request) { c.respond(w, c.record(r)) } func (c *HTTPCollector) record(r *http.Request) ExportResult { // Currently only supports protobuf. 
if v := r.Header.Get("Content-Type"); v != "application/x-protobuf" { err := fmt.Errorf("content-type not supported: %s", v) return ExportResult{Err: err} } body, err := c.readBody(r) if err != nil { return ExportResult{Err: err} } pbRequest := &collpb.ExportMetricsServiceRequest{} err = proto.Unmarshal(body, pbRequest) if err != nil { return ExportResult{ Err: &HTTPResponseError{ Err: err, Status: http.StatusInternalServerError, }, } } c.storage.Add(pbRequest) c.headersMu.Lock() for k, vals := range r.Header { for _, v := range vals { c.headers.Add(k, v) } } c.headersMu.Unlock() if c.resultCh != nil { return <-c.resultCh } return ExportResult{Err: err} } func (c *HTTPCollector) readBody(r *http.Request) (body []byte, err error) { var reader io.ReadCloser switch r.Header.Get("Content-Encoding") { case "gzip": reader, err = gzip.NewReader(r.Body) if err != nil { _ = reader.Close() return nil, &HTTPResponseError{ Err: err, Status: http.StatusInternalServerError, } } default: reader = r.Body } defer func() { cErr := reader.Close() if err == nil && cErr != nil { err = &HTTPResponseError{ Err: cErr, Status: http.StatusInternalServerError, } } }() body, err = io.ReadAll(reader) if err != nil { err = &HTTPResponseError{ Err: err, Status: http.StatusInternalServerError, } } return body, err } func (c *HTTPCollector) respond(w http.ResponseWriter, resp ExportResult) { if resp.Err != nil { w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.Header().Set("X-Content-Type-Options", "nosniff") var e *HTTPResponseError if errors.As(resp.Err, &e) { for k, vals := range e.Header { for _, v := range vals { w.Header().Add(k, v) } } w.WriteHeader(e.Status) fmt.Fprintln(w, e.Error()) } else { w.WriteHeader(http.StatusBadRequest) fmt.Fprintln(w, resp.Err.Error()) } return } if c.plainTextResponse { w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.WriteHeader(http.StatusOK) _, _ = w.Write([]byte("OK")) return } w.Header().Set("Content-Type", "application/x-protobuf") w.WriteHeader(http.StatusOK) if resp.Response == nil { _, _ = w.Write(emptyExportMetricsServiceResponse) } else { r, err := proto.Marshal(resp.Response) if err != nil { panic(err) } _, _ = w.Write(r) } } // Based on https://golang.org/src/crypto/tls/generate_cert.go, // simplified and weakened. 
func weakCertificate() (tls.Certificate, error) { priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) if err != nil { return tls.Certificate{}, err } notBefore := time.Now() notAfter := notBefore.Add(time.Hour) max := new(big.Int).Lsh(big.NewInt(1), 128) sn, err := rand.Int(rand.Reader, max) if err != nil { return tls.Certificate{}, err } tmpl := x509.Certificate{ SerialNumber: sn, Subject: pkix.Name{Organization: []string{"otel-go"}}, NotBefore: notBefore, NotAfter: notAfter, KeyUsage: x509.KeyUsageDigitalSignature, ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, BasicConstraintsValid: true, DNSNames: []string{"localhost"}, IPAddresses: []net.IP{net.IPv6loopback, net.IPv4(127, 0, 0, 1)}, } derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv) if err != nil { return tls.Certificate{}, err } var certBuf bytes.Buffer err = pem.Encode(&certBuf, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) if err != nil { return tls.Certificate{}, err } privBytes, err := x509.MarshalPKCS8PrivateKey(priv) if err != nil { return tls.Certificate{}, err } var privBuf bytes.Buffer err = pem.Encode(&privBuf, &pem.Block{Type: "PRIVATE KEY", Bytes: privBytes}) if err != nil { return tls.Certificate{}, err } return tls.X509KeyPair(certBuf.Bytes(), privBuf.Bytes()) } opentelemetry-go-1.21.0/internal/shared/otlp/otlpmetric/transform/000077500000000000000000000000001452547353200253415ustar00rootroot00000000000000opentelemetry-go-1.21.0/internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl000066400000000000000000000074361452547353200306600ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/transform/attribute.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package transform import ( "go.opentelemetry.io/otel/attribute" cpb "go.opentelemetry.io/proto/otlp/common/v1" ) // AttrIter transforms an attribute iterator into OTLP key-values. func AttrIter(iter attribute.Iterator) []*cpb.KeyValue { l := iter.Len() if l == 0 { return nil } out := make([]*cpb.KeyValue, 0, l) for iter.Next() { out = append(out, KeyValue(iter.Attribute())) } return out } // KeyValues transforms a slice of attribute KeyValues into OTLP key-values. func KeyValues(attrs []attribute.KeyValue) []*cpb.KeyValue { if len(attrs) == 0 { return nil } out := make([]*cpb.KeyValue, 0, len(attrs)) for _, kv := range attrs { out = append(out, KeyValue(kv)) } return out } // KeyValue transforms an attribute KeyValue into an OTLP key-value. func KeyValue(kv attribute.KeyValue) *cpb.KeyValue { return &cpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)} } // Value transforms an attribute Value into an OTLP AnyValue. 
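//
// For example (an illustrative sketch of the mapping, not an exhaustive
// list):
//
//	Value(attribute.StringValue("alice"))
//	// => &cpb.AnyValue{Value: &cpb.AnyValue_StringValue{StringValue: "alice"}}
//
//	Value(attribute.Int64Value(1))
//	// => &cpb.AnyValue{Value: &cpb.AnyValue_IntValue{IntValue: 1}}
//
// Values of an unknown type are encoded as the string "INVALID".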
func Value(v attribute.Value) *cpb.AnyValue { av := new(cpb.AnyValue) switch v.Type() { case attribute.BOOL: av.Value = &cpb.AnyValue_BoolValue{ BoolValue: v.AsBool(), } case attribute.BOOLSLICE: av.Value = &cpb.AnyValue_ArrayValue{ ArrayValue: &cpb.ArrayValue{ Values: boolSliceValues(v.AsBoolSlice()), }, } case attribute.INT64: av.Value = &cpb.AnyValue_IntValue{ IntValue: v.AsInt64(), } case attribute.INT64SLICE: av.Value = &cpb.AnyValue_ArrayValue{ ArrayValue: &cpb.ArrayValue{ Values: int64SliceValues(v.AsInt64Slice()), }, } case attribute.FLOAT64: av.Value = &cpb.AnyValue_DoubleValue{ DoubleValue: v.AsFloat64(), } case attribute.FLOAT64SLICE: av.Value = &cpb.AnyValue_ArrayValue{ ArrayValue: &cpb.ArrayValue{ Values: float64SliceValues(v.AsFloat64Slice()), }, } case attribute.STRING: av.Value = &cpb.AnyValue_StringValue{ StringValue: v.AsString(), } case attribute.STRINGSLICE: av.Value = &cpb.AnyValue_ArrayValue{ ArrayValue: &cpb.ArrayValue{ Values: stringSliceValues(v.AsStringSlice()), }, } default: av.Value = &cpb.AnyValue_StringValue{ StringValue: "INVALID", } } return av } func boolSliceValues(vals []bool) []*cpb.AnyValue { converted := make([]*cpb.AnyValue, len(vals)) for i, v := range vals { converted[i] = &cpb.AnyValue{ Value: &cpb.AnyValue_BoolValue{ BoolValue: v, }, } } return converted } func int64SliceValues(vals []int64) []*cpb.AnyValue { converted := make([]*cpb.AnyValue, len(vals)) for i, v := range vals { converted[i] = &cpb.AnyValue{ Value: &cpb.AnyValue_IntValue{ IntValue: v, }, } } return converted } func float64SliceValues(vals []float64) []*cpb.AnyValue { converted := make([]*cpb.AnyValue, len(vals)) for i, v := range vals { converted[i] = &cpb.AnyValue{ Value: &cpb.AnyValue_DoubleValue{ DoubleValue: v, }, } } return converted } func stringSliceValues(vals []string) []*cpb.AnyValue { converted := make([]*cpb.AnyValue, len(vals)) for i, v := range vals { converted[i] = &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{ StringValue: v, }, } } return converted } opentelemetry-go-1.21.0/internal/shared/otlp/otlpmetric/transform/attribute_test.go.tmpl000066400000000000000000000131021452547353200317020ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/transform/attribute_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package transform import ( "testing" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/attribute" cpb "go.opentelemetry.io/proto/otlp/common/v1" ) var ( attrBool = attribute.Bool("bool", true) attrBoolSlice = attribute.BoolSlice("bool slice", []bool{true, false}) attrInt = attribute.Int("int", 1) attrIntSlice = attribute.IntSlice("int slice", []int{-1, 1}) attrInt64 = attribute.Int64("int64", 1) attrInt64Slice = attribute.Int64Slice("int64 slice", []int64{-1, 1}) attrFloat64 = attribute.Float64("float64", 1) attrFloat64Slice = attribute.Float64Slice("float64 slice", []float64{-1, 1}) attrString = attribute.String("string", "o") attrStringSlice = attribute.StringSlice("string slice", []string{"o", "n"}) attrInvalid = attribute.KeyValue{ Key: attribute.Key("invalid"), Value: attribute.Value{}, } valBoolTrue = &cpb.AnyValue{Value: &cpb.AnyValue_BoolValue{BoolValue: true}} valBoolFalse = &cpb.AnyValue{Value: &cpb.AnyValue_BoolValue{BoolValue: false}} valBoolSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{ ArrayValue: &cpb.ArrayValue{ Values: []*cpb.AnyValue{valBoolTrue, valBoolFalse}, }, }} valIntOne = &cpb.AnyValue{Value: &cpb.AnyValue_IntValue{IntValue: 1}} valIntNOne = &cpb.AnyValue{Value: &cpb.AnyValue_IntValue{IntValue: -1}} valIntSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{ ArrayValue: &cpb.ArrayValue{ Values: []*cpb.AnyValue{valIntNOne, valIntOne}, }, }} valDblOne = &cpb.AnyValue{Value: &cpb.AnyValue_DoubleValue{DoubleValue: 1}} valDblNOne = &cpb.AnyValue{Value: &cpb.AnyValue_DoubleValue{DoubleValue: -1}} valDblSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{ ArrayValue: &cpb.ArrayValue{ Values: []*cpb.AnyValue{valDblNOne, valDblOne}, }, }} valStrO = &cpb.AnyValue{Value: &cpb.AnyValue_StringValue{StringValue: "o"}} valStrN = &cpb.AnyValue{Value: &cpb.AnyValue_StringValue{StringValue: "n"}} valStrSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{ ArrayValue: &cpb.ArrayValue{ Values: []*cpb.AnyValue{valStrO, valStrN}, }, }} kvBool = &cpb.KeyValue{Key: "bool", Value: valBoolTrue} kvBoolSlice = &cpb.KeyValue{Key: "bool slice", Value: valBoolSlice} kvInt = &cpb.KeyValue{Key: "int", Value: valIntOne} kvIntSlice = &cpb.KeyValue{Key: "int slice", Value: valIntSlice} kvInt64 = &cpb.KeyValue{Key: "int64", Value: valIntOne} kvInt64Slice = &cpb.KeyValue{Key: "int64 slice", Value: valIntSlice} kvFloat64 = &cpb.KeyValue{Key: "float64", Value: valDblOne} kvFloat64Slice = &cpb.KeyValue{Key: "float64 slice", Value: valDblSlice} kvString = &cpb.KeyValue{Key: "string", Value: valStrO} kvStringSlice = &cpb.KeyValue{Key: "string slice", Value: valStrSlice} kvInvalid = &cpb.KeyValue{ Key: "invalid", Value: &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{StringValue: "INVALID"}, }, } ) type attributeTest struct { name string in []attribute.KeyValue want []*cpb.KeyValue } func TestAttributeTransforms(t *testing.T) { for _, test := range []attributeTest{ {"nil", nil, nil}, {"empty", []attribute.KeyValue{}, nil}, { "invalid", []attribute.KeyValue{attrInvalid}, []*cpb.KeyValue{kvInvalid}, }, { "bool", []attribute.KeyValue{attrBool}, []*cpb.KeyValue{kvBool}, }, { "bool slice", []attribute.KeyValue{attrBoolSlice}, []*cpb.KeyValue{kvBoolSlice}, }, { "int", []attribute.KeyValue{attrInt}, []*cpb.KeyValue{kvInt}, }, { "int slice", []attribute.KeyValue{attrIntSlice}, []*cpb.KeyValue{kvIntSlice}, }, { "int64", []attribute.KeyValue{attrInt64}, []*cpb.KeyValue{kvInt64}, }, { "int64 slice", []attribute.KeyValue{attrInt64Slice}, []*cpb.KeyValue{kvInt64Slice}, }, { "float64", 
[]attribute.KeyValue{attrFloat64}, []*cpb.KeyValue{kvFloat64}, }, { "float64 slice", []attribute.KeyValue{attrFloat64Slice}, []*cpb.KeyValue{kvFloat64Slice}, }, { "string", []attribute.KeyValue{attrString}, []*cpb.KeyValue{kvString}, }, { "string slice", []attribute.KeyValue{attrStringSlice}, []*cpb.KeyValue{kvStringSlice}, }, { "all", []attribute.KeyValue{ attrBool, attrBoolSlice, attrInt, attrIntSlice, attrInt64, attrInt64Slice, attrFloat64, attrFloat64Slice, attrString, attrStringSlice, attrInvalid, }, []*cpb.KeyValue{ kvBool, kvBoolSlice, kvInt, kvIntSlice, kvInt64, kvInt64Slice, kvFloat64, kvFloat64Slice, kvString, kvStringSlice, kvInvalid, }, }, } { t.Run(test.name, func(t *testing.T) { t.Run("KeyValues", func(t *testing.T) { assert.ElementsMatch(t, test.want, KeyValues(test.in)) }) t.Run("AttrIter", func(t *testing.T) { s := attribute.NewSet(test.in...) assert.ElementsMatch(t, test.want, AttrIter(s.Iter())) }) }) } } opentelemetry-go-1.21.0/internal/shared/otlp/otlpmetric/transform/error.go.tmpl000066400000000000000000000056371452547353200300070ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/transform/error.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package transform import ( "errors" "fmt" "strings" mpb "go.opentelemetry.io/proto/otlp/metrics/v1" ) var ( errUnknownAggregation = errors.New("unknown aggregation") errUnknownTemporality = errors.New("unknown temporality") ) type errMetric struct { m *mpb.Metric err error } func (e errMetric) Unwrap() error { return e.err } func (e errMetric) Error() string { format := "invalid metric (name: %q, description: %q, unit: %q): %s" return fmt.Sprintf(format, e.m.Name, e.m.Description, e.m.Unit, e.err) } func (e errMetric) Is(target error) bool { return errors.Is(e.err, target) } // multiErr is used by the data-type transform functions to wrap multiple // errors into a single return value. The error message will show all errors // as a list and scope them by the datatype name that is returning them. type multiErr struct { datatype string errs []error } // errOrNil returns nil if e contains no errors, otherwise it returns e. func (e *multiErr) errOrNil() error { if len(e.errs) == 0 { return nil } return e } // append adds err to e. If err is a multiErr, its errs are flattened into e. func (e *multiErr) append(err error) { // Do not use errors.As here, this should only be flattened one layer. If // there is a *multiErr several steps down the chain, all the errors above // it will be discarded if errors.As is used instead. switch other := err.(type) { case *multiErr: // Flatten err errors into e. e.errs = append(e.errs, other.errs...) 
default: e.errs = append(e.errs, err) } } func (e *multiErr) Error() string { es := make([]string, len(e.errs)) for i, err := range e.errs { es[i] = fmt.Sprintf("* %s", err) } format := "%d errors occurred transforming %s:\n\t%s" return fmt.Sprintf(format, len(es), e.datatype, strings.Join(es, "\n\t")) } func (e *multiErr) Unwrap() error { switch len(e.errs) { case 0: return nil case 1: return e.errs[0] } // Return a multiErr without the leading error. cp := &multiErr{ datatype: e.datatype, errs: make([]error, len(e.errs)-1), } copy(cp.errs, e.errs[1:]) return cp } func (e *multiErr) Is(target error) bool { if len(e.errs) == 0 { return false } // Check if the first error is target. return errors.Is(e.errs[0], target) } opentelemetry-go-1.21.0/internal/shared/otlp/otlpmetric/transform/error_test.go.tmpl000066400000000000000000000050751452547353200310420ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/transform/error_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package transform import ( "strings" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) var ( e0 = errMetric{m: pbMetrics[0], err: errUnknownAggregation} e1 = errMetric{m: pbMetrics[1], err: errUnknownTemporality} ) type testingErr struct{} func (testingErr) Error() string { return "testing error" } // errFunc is a non-comparable error type. type errFunc func() string func (e errFunc) Error() string { return e() } func TestMultiErr(t *testing.T) { const name = "TestMultiErr" me := &multiErr{datatype: name} t.Run("ErrOrNil", func(t *testing.T) { require.Nil(t, me.errOrNil()) me.errs = []error{e0} assert.Error(t, me.errOrNil()) }) var testErr testingErr t.Run("AppendError", func(t *testing.T) { me.append(testErr) assert.Equal(t, testErr, me.errs[len(me.errs)-1]) }) t.Run("AppendFlattens", func(t *testing.T) { other := &multiErr{datatype: "OtherTestMultiErr", errs: []error{e1}} me.append(other) assert.Equal(t, e1, me.errs[len(me.errs)-1]) }) t.Run("ErrorMessage", func(t *testing.T) { // Test the overall structure of the message, but not the exact // language so this doesn't become a change-indicator. 
msg := me.Error() lines := strings.Split(msg, "\n") assert.Equalf(t, 4, len(lines), "expected a 4 line error message, got:\n\n%s", msg) assert.Contains(t, msg, name) assert.Contains(t, msg, e0.Error()) assert.Contains(t, msg, testErr.Error()) assert.Contains(t, msg, e1.Error()) }) t.Run("ErrorIs", func(t *testing.T) { assert.ErrorIs(t, me, errUnknownAggregation) assert.ErrorIs(t, me, e0) assert.ErrorIs(t, me, testErr) assert.ErrorIs(t, me, errUnknownTemporality) assert.ErrorIs(t, me, e1) errUnknown := errFunc(func() string { return "unknown error" }) assert.NotErrorIs(t, me, errUnknown) var empty multiErr assert.NotErrorIs(t, &empty, errUnknownTemporality) }) } opentelemetry-go-1.21.0/internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl000066400000000000000000000225021452547353200307610ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/transform/metricdata.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package transform provides transformation functionality from the // sdk/metric/metricdata data-types into OTLP data-types. package transform import ( "fmt" "time" "go.opentelemetry.io/otel/sdk/metric/metricdata" cpb "go.opentelemetry.io/proto/otlp/common/v1" mpb "go.opentelemetry.io/proto/otlp/metrics/v1" rpb "go.opentelemetry.io/proto/otlp/resource/v1" ) // ResourceMetrics returns an OTLP ResourceMetrics generated from rm. If rm // contains invalid ScopeMetrics, an error will be returned along with an OTLP // ResourceMetrics that contains partial OTLP ScopeMetrics. func ResourceMetrics(rm *metricdata.ResourceMetrics) (*mpb.ResourceMetrics, error) { sms, err := ScopeMetrics(rm.ScopeMetrics) return &mpb.ResourceMetrics{ Resource: &rpb.Resource{ Attributes: AttrIter(rm.Resource.Iter()), }, ScopeMetrics: sms, SchemaUrl: rm.Resource.SchemaURL(), }, err } // ScopeMetrics returns a slice of OTLP ScopeMetrics generated from sms. If // sms contains invalid metric values, an error will be returned along with a // slice that contains partial OTLP ScopeMetrics. func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) { errs := &multiErr{datatype: "ScopeMetrics"} out := make([]*mpb.ScopeMetrics, 0, len(sms)) for _, sm := range sms { ms, err := Metrics(sm.Metrics) if err != nil { errs.append(err) } out = append(out, &mpb.ScopeMetrics{ Scope: &cpb.InstrumentationScope{ Name: sm.Scope.Name, Version: sm.Scope.Version, }, Metrics: ms, SchemaUrl: sm.Scope.SchemaURL, }) } return out, errs.errOrNil() } // Metrics returns a slice of OTLP Metric generated from ms. If ms contains // invalid metric values, an error will be returned along with a slice that // contains partial OTLP Metrics. func Metrics(ms []metricdata.Metrics) ([]*mpb.Metric, error) { errs := &multiErr{datatype: "Metrics"} out := make([]*mpb.Metric, 0, len(ms)) for _, m := range ms { o, err := metric(m) if err != nil { // Do not include invalid data. Drop the metric, report the error. 
errs.append(errMetric{m: o, err: err}) continue } out = append(out, o) } return out, errs.errOrNil() } func metric(m metricdata.Metrics) (*mpb.Metric, error) { var err error out := &mpb.Metric{ Name: m.Name, Description: m.Description, Unit: string(m.Unit), } switch a := m.Data.(type) { case metricdata.Gauge[int64]: out.Data = Gauge[int64](a) case metricdata.Gauge[float64]: out.Data = Gauge[float64](a) case metricdata.Sum[int64]: out.Data, err = Sum[int64](a) case metricdata.Sum[float64]: out.Data, err = Sum[float64](a) case metricdata.Histogram[int64]: out.Data, err = Histogram(a) case metricdata.Histogram[float64]: out.Data, err = Histogram(a) case metricdata.ExponentialHistogram[int64]: out.Data, err = ExponentialHistogram(a) case metricdata.ExponentialHistogram[float64]: out.Data, err = ExponentialHistogram(a) default: return out, fmt.Errorf("%w: %T", errUnknownAggregation, a) } return out, err } // Gauge returns an OTLP Metric_Gauge generated from g. func Gauge[N int64 | float64](g metricdata.Gauge[N]) *mpb.Metric_Gauge { return &mpb.Metric_Gauge{ Gauge: &mpb.Gauge{ DataPoints: DataPoints(g.DataPoints), }, } } // Sum returns an OTLP Metric_Sum generated from s. An error is returned // if the temporality of s is unknown. func Sum[N int64 | float64](s metricdata.Sum[N]) (*mpb.Metric_Sum, error) { t, err := Temporality(s.Temporality) if err != nil { return nil, err } return &mpb.Metric_Sum{ Sum: &mpb.Sum{ AggregationTemporality: t, IsMonotonic: s.IsMonotonic, DataPoints: DataPoints(s.DataPoints), }, }, nil } // DataPoints returns a slice of OTLP NumberDataPoint generated from dPts. func DataPoints[N int64 | float64](dPts []metricdata.DataPoint[N]) []*mpb.NumberDataPoint { out := make([]*mpb.NumberDataPoint, 0, len(dPts)) for _, dPt := range dPts { ndp := &mpb.NumberDataPoint{ Attributes: AttrIter(dPt.Attributes.Iter()), StartTimeUnixNano: timeUnixNano(dPt.StartTime), TimeUnixNano: timeUnixNano(dPt.Time), } switch v := any(dPt.Value).(type) { case int64: ndp.Value = &mpb.NumberDataPoint_AsInt{ AsInt: v, } case float64: ndp.Value = &mpb.NumberDataPoint_AsDouble{ AsDouble: v, } } out = append(out, ndp) } return out } // Histogram returns an OTLP Metric_Histogram generated from h. An error is // returned if the temporality of h is unknown. func Histogram[N int64 | float64](h metricdata.Histogram[N]) (*mpb.Metric_Histogram, error) { t, err := Temporality(h.Temporality) if err != nil { return nil, err } return &mpb.Metric_Histogram{ Histogram: &mpb.Histogram{ AggregationTemporality: t, DataPoints: HistogramDataPoints(h.DataPoints), }, }, nil } // HistogramDataPoints returns a slice of OTLP HistogramDataPoint generated // from dPts. func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint[N]) []*mpb.HistogramDataPoint { out := make([]*mpb.HistogramDataPoint, 0, len(dPts)) for _, dPt := range dPts { sum := float64(dPt.Sum) hdp := &mpb.HistogramDataPoint{ Attributes: AttrIter(dPt.Attributes.Iter()), StartTimeUnixNano: timeUnixNano(dPt.StartTime), TimeUnixNano: timeUnixNano(dPt.Time), Count: dPt.Count, Sum: &sum, BucketCounts: dPt.BucketCounts, ExplicitBounds: dPt.Bounds, } if v, ok := dPt.Min.Value(); ok { vF64 := float64(v) hdp.Min = &vF64 } if v, ok := dPt.Max.Value(); ok { vF64 := float64(v) hdp.Max = &vF64 } out = append(out, hdp) } return out } // ExponentialHistogram returns an OTLP Metric_ExponentialHistogram generated from h. An error is // returned if the temporality of h is unknown. 
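//
// Illustrative sketch (h is an assumed metricdata.ExponentialHistogram[int64]
// produced by an SDK reader; the metric name and unit are placeholders):
//
//	data, err := ExponentialHistogram(h)
//	if err != nil {
//		return err // unknown temporality; data is nil
//	}
//	m := &mpb.Metric{Name: "latency", Unit: "ms", Data: data}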
func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N]) (*mpb.Metric_ExponentialHistogram, error) { t, err := Temporality(h.Temporality) if err != nil { return nil, err } return &mpb.Metric_ExponentialHistogram{ ExponentialHistogram: &mpb.ExponentialHistogram{ AggregationTemporality: t, DataPoints: ExponentialHistogramDataPoints(h.DataPoints), }, }, nil } // ExponentialHistogramDataPoints returns a slice of OTLP ExponentialHistogramDataPoint generated // from dPts. func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.ExponentialHistogramDataPoint[N]) []*mpb.ExponentialHistogramDataPoint { out := make([]*mpb.ExponentialHistogramDataPoint, 0, len(dPts)) for _, dPt := range dPts { sum := float64(dPt.Sum) ehdp := &mpb.ExponentialHistogramDataPoint{ Attributes: AttrIter(dPt.Attributes.Iter()), StartTimeUnixNano: timeUnixNano(dPt.StartTime), TimeUnixNano: timeUnixNano(dPt.Time), Count: dPt.Count, Sum: &sum, Scale: dPt.Scale, ZeroCount: dPt.ZeroCount, Positive: ExponentialHistogramDataPointBuckets(dPt.PositiveBucket), Negative: ExponentialHistogramDataPointBuckets(dPt.NegativeBucket), } if v, ok := dPt.Min.Value(); ok { vF64 := float64(v) ehdp.Min = &vF64 } if v, ok := dPt.Max.Value(); ok { vF64 := float64(v) ehdp.Max = &vF64 } out = append(out, ehdp) } return out } // ExponentialHistogramDataPointBuckets returns an OTLP ExponentialHistogramDataPoint_Buckets generated // from bucket. func ExponentialHistogramDataPointBuckets(bucket metricdata.ExponentialBucket) *mpb.ExponentialHistogramDataPoint_Buckets { return &mpb.ExponentialHistogramDataPoint_Buckets{ Offset: bucket.Offset, BucketCounts: bucket.Counts, } } // Temporality returns an OTLP AggregationTemporality generated from t. If t // is unknown, an error is returned along with the invalid // AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED. func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) { switch t { case metricdata.DeltaTemporality: return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, nil case metricdata.CumulativeTemporality: return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, nil default: err := fmt.Errorf("%w: %s", errUnknownTemporality, t) return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED, err } } // timeUnixNano returns t as a Unix time, the number of nanoseconds elapsed // since January 1, 1970 UTC as uint64. // The result is undefined if the Unix time // in nanoseconds cannot be represented by an int64 // (a date before the year 1678 or after 2262). // timeUnixNano on the zero Time returns 0. // The result does not depend on the location associated with t. func timeUnixNano(t time.Time) uint64 { if t.IsZero() { return 0 } return uint64(t.UnixNano()) } opentelemetry-go-1.21.0/internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl000066400000000000000000000437171452547353200320330ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package transform import ( "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/resource" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" cpb "go.opentelemetry.io/proto/otlp/common/v1" mpb "go.opentelemetry.io/proto/otlp/metrics/v1" rpb "go.opentelemetry.io/proto/otlp/resource/v1" ) type unknownAggT struct { metricdata.Aggregation } var ( // Sat Jan 01 2000 00:00:00 GMT+0000. start = time.Date(2000, time.January, 0o1, 0, 0, 0, 0, time.FixedZone("GMT", 0)) end = start.Add(30 * time.Second) alice = attribute.NewSet(attribute.String("user", "alice")) bob = attribute.NewSet(attribute.String("user", "bob")) pbAlice = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{StringValue: "alice"}, }} pbBob = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{StringValue: "bob"}, }} minA, maxA, sumA = 2.0, 4.0, 90.0 minB, maxB, sumB = 4.0, 150.0, 234.0 otelHDPInt64 = []metricdata.HistogramDataPoint[int64]{ { Attributes: alice, StartTime: start, Time: end, Count: 30, Bounds: []float64{1, 5}, BucketCounts: []uint64{0, 30, 0}, Min: metricdata.NewExtrema(int64(minA)), Max: metricdata.NewExtrema(int64(maxA)), Sum: int64(sumA), }, { Attributes: bob, StartTime: start, Time: end, Count: 3, Bounds: []float64{1, 5}, BucketCounts: []uint64{0, 1, 2}, Min: metricdata.NewExtrema(int64(minB)), Max: metricdata.NewExtrema(int64(maxB)), Sum: int64(sumB), }, } otelHDPFloat64 = []metricdata.HistogramDataPoint[float64]{ { Attributes: alice, StartTime: start, Time: end, Count: 30, Bounds: []float64{1, 5}, BucketCounts: []uint64{0, 30, 0}, Min: metricdata.NewExtrema(minA), Max: metricdata.NewExtrema(maxA), Sum: sumA, }, { Attributes: bob, StartTime: start, Time: end, Count: 3, Bounds: []float64{1, 5}, BucketCounts: []uint64{0, 1, 2}, Min: metricdata.NewExtrema(minB), Max: metricdata.NewExtrema(maxB), Sum: sumB, }, } otelEBucketA = metricdata.ExponentialBucket{ Offset: 5, Counts: []uint64{0, 5, 0, 5}, } otelEBucketB = metricdata.ExponentialBucket{ Offset: 3, Counts: []uint64{0, 5, 0, 5}, } otelEBucketsC = metricdata.ExponentialBucket{ Offset: 5, Counts: []uint64{0, 1}, } otelEBucketsD = metricdata.ExponentialBucket{ Offset: 3, Counts: []uint64{0, 1}, } otelEHDPInt64 = []metricdata.ExponentialHistogramDataPoint[int64]{ { Attributes: alice, StartTime: start, Time: end, Count: 30, Scale: 2, ZeroCount: 10, PositiveBucket: otelEBucketA, NegativeBucket: otelEBucketB, ZeroThreshold: .01, Min: metricdata.NewExtrema(int64(minA)), Max: metricdata.NewExtrema(int64(maxA)), Sum: int64(sumA), }, { Attributes: bob, StartTime: start, Time: end, Count: 3, Scale: 4, ZeroCount: 1, PositiveBucket: otelEBucketsC, NegativeBucket: otelEBucketsD, ZeroThreshold: .02, Min: metricdata.NewExtrema(int64(minB)), Max: metricdata.NewExtrema(int64(maxB)), Sum: int64(sumB), }, } otelEHDPFloat64 = []metricdata.ExponentialHistogramDataPoint[float64]{ { Attributes: alice, StartTime: start, 
Time: end, Count: 30, Scale: 2, ZeroCount: 10, PositiveBucket: otelEBucketA, NegativeBucket: otelEBucketB, ZeroThreshold: .01, Min: metricdata.NewExtrema(minA), Max: metricdata.NewExtrema(maxA), Sum: sumA, }, { Attributes: bob, StartTime: start, Time: end, Count: 3, Scale: 4, ZeroCount: 1, PositiveBucket: otelEBucketsC, NegativeBucket: otelEBucketsD, ZeroThreshold: .02, Min: metricdata.NewExtrema(minB), Max: metricdata.NewExtrema(maxB), Sum: sumB, }, } pbHDP = []*mpb.HistogramDataPoint{ { Attributes: []*cpb.KeyValue{pbAlice}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Count: 30, Sum: &sumA, ExplicitBounds: []float64{1, 5}, BucketCounts: []uint64{0, 30, 0}, Min: &minA, Max: &maxA, }, { Attributes: []*cpb.KeyValue{pbBob}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Count: 3, Sum: &sumB, ExplicitBounds: []float64{1, 5}, BucketCounts: []uint64{0, 1, 2}, Min: &minB, Max: &maxB, }, } pbEHDPBA = &mpb.ExponentialHistogramDataPoint_Buckets{ Offset: 5, BucketCounts: []uint64{0, 5, 0, 5}, } pbEHDPBB = &mpb.ExponentialHistogramDataPoint_Buckets{ Offset: 3, BucketCounts: []uint64{0, 5, 0, 5}, } pbEHDPBC = &mpb.ExponentialHistogramDataPoint_Buckets{ Offset: 5, BucketCounts: []uint64{0, 1}, } pbEHDPBD = &mpb.ExponentialHistogramDataPoint_Buckets{ Offset: 3, BucketCounts: []uint64{0, 1}, } pbEHDP = []*mpb.ExponentialHistogramDataPoint{ { Attributes: []*cpb.KeyValue{pbAlice}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Count: 30, Sum: &sumA, Scale: 2, ZeroCount: 10, Positive: pbEHDPBA, Negative: pbEHDPBB, Min: &minA, Max: &maxA, }, { Attributes: []*cpb.KeyValue{pbBob}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Count: 3, Sum: &sumB, Scale: 4, ZeroCount: 1, Positive: pbEHDPBC, Negative: pbEHDPBD, Min: &minB, Max: &maxB, }, } otelHistInt64 = metricdata.Histogram[int64]{ Temporality: metricdata.DeltaTemporality, DataPoints: otelHDPInt64, } otelHistFloat64 = metricdata.Histogram[float64]{ Temporality: metricdata.DeltaTemporality, DataPoints: otelHDPFloat64, } invalidTemporality metricdata.Temporality otelHistInvalid = metricdata.Histogram[int64]{ Temporality: invalidTemporality, DataPoints: otelHDPInt64, } otelExpoHistInt64 = metricdata.ExponentialHistogram[int64]{ Temporality: metricdata.DeltaTemporality, DataPoints: otelEHDPInt64, } otelExpoHistFloat64 = metricdata.ExponentialHistogram[float64]{ Temporality: metricdata.DeltaTemporality, DataPoints: otelEHDPFloat64, } otelExpoHistInvalid = metricdata.ExponentialHistogram[int64]{ Temporality: invalidTemporality, DataPoints: otelEHDPInt64, } pbHist = &mpb.Histogram{ AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, DataPoints: pbHDP, } pbExpoHist = &mpb.ExponentialHistogram{ AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, DataPoints: pbEHDP, } otelDPtsInt64 = []metricdata.DataPoint[int64]{ {Attributes: alice, StartTime: start, Time: end, Value: 1}, {Attributes: bob, StartTime: start, Time: end, Value: 2}, } otelDPtsFloat64 = []metricdata.DataPoint[float64]{ {Attributes: alice, StartTime: start, Time: end, Value: 1.0}, {Attributes: bob, StartTime: start, Time: end, Value: 2.0}, } pbDPtsInt64 = []*mpb.NumberDataPoint{ { Attributes: []*cpb.KeyValue{pbAlice}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Value: &mpb.NumberDataPoint_AsInt{AsInt: 1}, }, { Attributes: []*cpb.KeyValue{pbBob}, StartTimeUnixNano: 
uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Value: &mpb.NumberDataPoint_AsInt{AsInt: 2}, }, } pbDPtsFloat64 = []*mpb.NumberDataPoint{ { Attributes: []*cpb.KeyValue{pbAlice}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Value: &mpb.NumberDataPoint_AsDouble{AsDouble: 1.0}, }, { Attributes: []*cpb.KeyValue{pbBob}, StartTimeUnixNano: uint64(start.UnixNano()), TimeUnixNano: uint64(end.UnixNano()), Value: &mpb.NumberDataPoint_AsDouble{AsDouble: 2.0}, }, } otelSumInt64 = metricdata.Sum[int64]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: true, DataPoints: otelDPtsInt64, } otelSumFloat64 = metricdata.Sum[float64]{ Temporality: metricdata.DeltaTemporality, IsMonotonic: false, DataPoints: otelDPtsFloat64, } otelSumInvalid = metricdata.Sum[float64]{ Temporality: invalidTemporality, IsMonotonic: false, DataPoints: otelDPtsFloat64, } pbSumInt64 = &mpb.Sum{ AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, IsMonotonic: true, DataPoints: pbDPtsInt64, } pbSumFloat64 = &mpb.Sum{ AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, IsMonotonic: false, DataPoints: pbDPtsFloat64, } otelGaugeInt64 = metricdata.Gauge[int64]{DataPoints: otelDPtsInt64} otelGaugeFloat64 = metricdata.Gauge[float64]{DataPoints: otelDPtsFloat64} otelGaugeZeroStartTime = metricdata.Gauge[int64]{ DataPoints: []metricdata.DataPoint[int64]{ {Attributes: alice, StartTime: time.Time{}, Time: end, Value: 1}, }, } pbGaugeInt64 = &mpb.Gauge{DataPoints: pbDPtsInt64} pbGaugeFloat64 = &mpb.Gauge{DataPoints: pbDPtsFloat64} pbGaugeZeroStartTime = &mpb.Gauge{DataPoints: []*mpb.NumberDataPoint{ { Attributes: []*cpb.KeyValue{pbAlice}, StartTimeUnixNano: 0, TimeUnixNano: uint64(end.UnixNano()), Value: &mpb.NumberDataPoint_AsInt{AsInt: 1}, }, }} unknownAgg unknownAggT otelMetrics = []metricdata.Metrics{ { Name: "int64-gauge", Description: "Gauge with int64 values", Unit: "1", Data: otelGaugeInt64, }, { Name: "float64-gauge", Description: "Gauge with float64 values", Unit: "1", Data: otelGaugeFloat64, }, { Name: "int64-sum", Description: "Sum with int64 values", Unit: "1", Data: otelSumInt64, }, { Name: "float64-sum", Description: "Sum with float64 values", Unit: "1", Data: otelSumFloat64, }, { Name: "invalid-sum", Description: "Sum with invalid temporality", Unit: "1", Data: otelSumInvalid, }, { Name: "int64-histogram", Description: "Histogram", Unit: "1", Data: otelHistInt64, }, { Name: "float64-histogram", Description: "Histogram", Unit: "1", Data: otelHistFloat64, }, { Name: "invalid-histogram", Description: "Invalid histogram", Unit: "1", Data: otelHistInvalid, }, { Name: "unknown", Description: "Unknown aggregation", Unit: "1", Data: unknownAgg, }, { Name: "int64-ExponentialHistogram", Description: "Exponential Histogram", Unit: "1", Data: otelExpoHistInt64, }, { Name: "float64-ExponentialHistogram", Description: "Exponential Histogram", Unit: "1", Data: otelExpoHistFloat64, }, { Name: "invalid-ExponentialHistogram", Description: "Invalid Exponential Histogram", Unit: "1", Data: otelExpoHistInvalid, }, { Name: "zero-time", Description: "Gauge with 0 StartTime", Unit: "1", Data: otelGaugeZeroStartTime, }, } pbMetrics = []*mpb.Metric{ { Name: "int64-gauge", Description: "Gauge with int64 values", Unit: "1", Data: &mpb.Metric_Gauge{Gauge: pbGaugeInt64}, }, { Name: "float64-gauge", Description: "Gauge with float64 values", Unit: "1", Data: &mpb.Metric_Gauge{Gauge: pbGaugeFloat64}, }, { Name: "int64-sum", 
Description: "Sum with int64 values", Unit: "1", Data: &mpb.Metric_Sum{Sum: pbSumInt64}, }, { Name: "float64-sum", Description: "Sum with float64 values", Unit: "1", Data: &mpb.Metric_Sum{Sum: pbSumFloat64}, }, { Name: "int64-histogram", Description: "Histogram", Unit: "1", Data: &mpb.Metric_Histogram{Histogram: pbHist}, }, { Name: "float64-histogram", Description: "Histogram", Unit: "1", Data: &mpb.Metric_Histogram{Histogram: pbHist}, }, { Name: "int64-ExponentialHistogram", Description: "Exponential Histogram", Unit: "1", Data: &mpb.Metric_ExponentialHistogram{ExponentialHistogram: pbExpoHist}, }, { Name: "float64-ExponentialHistogram", Description: "Exponential Histogram", Unit: "1", Data: &mpb.Metric_ExponentialHistogram{ExponentialHistogram: pbExpoHist}, }, { Name: "zero-time", Description: "Gauge with 0 StartTime", Unit: "1", Data: &mpb.Metric_Gauge{Gauge: pbGaugeZeroStartTime}, }, } otelScopeMetrics = []metricdata.ScopeMetrics{ { Scope: instrumentation.Scope{ Name: "test/code/path", Version: "v0.1.0", SchemaURL: semconv.SchemaURL, }, Metrics: otelMetrics, }, } pbScopeMetrics = []*mpb.ScopeMetrics{ { Scope: &cpb.InstrumentationScope{ Name: "test/code/path", Version: "v0.1.0", }, Metrics: pbMetrics, SchemaUrl: semconv.SchemaURL, }, } otelRes = resource.NewWithAttributes( semconv.SchemaURL, semconv.ServiceName("test server"), semconv.ServiceVersion("v0.1.0"), ) pbRes = &rpb.Resource{ Attributes: []*cpb.KeyValue{ { Key: "service.name", Value: &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{StringValue: "test server"}, }, }, { Key: "service.version", Value: &cpb.AnyValue{ Value: &cpb.AnyValue_StringValue{StringValue: "v0.1.0"}, }, }, }, } otelResourceMetrics = &metricdata.ResourceMetrics{ Resource: otelRes, ScopeMetrics: otelScopeMetrics, } pbResourceMetrics = &mpb.ResourceMetrics{ Resource: pbRes, ScopeMetrics: pbScopeMetrics, SchemaUrl: semconv.SchemaURL, } ) func TestTransformations(t *testing.T) { // Run tests from the "bottom-up" of the metricdata data-types and halt // when a failure occurs to ensure the clearest failure message (as // opposed to the opposite of testing from the top-down which will obscure // errors deep inside the structs). // DataPoint types. assert.Equal(t, pbHDP, HistogramDataPoints(otelHDPInt64)) assert.Equal(t, pbHDP, HistogramDataPoints(otelHDPFloat64)) assert.Equal(t, pbDPtsInt64, DataPoints[int64](otelDPtsInt64)) require.Equal(t, pbDPtsFloat64, DataPoints[float64](otelDPtsFloat64)) assert.Equal(t, pbEHDP, ExponentialHistogramDataPoints(otelEHDPInt64)) assert.Equal(t, pbEHDP, ExponentialHistogramDataPoints(otelEHDPFloat64)) assert.Equal(t, pbEHDPBA, ExponentialHistogramDataPointBuckets(otelEBucketA)) // Aggregations. 
h, err := Histogram(otelHistInt64) assert.NoError(t, err) assert.Equal(t, &mpb.Metric_Histogram{Histogram: pbHist}, h) h, err = Histogram(otelHistFloat64) assert.NoError(t, err) assert.Equal(t, &mpb.Metric_Histogram{Histogram: pbHist}, h) h, err = Histogram(otelHistInvalid) assert.ErrorIs(t, err, errUnknownTemporality) assert.Nil(t, h) s, err := Sum[int64](otelSumInt64) assert.NoError(t, err) assert.Equal(t, &mpb.Metric_Sum{Sum: pbSumInt64}, s) s, err = Sum[float64](otelSumFloat64) assert.NoError(t, err) assert.Equal(t, &mpb.Metric_Sum{Sum: pbSumFloat64}, s) s, err = Sum[float64](otelSumInvalid) assert.ErrorIs(t, err, errUnknownTemporality) assert.Nil(t, s) assert.Equal(t, &mpb.Metric_Gauge{Gauge: pbGaugeInt64}, Gauge[int64](otelGaugeInt64)) require.Equal(t, &mpb.Metric_Gauge{Gauge: pbGaugeFloat64}, Gauge[float64](otelGaugeFloat64)) e, err := ExponentialHistogram(otelExpoHistInt64) assert.NoError(t, err) assert.Equal(t, &mpb.Metric_ExponentialHistogram{ExponentialHistogram: pbExpoHist}, e) e, err = ExponentialHistogram(otelExpoHistFloat64) assert.NoError(t, err) assert.Equal(t, &mpb.Metric_ExponentialHistogram{ExponentialHistogram: pbExpoHist}, e) e, err = ExponentialHistogram(otelExpoHistInvalid) assert.ErrorIs(t, err, errUnknownTemporality) assert.Nil(t, e) // Metrics. m, err := Metrics(otelMetrics) assert.ErrorIs(t, err, errUnknownTemporality) assert.ErrorIs(t, err, errUnknownAggregation) require.Equal(t, pbMetrics, m) // Scope Metrics. sm, err := ScopeMetrics(otelScopeMetrics) assert.ErrorIs(t, err, errUnknownTemporality) assert.ErrorIs(t, err, errUnknownAggregation) require.Equal(t, pbScopeMetrics, sm) // Resource Metrics. rm, err := ResourceMetrics(otelResourceMetrics) assert.ErrorIs(t, err, errUnknownTemporality) assert.ErrorIs(t, err, errUnknownAggregation) require.Equal(t, pbResourceMetrics, rm) } opentelemetry-go-1.21.0/internal/shared/otlp/otlptrace/000077500000000000000000000000001452547353200231415ustar00rootroot00000000000000opentelemetry-go-1.21.0/internal/shared/otlp/otlptrace/header.go.tmpl000066400000000000000000000021021452547353200256660ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/header.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal import ( "go.opentelemetry.io/otel/exporters/otlp/otlptrace" ) // GetUserAgentHeader returns an OTLP header value form "OTel OTLP Exporter Go/{ .Version }" // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/protocol/exporter.md#user-agent func GetUserAgentHeader() string { return "OTel OTLP Exporter Go/" + otlptrace.Version() } opentelemetry-go-1.21.0/internal/shared/otlp/otlptrace/header_test.go.tmpl000066400000000000000000000016071452547353200267360ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. 
// source: internal/shared/otlp/otlptrace/header_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal import ( "testing" "github.com/stretchr/testify/require" ) func TestGetUserAgentHeader(t *testing.T) { require.Regexp(t, "OTel OTLP Exporter Go/1\\..*", GetUserAgentHeader()) } opentelemetry-go-1.21.0/internal/shared/otlp/otlptrace/otlpconfig/000077500000000000000000000000001452547353200253055ustar00rootroot00000000000000opentelemetry-go-1.21.0/internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl000066400000000000000000000122001452547353200305600ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlpconfig import ( "crypto/tls" "crypto/x509" "net/url" "os" "path" "strings" "time" "{{ .envconfigImportPath }}" ) // DefaultEnvOptionsReader is the default environments reader. var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{ GetEnv: os.Getenv, ReadFile: os.ReadFile, Namespace: "OTEL_EXPORTER_OTLP", } // ApplyGRPCEnvConfigs applies the env configurations for gRPC. func ApplyGRPCEnvConfigs(cfg Config) Config { opts := getOptionsFromEnv() for _, opt := range opts { cfg = opt.ApplyGRPCOption(cfg) } return cfg } // ApplyHTTPEnvConfigs applies the env configurations for HTTP. func ApplyHTTPEnvConfigs(cfg Config) Config { opts := getOptionsFromEnv() for _, opt := range opts { cfg = opt.ApplyHTTPOption(cfg) } return cfg } func getOptionsFromEnv() []GenericOption { opts := []GenericOption{} tlsConf := &tls.Config{} DefaultEnvOptionsReader.Apply( envconfig.WithURL("ENDPOINT", func(u *url.URL) { opts = append(opts, withEndpointScheme(u)) opts = append(opts, newSplitOption(func(cfg Config) Config { cfg.Traces.Endpoint = u.Host // For OTLP/HTTP endpoint URLs without a per-signal // configuration, the passed endpoint is used as a base URL // and the signals are sent to these paths relative to that. cfg.Traces.URLPath = path.Join(u.Path, DefaultTracesPath) return cfg }, withEndpointForGRPC(u))) }), envconfig.WithURL("TRACES_ENDPOINT", func(u *url.URL) { opts = append(opts, withEndpointScheme(u)) opts = append(opts, newSplitOption(func(cfg Config) Config { cfg.Traces.Endpoint = u.Host // For endpoint URLs for OTLP/HTTP per-signal variables, the // URL MUST be used as-is without any modification. 
The only // exception is that if an URL contains no path part, the root // path / MUST be used. path := u.Path if path == "" { path = "/" } cfg.Traces.URLPath = path return cfg }, withEndpointForGRPC(u))) }), envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), envconfig.WithHeaders("TRACES_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), WithEnvCompression("TRACES_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), envconfig.WithDuration("TRACES_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), ) return opts } func withEndpointScheme(u *url.URL) GenericOption { switch strings.ToLower(u.Scheme) { case "http", "unix": return WithInsecure() default: return WithSecure() } } func withEndpointForGRPC(u *url.URL) func(cfg Config) Config { return func(cfg Config) Config { // For OTLP/gRPC endpoints, this is the target to which the // exporter is going to send telemetry. cfg.Traces.Endpoint = path.Join(u.Host, u.Path) return cfg } } // WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression. func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) { return func(e *envconfig.EnvOptionsReader) { if v, ok := e.GetEnvValue(n); ok { cp := NoCompression if v == "gzip" { cp = GzipCompression } fn(cp) } } } // revive:disable-next-line:flag-parameter func withInsecure(b bool) GenericOption { if b { return WithInsecure() } return WithSecure() } func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) { return func(e *envconfig.EnvOptionsReader) { if c.RootCAs != nil || len(c.Certificates) > 0 { fn(c) } } } opentelemetry-go-1.21.0/internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl000066400000000000000000000207201452547353200303030ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
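// Illustrative sketch (not part of the generated templates above): how the
// envconfig logic derives the OTLP/HTTP host and URL path from a base
// OTEL_EXPORTER_OTLP_ENDPOINT value, mirroring the WithURL("ENDPOINT", ...)
// callback and DefaultTracesPath ("/v1/traces"). splitBaseEndpoint is a
// hypothetical helper used only for demonstration.
package main

import (
	"fmt"
	"net/url"
	"path"
)

// splitBaseEndpoint splits a base endpoint URL into the host used as the
// exporter endpoint and the per-signal URL path appended to the base path.
func splitBaseEndpoint(raw string) (host, urlPath string, err error) {
	u, err := url.Parse(raw)
	if err != nil {
		return "", "", err
	}
	// Base (non signal-specific) endpoints get the default traces path
	// joined onto any path prefix present in the URL.
	return u.Host, path.Join(u.Path, "/v1/traces"), nil
}

func main() {
	host, p, _ := splitBaseEndpoint("https://env.endpoint/prefix")
	fmt.Println(host, p) // env.endpoint /prefix/v1/traces
}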
package otlpconfig import ( "crypto/tls" "fmt" "path" "strings" "time" "google.golang.org/grpc" "google.golang.org/grpc/backoff" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/encoding/gzip" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" "{{ .retryImportPath }}" ) const ( // DefaultTracesPath is a default URL path for endpoint that // receives spans. DefaultTracesPath string = "/v1/traces" // DefaultTimeout is a default max waiting time for the backend to process // each span batch. DefaultTimeout time.Duration = 10 * time.Second ) type ( SignalConfig struct { Endpoint string Insecure bool TLSCfg *tls.Config Headers map[string]string Compression Compression Timeout time.Duration URLPath string // gRPC configurations GRPCCredentials credentials.TransportCredentials } Config struct { // Signal specific configurations Traces SignalConfig RetryConfig retry.Config // gRPC configurations ReconnectionPeriod time.Duration ServiceConfig string DialOptions []grpc.DialOption GRPCConn *grpc.ClientConn } ) // NewHTTPConfig returns a new Config with all settings applied from opts and // any unset setting using the default HTTP config values. func NewHTTPConfig(opts ...HTTPOption) Config { cfg := Config{ Traces: SignalConfig{ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort), URLPath: DefaultTracesPath, Compression: NoCompression, Timeout: DefaultTimeout, }, RetryConfig: retry.DefaultConfig, } cfg = ApplyHTTPEnvConfigs(cfg) for _, opt := range opts { cfg = opt.ApplyHTTPOption(cfg) } cfg.Traces.URLPath = cleanPath(cfg.Traces.URLPath, DefaultTracesPath) return cfg } // cleanPath returns a path with all spaces trimmed and all redundancies // removed. If urlPath is empty or cleaning it results in an empty string, // defaultPath is returned instead. func cleanPath(urlPath string, defaultPath string) string { tmp := path.Clean(strings.TrimSpace(urlPath)) if tmp == "." { return defaultPath } if !path.IsAbs(tmp) { tmp = fmt.Sprintf("/%s", tmp) } return tmp } // NewGRPCConfig returns a new Config with all settings applied from opts and // any unset setting using the default gRPC config values. func NewGRPCConfig(opts ...GRPCOption) Config { userAgent := "OTel OTLP Exporter Go/" + otlptrace.Version() cfg := Config{ Traces: SignalConfig{ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort), URLPath: DefaultTracesPath, Compression: NoCompression, Timeout: DefaultTimeout, }, RetryConfig: retry.DefaultConfig, DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)}, } cfg = ApplyGRPCEnvConfigs(cfg) for _, opt := range opts { cfg = opt.ApplyGRPCOption(cfg) } if cfg.ServiceConfig != "" { cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig)) } // Priroritize GRPCCredentials over Insecure (passing both is an error). if cfg.Traces.GRPCCredentials != nil { cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials)) } else if cfg.Traces.Insecure { cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials())) } else { // Default to using the host's root CA. 
creds := credentials.NewTLS(nil) cfg.Traces.GRPCCredentials = creds cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds)) } if cfg.Traces.Compression == GzipCompression { cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name))) } if cfg.ReconnectionPeriod != 0 { p := grpc.ConnectParams{ Backoff: backoff.DefaultConfig, MinConnectTimeout: cfg.ReconnectionPeriod, } cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p)) } return cfg } type ( // GenericOption applies an option to the HTTP or gRPC driver. GenericOption interface { ApplyHTTPOption(Config) Config ApplyGRPCOption(Config) Config // A private method to prevent users implementing the // interface and so future additions to it will not // violate compatibility. private() } // HTTPOption applies an option to the HTTP driver. HTTPOption interface { ApplyHTTPOption(Config) Config // A private method to prevent users implementing the // interface and so future additions to it will not // violate compatibility. private() } // GRPCOption applies an option to the gRPC driver. GRPCOption interface { ApplyGRPCOption(Config) Config // A private method to prevent users implementing the // interface and so future additions to it will not // violate compatibility. private() } ) // genericOption is an option that applies the same logic // for both gRPC and HTTP. type genericOption struct { fn func(Config) Config } func (g *genericOption) ApplyGRPCOption(cfg Config) Config { return g.fn(cfg) } func (g *genericOption) ApplyHTTPOption(cfg Config) Config { return g.fn(cfg) } func (genericOption) private() {} func newGenericOption(fn func(cfg Config) Config) GenericOption { return &genericOption{fn: fn} } // splitOption is an option that applies different logics // for gRPC and HTTP. type splitOption struct { httpFn func(Config) Config grpcFn func(Config) Config } func (g *splitOption) ApplyGRPCOption(cfg Config) Config { return g.grpcFn(cfg) } func (g *splitOption) ApplyHTTPOption(cfg Config) Config { return g.httpFn(cfg) } func (splitOption) private() {} func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption { return &splitOption{httpFn: httpFn, grpcFn: grpcFn} } // httpOption is an option that is only applied to the HTTP driver. type httpOption struct { fn func(Config) Config } func (h *httpOption) ApplyHTTPOption(cfg Config) Config { return h.fn(cfg) } func (httpOption) private() {} func NewHTTPOption(fn func(cfg Config) Config) HTTPOption { return &httpOption{fn: fn} } // grpcOption is an option that is only applied to the gRPC driver. 
type grpcOption struct { fn func(Config) Config } func (h *grpcOption) ApplyGRPCOption(cfg Config) Config { return h.fn(cfg) } func (grpcOption) private() {} func NewGRPCOption(fn func(cfg Config) Config) GRPCOption { return &grpcOption{fn: fn} } // Generic Options func WithEndpoint(endpoint string) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Traces.Endpoint = endpoint return cfg }) } func WithCompression(compression Compression) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Traces.Compression = compression return cfg }) } func WithURLPath(urlPath string) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Traces.URLPath = urlPath return cfg }) } func WithRetry(rc retry.Config) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.RetryConfig = rc return cfg }) } func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption { return newSplitOption(func(cfg Config) Config { cfg.Traces.TLSCfg = tlsCfg.Clone() return cfg }, func(cfg Config) Config { cfg.Traces.GRPCCredentials = credentials.NewTLS(tlsCfg) return cfg }) } func WithInsecure() GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Traces.Insecure = true return cfg }) } func WithSecure() GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Traces.Insecure = false return cfg }) } func WithHeaders(headers map[string]string) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Traces.Headers = headers return cfg }) } func WithTimeout(duration time.Duration) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Traces.Timeout = duration return cfg }) } opentelemetry-go-1.21.0/internal/shared/otlp/otlptrace/otlpconfig/options_test.go.tmpl000066400000000000000000000336601452547353200313510ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlpconfig/options_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
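// Illustrative sketch (not part of the generated templates): composing the
// generic options defined in options.go.tmpl into an HTTP and a gRPC Config.
// It mirrors how the exporter constructors call NewHTTPConfig/NewGRPCConfig
// and how the tests convert GenericOption values; exampleConfigs and the
// endpoint/header values are placeholders for demonstration only.
package otlpconfig

import "time"

func exampleConfigs() (Config, Config) {
	opts := []GenericOption{
		WithEndpoint("collector.example:4318"),
		WithHeaders(map[string]string{"authorization": "Bearer <token>"}),
		WithCompression(GzipCompression),
		WithTimeout(5 * time.Second),
	}

	// Convert the generic options to signal/transport specific options.
	httpOpts := make([]HTTPOption, len(opts))
	grpcOpts := make([]GRPCOption, len(opts))
	for i, o := range opts {
		httpOpts[i] = NewHTTPOption(o.ApplyHTTPOption)
		grpcOpts[i] = NewGRPCOption(o.ApplyGRPCOption)
	}

	// Environment variables (OTEL_EXPORTER_OTLP_*) are applied first, then
	// the explicit options override them, per NewHTTPConfig/NewGRPCConfig.
	return NewHTTPConfig(httpOpts...), NewGRPCConfig(grpcOpts...)
}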
package otlpconfig import ( "errors" "testing" "time" "github.com/stretchr/testify/assert" "{{ .envconfigImportPath }}" ) const ( WeakCertificate = ` -----BEGIN CERTIFICATE----- MIIBhzCCASygAwIBAgIRANHpHgAWeTnLZpTSxCKs0ggwCgYIKoZIzj0EAwIwEjEQ MA4GA1UEChMHb3RlbC1nbzAeFw0yMTA0MDExMzU5MDNaFw0yMTA0MDExNDU5MDNa MBIxEDAOBgNVBAoTB290ZWwtZ28wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAS9 nWSkmPCxShxnp43F+PrOtbGV7sNfkbQ/kxzi9Ego0ZJdiXxkmv/C05QFddCW7Y0Z sJCLHGogQsYnWJBXUZOVo2MwYTAOBgNVHQ8BAf8EBAMCB4AwEwYDVR0lBAwwCgYI KwYBBQUHAwEwDAYDVR0TAQH/BAIwADAsBgNVHREEJTAjgglsb2NhbGhvc3SHEAAA AAAAAAAAAAAAAAAAAAGHBH8AAAEwCgYIKoZIzj0EAwIDSQAwRgIhANwZVVKvfvQ/ 1HXsTvgH+xTQswOwSSKYJ1cVHQhqK7ZbAiEAus8NxpTRnp5DiTMuyVmhVNPB+bVH Lhnm4N/QDk5rek0= -----END CERTIFICATE----- ` WeakPrivateKey = ` -----BEGIN PRIVATE KEY----- MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgN8HEXiXhvByrJ1zK SFT6Y2l2KqDWwWzKf+t4CyWrNKehRANCAAS9nWSkmPCxShxnp43F+PrOtbGV7sNf kbQ/kxzi9Ego0ZJdiXxkmv/C05QFddCW7Y0ZsJCLHGogQsYnWJBXUZOV -----END PRIVATE KEY----- ` ) type env map[string]string func (e *env) getEnv(env string) string { return (*e)[env] } type fileReader map[string][]byte func (f *fileReader) readFile(filename string) ([]byte, error) { if b, ok := (*f)[filename]; ok { return b, nil } return nil, errors.New("file not found") } func TestConfigs(t *testing.T) { tlsCert, err := CreateTLSConfig([]byte(WeakCertificate)) assert.NoError(t, err) tests := []struct { name string opts []GenericOption env env fileReader fileReader asserts func(t *testing.T, c *Config, grpcOption bool) }{ { name: "Test default configs", asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.Equal(t, "localhost:4317", c.Traces.Endpoint) } else { assert.Equal(t, "localhost:4318", c.Traces.Endpoint) } assert.Equal(t, NoCompression, c.Traces.Compression) assert.Equal(t, map[string]string(nil), c.Traces.Headers) assert.Equal(t, 10*time.Second, c.Traces.Timeout) }, }, // Endpoint Tests { name: "Test With Endpoint", opts: []GenericOption{ WithEndpoint("someendpoint"), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "someendpoint", c.Traces.Endpoint) }, }, { name: "Test Environment Endpoint", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "https://env.endpoint/prefix", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.False(t, c.Traces.Insecure) if grpcOption { assert.Equal(t, "env.endpoint/prefix", c.Traces.Endpoint) } else { assert.Equal(t, "env.endpoint", c.Traces.Endpoint) assert.Equal(t, "/prefix/v1/traces", c.Traces.URLPath) } }, }, { name: "Test Environment Signal Specific Endpoint", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "https://overrode.by.signal.specific/env/var", "OTEL_EXPORTER_OTLP_TRACES_ENDPOINT": "http://env.traces.endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.True(t, c.Traces.Insecure) assert.Equal(t, "env.traces.endpoint", c.Traces.Endpoint) if !grpcOption { assert.Equal(t, "/", c.Traces.URLPath) } }, }, { name: "Test Mixed Environment and With Endpoint", opts: []GenericOption{ WithEndpoint("traces_endpoint"), }, env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "env_endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "traces_endpoint", c.Traces.Endpoint) }, }, { name: "Test Environment Endpoint with HTTP scheme", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "http://env_endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "env_endpoint", c.Traces.Endpoint) 
assert.Equal(t, true, c.Traces.Insecure) }, }, { name: "Test Environment Endpoint with HTTP scheme and leading & trailingspaces", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": " http://env_endpoint ", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "env_endpoint", c.Traces.Endpoint) assert.Equal(t, true, c.Traces.Insecure) }, }, { name: "Test Environment Endpoint with HTTPS scheme", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "https://env_endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "env_endpoint", c.Traces.Endpoint) assert.Equal(t, false, c.Traces.Insecure) }, }, { name: "Test Environment Signal Specific Endpoint with uppercase scheme", env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "HTTPS://overrode_by_signal_specific", "OTEL_EXPORTER_OTLP_TRACES_ENDPOINT": "HtTp://env_traces_endpoint", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, "env_traces_endpoint", c.Traces.Endpoint) assert.Equal(t, true, c.Traces.Insecure) }, }, // Certificate tests { name: "Test Default Certificate", asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.NotNil(t, c.Traces.GRPCCredentials) } else { assert.Nil(t, c.Traces.TLSCfg) } }, }, { name: "Test With Certificate", opts: []GenericOption{ WithTLSClientConfig(tlsCert), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { // TODO: make sure gRPC's credentials actually works assert.NotNil(t, c.Traces.GRPCCredentials) } else { // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Traces.TLSCfg.RootCAs.Subjects()) } }, }, { name: "Test Environment Certificate", env: map[string]string{ "OTEL_EXPORTER_OTLP_CERTIFICATE": "cert_path", }, fileReader: fileReader{ "cert_path": []byte(WeakCertificate), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.NotNil(t, c.Traces.GRPCCredentials) } else { // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Traces.TLSCfg.RootCAs.Subjects()) } }, }, { name: "Test Environment Signal Specific Certificate", env: map[string]string{ "OTEL_EXPORTER_OTLP_CERTIFICATE": "overrode_by_signal_specific", "OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE": "cert_path", }, fileReader: fileReader{ "cert_path": []byte(WeakCertificate), "invalid_cert": []byte("invalid certificate file."), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.NotNil(t, c.Traces.GRPCCredentials) } else { // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Traces.TLSCfg.RootCAs.Subjects()) } }, }, { name: "Test Mixed Environment and With Certificate", opts: []GenericOption{}, env: map[string]string{ "OTEL_EXPORTER_OTLP_CERTIFICATE": "cert_path", }, fileReader: fileReader{ "cert_path": []byte(WeakCertificate), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { if grpcOption { assert.NotNil(t, c.Traces.GRPCCredentials) } else { // nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool. 
assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Traces.TLSCfg.RootCAs.Subjects()) } }, }, // Headers tests { name: "Test With Headers", opts: []GenericOption{ WithHeaders(map[string]string{"h1": "v1"}), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, map[string]string{"h1": "v1"}, c.Traces.Headers) }, }, { name: "Test Environment Headers", env: map[string]string{"OTEL_EXPORTER_OTLP_HEADERS": "h1=v1,h2=v2"}, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Traces.Headers) }, }, { name: "Test Environment Signal Specific Headers", env: map[string]string{ "OTEL_EXPORTER_OTLP_HEADERS": "overrode_by_signal_specific", "OTEL_EXPORTER_OTLP_TRACES_HEADERS": "h1=v1,h2=v2", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Traces.Headers) }, }, { name: "Test Mixed Environment and With Headers", env: map[string]string{"OTEL_EXPORTER_OTLP_HEADERS": "h1=v1,h2=v2"}, opts: []GenericOption{}, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Traces.Headers) }, }, // Compression Tests { name: "Test With Compression", opts: []GenericOption{ WithCompression(GzipCompression), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, GzipCompression, c.Traces.Compression) }, }, { name: "Test Environment Compression", env: map[string]string{ "OTEL_EXPORTER_OTLP_COMPRESSION": "gzip", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, GzipCompression, c.Traces.Compression) }, }, { name: "Test Environment Signal Specific Compression", env: map[string]string{ "OTEL_EXPORTER_OTLP_TRACES_COMPRESSION": "gzip", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, GzipCompression, c.Traces.Compression) }, }, { name: "Test Mixed Environment and With Compression", opts: []GenericOption{ WithCompression(NoCompression), }, env: map[string]string{ "OTEL_EXPORTER_OTLP_TRACES_COMPRESSION": "gzip", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, NoCompression, c.Traces.Compression) }, }, // Timeout Tests { name: "Test With Timeout", opts: []GenericOption{ WithTimeout(time.Duration(5 * time.Second)), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, 5*time.Second, c.Traces.Timeout) }, }, { name: "Test Environment Timeout", env: map[string]string{ "OTEL_EXPORTER_OTLP_TIMEOUT": "15000", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, c.Traces.Timeout, 15*time.Second) }, }, { name: "Test Environment Signal Specific Timeout", env: map[string]string{ "OTEL_EXPORTER_OTLP_TIMEOUT": "15000", "OTEL_EXPORTER_OTLP_TRACES_TIMEOUT": "27000", }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, c.Traces.Timeout, 27*time.Second) }, }, { name: "Test Mixed Environment and With Timeout", env: map[string]string{ "OTEL_EXPORTER_OTLP_TIMEOUT": "15000", "OTEL_EXPORTER_OTLP_TRACES_TIMEOUT": "27000", }, opts: []GenericOption{ WithTimeout(5 * time.Second), }, asserts: func(t *testing.T, c *Config, grpcOption bool) { assert.Equal(t, c.Traces.Timeout, 5*time.Second) }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { origEOR := DefaultEnvOptionsReader DefaultEnvOptionsReader = envconfig.EnvOptionsReader{ GetEnv: tt.env.getEnv, ReadFile: tt.fileReader.readFile, Namespace: "OTEL_EXPORTER_OTLP", } t.Cleanup(func() { 
DefaultEnvOptionsReader = origEOR }) // Tests Generic options as HTTP Options cfg := NewHTTPConfig(asHTTPOptions(tt.opts)...) tt.asserts(t, &cfg, false) // Tests Generic options as gRPC Options cfg = NewGRPCConfig(asGRPCOptions(tt.opts)...) tt.asserts(t, &cfg, true) }) } } func asHTTPOptions(opts []GenericOption) []HTTPOption { converted := make([]HTTPOption, len(opts)) for i, o := range opts { converted[i] = NewHTTPOption(o.ApplyHTTPOption) } return converted } func asGRPCOptions(opts []GenericOption) []GRPCOption { converted := make([]GRPCOption, len(opts)) for i, o := range opts { converted[i] = NewGRPCOption(o.ApplyGRPCOption) } return converted } func TestCleanPath(t *testing.T) { type args struct { urlPath string defaultPath string } tests := []struct { name string args args want string }{ { name: "clean empty path", args: args{ urlPath: "", defaultPath: "DefaultPath", }, want: "DefaultPath", }, { name: "clean metrics path", args: args{ urlPath: "/prefix/v1/metrics", defaultPath: "DefaultMetricsPath", }, want: "/prefix/v1/metrics", }, { name: "clean traces path", args: args{ urlPath: "https://env_endpoint", defaultPath: "DefaultTracesPath", }, want: "/https:/env_endpoint", }, { name: "spaces trimmed", args: args{ urlPath: " /dir", }, want: "/dir", }, { name: "clean path empty", args: args{ urlPath: "dir/..", defaultPath: "DefaultTracesPath", }, want: "DefaultTracesPath", }, { name: "make absolute", args: args{ urlPath: "dir/a", }, want: "/dir/a", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := cleanPath(tt.args.urlPath, tt.args.defaultPath); got != tt.want { t.Errorf("CleanPath() = %v, want %v", got, tt.want) } }) } } opentelemetry-go-1.21.0/internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl000066400000000000000000000033341452547353200312070ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlpconfig const ( // DefaultCollectorGRPCPort is the default gRPC port of the collector. DefaultCollectorGRPCPort uint16 = 4317 // DefaultCollectorHTTPPort is the default HTTP port of the collector. DefaultCollectorHTTPPort uint16 = 4318 // DefaultCollectorHost is the host address the Exporter will attempt // connect to if no collector address is provided. DefaultCollectorHost string = "localhost" ) // Compression describes the compression used for payloads sent to the // collector. type Compression int const ( // NoCompression tells the driver to send payloads without // compression. NoCompression Compression = iota // GzipCompression tells the driver to send payloads after // compressing them with gzip. GzipCompression ) // Marshaler describes the kind of message format sent to the collector. type Marshaler int const ( // MarshalProto tells the driver to send using the protobuf binary format. 
MarshalProto Marshaler = iota // MarshalJSON tells the driver to send using json format. MarshalJSON ) opentelemetry-go-1.21.0/internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl000066400000000000000000000021671452547353200274170ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlpconfig import ( "crypto/tls" "crypto/x509" "errors" ) // CreateTLSConfig creates a tls.Config from a raw certificate bytes // to verify a server certificate. func CreateTLSConfig(certBytes []byte) (*tls.Config, error) { cp := x509.NewCertPool() if ok := cp.AppendCertsFromPEM(certBytes); !ok { return nil, errors.New("failed to append certificate to the cert pool") } return &tls.Config{ RootCAs: cp, }, nil } opentelemetry-go-1.21.0/internal/shared/otlp/otlptrace/otlptracetest/000077500000000000000000000000001452547353200260365ustar00rootroot00000000000000opentelemetry-go-1.21.0/internal/shared/otlp/otlptrace/otlptracetest/client.go.tmpl000066400000000000000000000074701452547353200306260ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlptracetest/client.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlptracetest import ( "context" "errors" "sync" "testing" "time" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" ) func RunExporterShutdownTest(t *testing.T, factory func() otlptrace.Client) { t.Run("testClientStopHonorsTimeout", func(t *testing.T) { testClientStopHonorsTimeout(t, factory()) }) t.Run("testClientStopHonorsCancel", func(t *testing.T) { testClientStopHonorsCancel(t, factory()) }) t.Run("testClientStopNoError", func(t *testing.T) { testClientStopNoError(t, factory()) }) t.Run("testClientStopManyTimes", func(t *testing.T) { testClientStopManyTimes(t, factory()) }) } func initializeExporter(t *testing.T, client otlptrace.Client) *otlptrace.Exporter { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) defer cancel() e, err := otlptrace.New(ctx, client) if err != nil { t.Fatalf("failed to create exporter") } return e } func testClientStopHonorsTimeout(t *testing.T, client otlptrace.Client) { t.Cleanup(func() { // The test is looking for a failed shut down. Call Stop a second time // with an un-expired context to give the client a second chance at // cleaning up. 
There is not guarantee from the Client interface this // will succeed, therefore, no need to check the error (just give it a // best try). _ = client.Stop(context.Background()) }) e := initializeExporter(t, client) ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond) defer cancel() <-ctx.Done() if err := e.Shutdown(ctx); !errors.Is(err, context.DeadlineExceeded) { t.Errorf("expected context DeadlineExceeded error, got %v", err) } } func testClientStopHonorsCancel(t *testing.T, client otlptrace.Client) { t.Cleanup(func() { // The test is looking for a failed shut down. Call Stop a second time // with an un-expired context to give the client a second chance at // cleaning up. There is not guarantee from the Client interface this // will succeed, therefore, no need to check the error (just give it a // best try). _ = client.Stop(context.Background()) }) e := initializeExporter(t, client) ctx, cancel := context.WithCancel(context.Background()) cancel() if err := e.Shutdown(ctx); !errors.Is(err, context.Canceled) { t.Errorf("expected context canceled error, got %v", err) } } func testClientStopNoError(t *testing.T, client otlptrace.Client) { e := initializeExporter(t, client) ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) defer cancel() if err := e.Shutdown(ctx); err != nil { t.Errorf("shutdown errored: expected nil, got %v", err) } } func testClientStopManyTimes(t *testing.T, client otlptrace.Client) { e := initializeExporter(t, client) ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) defer cancel() ch := make(chan struct{}) wg := sync.WaitGroup{} const num int = 20 wg.Add(num) errs := make([]error, num) for i := 0; i < num; i++ { go func(idx int) { defer wg.Done() <-ch errs[idx] = e.Shutdown(ctx) }(i) } close(ch) wg.Wait() for _, err := range errs { if err != nil { t.Errorf("failed to shutdown exporter: %v", err) return } } } opentelemetry-go-1.21.0/internal/shared/otlp/otlptrace/otlptracetest/collector.go.tmpl000066400000000000000000000060231452547353200313270ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlptracetest/collector.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlptracetest import ( "sort" collectortracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" commonpb "go.opentelemetry.io/proto/otlp/common/v1" resourcepb "go.opentelemetry.io/proto/otlp/resource/v1" tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) // TracesCollector mocks a collector for the end-to-end testing. type TracesCollector interface { Stop() error GetResourceSpans() []*tracepb.ResourceSpans } // SpansStorage stores the spans. Mock collectors can use it to // store spans they have received. type SpansStorage struct { rsm map[string]*tracepb.ResourceSpans spanCount int } // NewSpansStorage creates a new spans storage. 
func NewSpansStorage() SpansStorage { return SpansStorage{ rsm: make(map[string]*tracepb.ResourceSpans), } } // AddSpans adds spans to the spans storage. func (s *SpansStorage) AddSpans(request *collectortracepb.ExportTraceServiceRequest) { for _, rs := range request.GetResourceSpans() { rstr := resourceString(rs.Resource) if existingRs, ok := s.rsm[rstr]; !ok { s.rsm[rstr] = rs // TODO (rghetia): Add support for library Info. if len(rs.ScopeSpans) == 0 { rs.ScopeSpans = []*tracepb.ScopeSpans{ { Spans: []*tracepb.Span{}, }, } } s.spanCount += len(rs.ScopeSpans[0].Spans) } else { if len(rs.ScopeSpans) > 0 { newSpans := rs.ScopeSpans[0].GetSpans() existingRs.ScopeSpans[0].Spans = append(existingRs.ScopeSpans[0].Spans, newSpans...) s.spanCount += len(newSpans) } } } } // GetSpans returns the stored spans. func (s *SpansStorage) GetSpans() []*tracepb.Span { spans := make([]*tracepb.Span, 0, s.spanCount) for _, rs := range s.rsm { spans = append(spans, rs.ScopeSpans[0].Spans...) } return spans } // GetResourceSpans returns the stored resource spans. func (s *SpansStorage) GetResourceSpans() []*tracepb.ResourceSpans { rss := make([]*tracepb.ResourceSpans, 0, len(s.rsm)) for _, rs := range s.rsm { rss = append(rss, rs) } return rss } func resourceString(res *resourcepb.Resource) string { sAttrs := sortedAttributes(res.GetAttributes()) rstr := "" for _, attr := range sAttrs { rstr = rstr + attr.String() } return rstr } func sortedAttributes(attrs []*commonpb.KeyValue) []*commonpb.KeyValue { sort.Slice(attrs[:], func(i, j int) bool { return attrs[i].Key < attrs[j].Key }) return attrs } opentelemetry-go-1.21.0/internal/shared/otlp/otlptrace/otlptracetest/data.go.tmpl000066400000000000000000000046151452547353200302570ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlptracetest/data.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlptracetest import ( "time" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" tracesdk "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/sdk/trace/tracetest" "go.opentelemetry.io/otel/trace" ) // SingleReadOnlySpan returns a one-element slice with a read-only span. It // may be useful for testing driver's trace export. 
func SingleReadOnlySpan() []tracesdk.ReadOnlySpan { return tracetest.SpanStubs{ { SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{2, 3, 4, 5, 6, 7, 8, 9, 2, 3, 4, 5, 6, 7, 8, 9}, SpanID: trace.SpanID{3, 4, 5, 6, 7, 8, 9, 0}, TraceFlags: trace.FlagsSampled, }), Parent: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: trace.TraceID{2, 3, 4, 5, 6, 7, 8, 9, 2, 3, 4, 5, 6, 7, 8, 9}, SpanID: trace.SpanID{1, 2, 3, 4, 5, 6, 7, 8}, TraceFlags: trace.FlagsSampled, }), SpanKind: trace.SpanKindInternal, Name: "foo", StartTime: time.Date(2020, time.December, 8, 20, 23, 0, 0, time.UTC), EndTime: time.Date(2020, time.December, 0, 20, 24, 0, 0, time.UTC), Attributes: []attribute.KeyValue{}, Events: []tracesdk.Event{}, Links: []tracesdk.Link{}, Status: tracesdk.Status{Code: codes.Ok}, DroppedAttributes: 0, DroppedEvents: 0, DroppedLinks: 0, ChildSpanCount: 0, Resource: resource.NewSchemaless(attribute.String("a", "b")), InstrumentationLibrary: instrumentation.Library{ Name: "bar", Version: "0.0.0", }, }, }.Snapshots() } opentelemetry-go-1.21.0/internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl000066400000000000000000000076311452547353200312250ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otlptracetest import ( "context" "testing" "time" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" commonpb "go.opentelemetry.io/proto/otlp/common/v1" ) // RunEndToEndTest can be used by otlptrace.Client tests to validate // themselves. func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlptrace.Exporter, tracesCollector TracesCollector) { pOpts := []sdktrace.TracerProviderOption{ sdktrace.WithSampler(sdktrace.AlwaysSample()), sdktrace.WithBatcher( exp, // add following two options to ensure flush sdktrace.WithBatchTimeout(5*time.Second), sdktrace.WithMaxExportBatchSize(10), ), } tp1 := sdktrace.NewTracerProvider(append(pOpts, sdktrace.WithResource(resource.NewSchemaless( attribute.String("rk1", "rv11)"), attribute.Int64("rk2", 5), )))...) tp2 := sdktrace.NewTracerProvider(append(pOpts, sdktrace.WithResource(resource.NewSchemaless( attribute.String("rk1", "rv12)"), attribute.Float64("rk3", 6.5), )))...) 
tr1 := tp1.Tracer("test-tracer1") tr2 := tp2.Tracer("test-tracer2") // Now create few spans m := 4 for i := 0; i < m; i++ { _, span := tr1.Start(ctx, "AlwaysSample") span.SetAttributes(attribute.Int64("i", int64(i))) span.End() _, span = tr2.Start(ctx, "AlwaysSample") span.SetAttributes(attribute.Int64("i", int64(i))) span.End() } func() { ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() if err := tp1.Shutdown(ctx); err != nil { t.Fatalf("failed to shut down a tracer provider 1: %v", err) } if err := tp2.Shutdown(ctx); err != nil { t.Fatalf("failed to shut down a tracer provider 2: %v", err) } }() // Wait >2 cycles. <-time.After(40 * time.Millisecond) // Now shutdown the exporter ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() if err := exp.Shutdown(ctx); err != nil { t.Fatalf("failed to stop the exporter: %v", err) } // Shutdown the collector too so that we can begin // verification checks of expected data back. if err := tracesCollector.Stop(); err != nil { t.Fatalf("failed to stop the mock collector: %v", err) } // Now verify that we only got two resources rss := tracesCollector.GetResourceSpans() if got, want := len(rss), 2; got != want { t.Fatalf("resource span count: got %d, want %d\n", got, want) } // Now verify spans and attributes for each resource span. for _, rs := range rss { if len(rs.ScopeSpans) == 0 { t.Fatalf("zero ScopeSpans") } if got, want := len(rs.ScopeSpans[0].Spans), m; got != want { t.Fatalf("span counts: got %d, want %d", got, want) } attrMap := map[int64]bool{} for _, s := range rs.ScopeSpans[0].Spans { if gotName, want := s.Name, "AlwaysSample"; gotName != want { t.Fatalf("span name: got %s, want %s", gotName, want) } attrMap[s.Attributes[0].Value.Value.(*commonpb.AnyValue_IntValue).IntValue] = true } if got, want := len(attrMap), m; got != want { t.Fatalf("span attribute unique values: got %d want %d", got, want) } for i := 0; i < m; i++ { _, ok := attrMap[int64(i)] if !ok { t.Fatalf("span with attribute %d missing", i) } } } } opentelemetry-go-1.21.0/internal/shared/otlp/partialsuccess.go.tmpl000066400000000000000000000040671452547353200255020ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/partialsuccess.go // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal import "fmt" // PartialSuccess represents the underlying error for all handling // OTLP partial success messages. Use `errors.Is(err, // PartialSuccess{})` to test whether an error passed to the OTel // error handler belongs to this category. type PartialSuccess struct { ErrorMessage string RejectedItems int64 RejectedKind string } var _ error = PartialSuccess{} // Error implements the error interface. func (ps PartialSuccess) Error() string { msg := ps.ErrorMessage if msg == "" { msg = "empty message" } return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind) } // Is supports the errors.Is() interface. 
func (ps PartialSuccess) Is(err error) bool { _, ok := err.(PartialSuccess) return ok } // TracePartialSuccessError returns an error describing a partial success // response for the trace signal. func TracePartialSuccessError(itemsRejected int64, errorMessage string) error { return PartialSuccess{ ErrorMessage: errorMessage, RejectedItems: itemsRejected, RejectedKind: "spans", } } // MetricPartialSuccessError returns an error describing a partial success // response for the metric signal. func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error { return PartialSuccess{ ErrorMessage: errorMessage, RejectedItems: itemsRejected, RejectedKind: "metric data points", } } opentelemetry-go-1.21.0/internal/shared/otlp/partialsuccess_test.go.tmpl000066400000000000000000000031221452547353200265300ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/partialsuccess_test.go // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal import ( "errors" "strings" "testing" "github.com/stretchr/testify/require" ) func requireErrorString(t *testing.T, expect string, err error) { t.Helper() require.NotNil(t, err) require.Error(t, err) require.True(t, errors.Is(err, PartialSuccess{})) const pfx = "OTLP partial success: " msg := err.Error() require.True(t, strings.HasPrefix(msg, pfx)) require.Equal(t, expect, msg[len(pfx):]) } func TestPartialSuccessFormat(t *testing.T) { requireErrorString(t, "empty message (0 metric data points rejected)", MetricPartialSuccessError(0, "")) requireErrorString(t, "help help (0 metric data points rejected)", MetricPartialSuccessError(0, "help help")) requireErrorString(t, "what happened (10 metric data points rejected)", MetricPartialSuccessError(10, "what happened")) requireErrorString(t, "what happened (15 spans rejected)", TracePartialSuccessError(15, "what happened")) } opentelemetry-go-1.21.0/internal/shared/otlp/retry/000077500000000000000000000000001452547353200223115ustar00rootroot00000000000000opentelemetry-go-1.21.0/internal/shared/otlp/retry/retry.go.tmpl000066400000000000000000000114451452547353200247650ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/retry/retry.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
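// Illustrative sketch (not part of the generated templates): handling an OTLP
// partial-success error as suggested by the PartialSuccess documentation
// above, using errors.Is to classify it and errors.As to read its fields.
// handleExportError is hypothetical; an application would typically register
// such a function with the OTel error handler.
package internal

import (
	"errors"
	"log"
)

func handleExportError(err error) {
	if err == nil {
		return
	}
	var ps PartialSuccess
	if errors.Is(err, PartialSuccess{}) && errors.As(err, &ps) {
		// Only part of the payload was rejected; log and keep exporting.
		log.Printf("partial success: %d %s rejected: %s",
			ps.RejectedItems, ps.RejectedKind, ps.ErrorMessage)
		return
	}
	log.Printf("export error: %v", err)
}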
// Package retry provides request retry functionality that can perform // configurable exponential backoff for transient errors and honor any // explicit throttle responses received. package retry import ( "context" "fmt" "time" "github.com/cenkalti/backoff/v4" ) // DefaultConfig are the recommended defaults to use. var DefaultConfig = Config{ Enabled: true, InitialInterval: 5 * time.Second, MaxInterval: 30 * time.Second, MaxElapsedTime: time.Minute, } // Config defines configuration for retrying batches in case of export failure // using an exponential backoff. type Config struct { // Enabled indicates whether to not retry sending batches in case of // export failure. Enabled bool // InitialInterval the time to wait after the first failure before // retrying. InitialInterval time.Duration // MaxInterval is the upper bound on backoff interval. Once this value is // reached the delay between consecutive retries will always be // `MaxInterval`. MaxInterval time.Duration // MaxElapsedTime is the maximum amount of time (including retries) spent // trying to send a request/batch. Once this value is reached, the data // is discarded. MaxElapsedTime time.Duration } // RequestFunc wraps a request with retry logic. type RequestFunc func(context.Context, func(context.Context) error) error // EvaluateFunc returns if an error is retry-able and if an explicit throttle // duration should be honored that was included in the error. // // The function must return true if the error argument is retry-able, // otherwise it must return false for the first return parameter. // // The function must return a non-zero time.Duration if the error contains // explicit throttle duration that should be honored, otherwise it must return // a zero valued time.Duration. type EvaluateFunc func(error) (bool, time.Duration) // RequestFunc returns a RequestFunc using the evaluate function to determine // if requests can be retried and based on the exponential backoff // configuration of c. func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { if !c.Enabled { return func(ctx context.Context, fn func(context.Context) error) error { return fn(ctx) } } return func(ctx context.Context, fn func(context.Context) error) error { // Do not use NewExponentialBackOff since it calls Reset and the code here // must call Reset after changing the InitialInterval (this saves an // unnecessary call to Now). b := &backoff.ExponentialBackOff{ InitialInterval: c.InitialInterval, RandomizationFactor: backoff.DefaultRandomizationFactor, Multiplier: backoff.DefaultMultiplier, MaxInterval: c.MaxInterval, MaxElapsedTime: c.MaxElapsedTime, Stop: backoff.Stop, Clock: backoff.SystemClock, } b.Reset() for { err := fn(ctx) if err == nil { return nil } retryable, throttle := evaluate(err) if !retryable { return err } bOff := b.NextBackOff() if bOff == backoff.Stop { return fmt.Errorf("max retry time elapsed: %w", err) } // Wait for the greater of the backoff or throttle delay. var delay time.Duration if bOff > throttle { delay = bOff } else { elapsed := b.GetElapsedTime() if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime { return fmt.Errorf("max retry time would elapse: %w", err) } delay = throttle } if ctxErr := waitFunc(ctx, delay); ctxErr != nil { return fmt.Errorf("%w: %s", ctxErr, err) } } } } // Allow override for testing. var waitFunc = wait // wait takes the caller's context, and the amount of time to wait. It will // return nil if the timer fires before or at the same time as the context's // deadline. 
This indicates that the call can be retried. func wait(ctx context.Context, delay time.Duration) error { timer := time.NewTimer(delay) defer timer.Stop() select { case <-ctx.Done(): // Handle the case where the timer and context deadline end // simultaneously by prioritizing the timer expiration nil value // response. select { case <-timer.C: default: return ctx.Err() } case <-timer.C: } return nil } opentelemetry-go-1.21.0/internal/shared/otlp/retry/retry_test.go.tmpl000066400000000000000000000145671452547353200260340ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/otlp/retry/retry_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package retry import ( "context" "errors" "math" "sync" "testing" "time" "github.com/cenkalti/backoff/v4" "github.com/stretchr/testify/assert" ) func TestWait(t *testing.T) { tests := []struct { ctx context.Context delay time.Duration expected error }{ { ctx: context.Background(), delay: time.Duration(0), }, { ctx: context.Background(), delay: time.Duration(1), }, { ctx: context.Background(), delay: time.Duration(-1), }, { ctx: func() context.Context { ctx, cancel := context.WithCancel(context.Background()) cancel() return ctx }(), // Ensure the timer and context do not end simultaneously. delay: 1 * time.Hour, expected: context.Canceled, }, } for _, test := range tests { err := wait(test.ctx, test.delay) if test.expected == nil { assert.NoError(t, err) } else { assert.ErrorIs(t, err, test.expected) } } } func TestNonRetryableError(t *testing.T) { ev := func(error) (bool, time.Duration) { return false, 0 } reqFunc := Config{ Enabled: true, InitialInterval: 1 * time.Nanosecond, MaxInterval: 1 * time.Nanosecond, // Never stop retrying. MaxElapsedTime: 0, }.RequestFunc(ev) ctx := context.Background() assert.NoError(t, reqFunc(ctx, func(context.Context) error { return nil })) assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error { return assert.AnError }), assert.AnError) } func TestThrottledRetry(t *testing.T) { // Ensure the throttle delay is used by making longer than backoff delay. throttleDelay, backoffDelay := time.Second, time.Nanosecond ev := func(error) (bool, time.Duration) { // Retry everything with a throttle delay. return true, throttleDelay } reqFunc := Config{ Enabled: true, InitialInterval: backoffDelay, MaxInterval: backoffDelay, // Never stop retrying. MaxElapsedTime: 0, }.RequestFunc(ev) origWait := waitFunc var done bool waitFunc = func(_ context.Context, delay time.Duration) error { assert.Equal(t, throttleDelay, delay, "retry not throttled") // Try twice to ensure call is attempted again after delay. 
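// Illustrative sketch, not part of the upstream templates: wiring the retry
// Config shown above around an export call. The doExport parameter, the
// always-retry evaluate function, and the function name are assumptions for
// this example; a real exporter derives retryability and throttle hints from
// its transport errors.
func exampleRetrySend(ctx context.Context, doExport func(context.Context) error) error {
	evaluate := func(err error) (bool, time.Duration) {
		// Treat every failure as retryable with no explicit throttle delay;
		// returning a non-zero duration here would honor a server throttle.
		return true, 0
	}
	send := DefaultConfig.RequestFunc(evaluate)
	return send(ctx, doExport)
}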
if done { return assert.AnError } done = true return nil } defer func() { waitFunc = origWait }() ctx := context.Background() assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error { return errors.New("not this error") }), assert.AnError) } func TestBackoffRetry(t *testing.T) { ev := func(error) (bool, time.Duration) { return true, 0 } delay := time.Nanosecond reqFunc := Config{ Enabled: true, InitialInterval: delay, MaxInterval: delay, // Never stop retrying. MaxElapsedTime: 0, }.RequestFunc(ev) origWait := waitFunc var done bool waitFunc = func(_ context.Context, d time.Duration) error { delta := math.Ceil(float64(delay) * backoff.DefaultRandomizationFactor) assert.InDelta(t, delay, d, delta, "retry not backoffed") // Try twice to ensure call is attempted again after delay. if done { return assert.AnError } done = true return nil } t.Cleanup(func() { waitFunc = origWait }) ctx := context.Background() assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error { return errors.New("not this error") }), assert.AnError) } func TestBackoffRetryCanceledContext(t *testing.T) { ev := func(error) (bool, time.Duration) { return true, 0 } delay := time.Millisecond reqFunc := Config{ Enabled: true, InitialInterval: delay, MaxInterval: delay, // Never stop retrying. MaxElapsedTime: 10 * time.Millisecond, }.RequestFunc(ev) ctx, cancel := context.WithCancel(context.Background()) count := 0 cancel() err := reqFunc(ctx, func(context.Context) error { count++ return assert.AnError }) assert.ErrorIs(t, err, context.Canceled) assert.Contains(t, err.Error(), assert.AnError.Error()) assert.Equal(t, 1, count) } func TestThrottledRetryGreaterThanMaxElapsedTime(t *testing.T) { // Ensure the throttle delay is used by making longer than backoff delay. tDelay, bDelay := time.Hour, time.Nanosecond ev := func(error) (bool, time.Duration) { return true, tDelay } reqFunc := Config{ Enabled: true, InitialInterval: bDelay, MaxInterval: bDelay, MaxElapsedTime: tDelay - (time.Nanosecond), }.RequestFunc(ev) ctx := context.Background() assert.Contains(t, reqFunc(ctx, func(context.Context) error { return assert.AnError }).Error(), "max retry time would elapse: ") } func TestMaxElapsedTime(t *testing.T) { ev := func(error) (bool, time.Duration) { return true, 0 } delay := time.Nanosecond reqFunc := Config{ Enabled: true, // InitialInterval > MaxElapsedTime means immediate return. 
InitialInterval: 2 * delay, MaxElapsedTime: delay, }.RequestFunc(ev) ctx := context.Background() assert.Contains(t, reqFunc(ctx, func(context.Context) error { return assert.AnError }).Error(), "max retry time elapsed: ") } func TestRetryNotEnabled(t *testing.T) { ev := func(error) (bool, time.Duration) { t.Error("evaluated retry when not enabled") return false, 0 } reqFunc := Config{}.RequestFunc(ev) ctx := context.Background() assert.NoError(t, reqFunc(ctx, func(context.Context) error { return nil })) assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error { return assert.AnError }), assert.AnError) } func TestRetryConcurrentSafe(t *testing.T) { ev := func(error) (bool, time.Duration) { return true, 0 } reqFunc := Config{ Enabled: true, }.RequestFunc(ev) var wg sync.WaitGroup ctx := context.Background() for i := 1; i < 5; i++ { wg.Add(1) go func() { defer wg.Done() var done bool assert.NoError(t, reqFunc(ctx, func(context.Context) error { if !done { done = true return assert.AnError } return nil })) }() } wg.Wait() } opentelemetry-go-1.21.0/internal/tools/000077500000000000000000000000001452547353200200605ustar00rootroot00000000000000opentelemetry-go-1.21.0/internal/tools/go.mod000066400000000000000000000256771452547353200212070ustar00rootroot00000000000000module go.opentelemetry.io/otel/internal/tools go 1.20 require ( github.com/client9/misspell v0.3.4 github.com/gogo/protobuf v1.3.2 github.com/golangci/golangci-lint v1.55.2 github.com/itchyny/gojq v0.12.13 github.com/jcchavezs/porto v0.6.0 github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad go.opentelemetry.io/build-tools/crosslink v0.12.0 go.opentelemetry.io/build-tools/dbotconf v0.12.0 go.opentelemetry.io/build-tools/gotmpl v0.12.0 go.opentelemetry.io/build-tools/multimod v0.12.0 go.opentelemetry.io/build-tools/semconvgen v0.12.0 golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea golang.org/x/tools v0.15.0 golang.org/x/vuln v1.0.1 ) require ( 4d63.com/gocheckcompilerdirectives v1.2.1 // indirect 4d63.com/gochecknoglobals v0.2.1 // indirect dario.cat/mergo v1.0.0 // indirect github.com/4meepo/tagalign v1.3.3 // indirect github.com/Abirdcfly/dupword v0.0.13 // indirect github.com/Antonboom/errname v0.1.12 // indirect github.com/Antonboom/nilnil v0.1.7 // indirect github.com/Antonboom/testifylint v0.2.3 // indirect github.com/BurntSushi/toml v1.3.2 // indirect github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect github.com/GaijinEntertainment/go-exhaustruct/v3 v3.1.0 // indirect github.com/Masterminds/semver v1.5.0 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/OpenPeeDeeP/depguard/v2 v2.1.0 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect github.com/acomagu/bufpipe v1.0.4 // indirect github.com/alecthomas/go-check-sumtype v0.1.3 // indirect github.com/alexkohler/nakedret/v2 v2.0.2 // indirect github.com/alexkohler/prealloc v1.0.0 // indirect github.com/alingse/asasalint v0.0.11 // indirect github.com/ashanbrown/forbidigo v1.6.0 // indirect github.com/ashanbrown/makezero v1.1.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bkielbasa/cyclop v1.2.1 // indirect github.com/blizzy78/varnamelen v0.8.0 // indirect github.com/bombsimon/wsl/v3 v3.4.0 // indirect github.com/breml/bidichk v0.2.7 // indirect github.com/breml/errchkjson v0.3.6 // indirect github.com/butuzov/ireturn v0.2.2 // indirect github.com/butuzov/mirror v1.1.0 // indirect github.com/catenacyber/perfsprint v0.2.0 // indirect github.com/ccojocar/zxcvbn-go 
v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/charithe/durationcheck v0.0.10 // indirect github.com/chavacava/garif v0.1.0 // indirect github.com/cloudflare/circl v1.3.3 // indirect github.com/curioswitch/go-reassign v0.2.0 // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/daixiang0/gci v0.11.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/denis-tingaikin/go-header v0.4.3 // indirect github.com/emirpasic/gods v1.18.1 // indirect github.com/esimonov/ifshort v1.0.4 // indirect github.com/ettle/strcase v0.1.1 // indirect github.com/fatih/color v1.15.0 // indirect github.com/fatih/structtag v1.2.0 // indirect github.com/firefart/nonamedreturns v1.0.4 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/fzipp/gocyclo v0.6.0 // indirect github.com/ghostiam/protogetter v0.2.3 // indirect github.com/go-critic/go-critic v0.9.0 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.5.0 // indirect github.com/go-git/go-git/v5 v5.9.0 // indirect github.com/go-toolsmith/astcast v1.1.0 // indirect github.com/go-toolsmith/astcopy v1.1.0 // indirect github.com/go-toolsmith/astequal v1.1.0 // indirect github.com/go-toolsmith/astfmt v1.1.0 // indirect github.com/go-toolsmith/astp v1.1.0 // indirect github.com/go-toolsmith/strparse v1.1.0 // indirect github.com/go-toolsmith/typep v1.1.0 // indirect github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gofrs/flock v0.8.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe // indirect github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e // indirect github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect github.com/golangci/misspell v0.4.1 // indirect github.com/golangci/revgrep v0.5.2 // indirect github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601 // indirect github.com/gostaticanalysis/analysisutil v0.7.1 // indirect github.com/gostaticanalysis/comment v1.4.2 // indirect github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect github.com/gostaticanalysis/nilerr v0.1.1 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-version v1.6.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hexops/gotextdiff v1.0.3 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/itchyny/timefmt-go v0.1.5 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jgautheron/goconst v1.6.0 // indirect github.com/jingyugao/rowserrcheck v1.1.1 // indirect github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect github.com/julz/importas v0.1.0 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect github.com/kisielk/errcheck v1.6.3 // indirect github.com/kisielk/gotool v1.0.0 // indirect github.com/kkHAIKE/contextcheck v1.1.4 // indirect 
github.com/kulti/thelper v0.6.3 // indirect github.com/kunwardeep/paralleltest v1.0.8 // indirect github.com/kyoh86/exportloopref v0.1.11 // indirect github.com/ldez/gomoddirectives v0.2.3 // indirect github.com/ldez/tagliatelle v0.5.0 // indirect github.com/leonklingele/grouper v1.1.1 // indirect github.com/lufeee/execinquery v1.2.1 // indirect github.com/macabu/inamedparam v0.1.2 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/maratori/testableexamples v1.0.0 // indirect github.com/maratori/testpackage v1.1.1 // indirect github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.19 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mbilski/exhaustivestruct v1.2.0 // indirect github.com/mgechev/revive v1.3.4 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moricho/tparallel v0.3.1 // indirect github.com/nakabonne/nestif v0.3.1 // indirect github.com/nishanths/exhaustive v0.11.0 // indirect github.com/nishanths/predeclared v0.2.2 // indirect github.com/nunnatsa/ginkgolinter v0.14.1 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/pelletier/go-toml/v2 v2.0.8 // indirect github.com/pjbgf/sha1cd v0.3.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polyfloyd/go-errorlint v1.4.5 // indirect github.com/prometheus/client_golang v1.17.0 // indirect github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/common v0.44.0 // indirect github.com/prometheus/procfs v0.11.1 // indirect github.com/quasilyte/go-ruleguard v0.4.0 // indirect github.com/quasilyte/gogrep v0.5.0 // indirect github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect github.com/rivo/uniseg v0.4.4 // indirect github.com/ryancurrah/gomodguard v1.3.0 // indirect github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect github.com/sashamelentyev/interfacebloat v1.1.0 // indirect github.com/sashamelentyev/usestdlibvars v1.24.0 // indirect github.com/securego/gosec/v2 v2.18.2 // indirect github.com/sergi/go-diff v1.2.0 // indirect github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/sivchari/containedctx v1.0.3 // indirect github.com/sivchari/nosnakecase v1.7.0 // indirect github.com/sivchari/tenv v1.7.1 // indirect github.com/skeema/knownhosts v1.2.0 // indirect github.com/sonatard/noctx v0.0.2 // indirect github.com/sourcegraph/go-diff v0.7.0 // indirect github.com/spf13/afero v1.9.5 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/viper v1.16.0 // indirect github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect github.com/stretchr/objx v0.5.0 // indirect github.com/stretchr/testify v1.8.4 // indirect github.com/subosito/gotenv v1.4.2 // indirect github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c // indirect github.com/tdakkota/asciicheck v0.2.0 // indirect github.com/tetafro/godot v1.4.15 // indirect 
github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 // indirect github.com/timonwong/loggercheck v0.9.4 // indirect github.com/tomarrell/wrapcheck/v2 v2.8.1 // indirect github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect github.com/ultraware/funlen v0.1.0 // indirect github.com/ultraware/whitespace v0.0.5 // indirect github.com/uudashr/gocognit v1.1.2 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xen0n/gosmopolitan v1.2.2 // indirect github.com/yagipy/maintidx v1.0.0 // indirect github.com/yeya24/promlinter v0.2.0 // indirect github.com/ykadowak/zerologlint v0.1.3 // indirect gitlab.com/bosi/decorder v0.4.1 // indirect go-simpler.org/sloglint v0.1.2 // indirect go.opentelemetry.io/build-tools v0.12.0 // indirect go.tmz.dev/musttag v0.7.2 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.26.0 // indirect golang.org/x/crypto v0.15.0 // indirect golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833 // indirect golang.org/x/mod v0.14.0 // indirect golang.org/x/net v0.18.0 // indirect golang.org/x/sync v0.5.0 // indirect golang.org/x/sys v0.14.0 // indirect golang.org/x/text v0.14.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect honnef.co/go/tools v0.4.6 // indirect mvdan.cc/gofumpt v0.5.0 // indirect mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect mvdan.cc/unparam v0.0.0-20230312165513-e84e2d14e3b8 // indirect ) opentelemetry-go-1.21.0/internal/tools/go.sum000066400000000000000000003134211452547353200212170ustar00rootroot000000000000004d63.com/gocheckcompilerdirectives v1.2.1 h1:AHcMYuw56NPjq/2y615IGg2kYkBdTvOaojYCBcRE7MA= 4d63.com/gocheckcompilerdirectives v1.2.1/go.mod h1:yjDJSxmDTtIHHCqX0ufRYZDL6vQtMG7tJdKVeWwsqvs= 4d63.com/gochecknoglobals v0.2.1 h1:1eiorGsgHOFOuoOiJDy2psSrQbRdIHrlge0IJIkUgDc= 4d63.com/gochecknoglobals v0.2.1/go.mod h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go 
v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/4meepo/tagalign v1.3.3 h1:ZsOxcwGD/jP4U/aw7qeWu58i7dwYemfy5Y+IF1ACoNw= github.com/4meepo/tagalign v1.3.3/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE= github.com/Abirdcfly/dupword v0.0.13 h1:SMS17YXypwP000fA7Lr+kfyBQyW14tTT+nRv9ASwUUo= github.com/Abirdcfly/dupword v0.0.13/go.mod h1:Ut6Ue2KgF/kCOawpW4LnExT+xZLQviJPE4klBPMK/5Y= github.com/Antonboom/errname v0.1.12 h1:oh9ak2zUtsLp5oaEd/erjB4GPu9w19NyoIskZClDcQY= github.com/Antonboom/errname v0.1.12/go.mod h1:bK7todrzvlaZoQagP1orKzWXv59X/x0W0Io2XT1Ssro= github.com/Antonboom/nilnil v0.1.7 h1:ofgL+BA7vlA1K2wNQOsHzLJ2Pw5B5DpWRLdDAVvvTow= github.com/Antonboom/nilnil v0.1.7/go.mod h1:TP+ScQWVEq0eSIxqU8CbdT5DFWoHp0MbP+KMUO1BKYQ= github.com/Antonboom/testifylint v0.2.3 h1:MFq9zyL+rIVpsvLX4vDPLojgN7qODzWsrnftNX2Qh60= github.com/Antonboom/testifylint v0.2.3/go.mod h1:IYaXaOX9NbfAyO+Y04nfjGI8wDemC1rUyM/cYolz018= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= 
github.com/GaijinEntertainment/go-exhaustruct/v3 v3.1.0 h1:3ZBs7LAezy8gh0uECsA6CGU43FF3zsx5f4eah5FxTMA= github.com/GaijinEntertainment/go-exhaustruct/v3 v3.1.0/go.mod h1:rZLTje5A9kFBe0pzhpe2TdhRniBF++PRHQuRpR8esVc= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/OpenPeeDeeP/depguard/v2 v2.1.0 h1:aQl70G173h/GZYhWf36aE5H0KaujXfVMnn/f1kSDVYY= github.com/OpenPeeDeeP/depguard/v2 v2.1.0/go.mod h1:PUBgk35fX4i7JDmwzlJwJ+GMe6NfO1723wmJMgPThNQ= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ= github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= github.com/alecthomas/assert/v2 v2.2.2 h1:Z/iVC0xZfWTaFNE6bA3z07T86hd45Xe2eLt6WVy2bbk= github.com/alecthomas/go-check-sumtype v0.1.3 h1:M+tqMxB68hcgccRXBMVCPI4UJ+QUfdSx0xdbypKCqA8= github.com/alecthomas/go-check-sumtype v0.1.3/go.mod h1:WyYPfhfkdhyrdaligV6svFopZV8Lqdzn5pyVBaV6jhQ= github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alexkohler/nakedret/v2 v2.0.2 h1:qnXuZNvv3/AxkAb22q/sEsEpcA99YxLFACDtEw9TPxE= github.com/alexkohler/nakedret/v2 v2.0.2/go.mod h1:2b8Gkk0GsOrqQv/gPWjNLDSKwG8I5moSXG1K4VIBcTQ= github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw= github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8gerOIVIY= github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= 
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bkielbasa/cyclop v1.2.1 h1:AeF71HZDob1P2/pRm1so9cd1alZnrpyc4q2uP2l0gJY= github.com/bkielbasa/cyclop v1.2.1/go.mod h1:K/dT/M0FPAiYjBgQGau7tz+3TMh4FWAEqlMhzFWCrgM= github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= github.com/bombsimon/wsl/v3 v3.4.0 h1:RkSxjT3tmlptwfgEgTgU+KYKLI35p/tviNXNXiL2aNU= github.com/bombsimon/wsl/v3 v3.4.0/go.mod h1:KkIB+TXkqy6MvK9BDZVbZxKNYsE1/oLRJbIFtf14qqo= github.com/breml/bidichk v0.2.7 h1:dAkKQPLl/Qrk7hnP6P+E0xOodrq8Us7+U0o4UBOAlQY= github.com/breml/bidichk v0.2.7/go.mod h1:YodjipAGI9fGcYM7II6wFvGhdMYsC5pHDlGzqvEW3tQ= github.com/breml/errchkjson v0.3.6 h1:VLhVkqSBH96AvXEyclMR37rZslRrY2kcyq+31HCsVrA= github.com/breml/errchkjson v0.3.6/go.mod h1:jhSDoFheAF2RSDOlCfhHO9KqhZgAYLyvHe7bRCX8f/U= github.com/butuzov/ireturn v0.2.2 h1:jWI36dxXwVrI+RnXDwux2IZOewpmfv930OuIRfaBUJ0= github.com/butuzov/ireturn v0.2.2/go.mod h1:RfGHUvvAuFFxoHKf4Z8Yxuh6OjlCw1KvR2zM1NFHeBk= github.com/butuzov/mirror v1.1.0 h1:ZqX54gBVMXu78QLoiqdwpl2mgmoOJTk7s4p4o+0avZI= github.com/butuzov/mirror v1.1.0/go.mod h1:8Q0BdQU6rC6WILDiBM60DBfvV78OLJmMmixe7GF45AE= github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/catenacyber/perfsprint v0.2.0 h1:azOocHLscPjqXVJ7Mf14Zjlkn4uNua0+Hcg1wTR6vUo= github.com/catenacyber/perfsprint v0.2.0/go.mod h1:/wclWYompEyjUD2FuIIDVKNkqz7IgBIWXIH3V0Zol50= github.com/ccojocar/zxcvbn-go v1.0.1 h1:+sxrANSCj6CdadkcMnvde/GWU1vZiiXRbqYSCalV4/4= github.com/ccojocar/zxcvbn-go v1.0.1/go.mod h1:g1qkXtUSvHP8lhHp5GrSmTz6uWALGRMQdw6Qnz/hi60= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4= github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ= github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc= github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+UIPD+Gww= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= 
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo= github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/daixiang0/gci v0.11.2 h1:Oji+oPsp3bQ6bNNgX30NBAVT18P4uBH4sRZnlOlTj7Y= github.com/daixiang0/gci v0.11.2/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/denis-tingaikin/go-header v0.4.3 h1:tEaZKAlqql6SKCY++utLmkPLd6K8IBM20Ha7UVm+mtU= github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c= github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/esimonov/ifshort v1.0.4 h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStBA= github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0= github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw= github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/firefart/nonamedreturns v1.0.4 h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phmY9sfv40Y= github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI= github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= github.com/ghostiam/protogetter v0.2.3 h1:qdv2pzo3BpLqezwqfGDLZ+nHEYmc5bUpIdsMbBVwMjw= 
github.com/ghostiam/protogetter v0.2.3/go.mod h1:KmNLOsy1v04hKbvZs8EfGI1fk39AgTdRDxWNYPfXVc4= github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= github.com/go-critic/go-critic v0.9.0 h1:Pmys9qvU3pSML/3GEQ2Xd9RZ/ip+aXHKILuxczKGV/U= github.com/go-critic/go-critic v0.9.0/go.mod h1:5P8tdXL7m/6qnyG6oRAlYLORvoXH0WDypYgAEmagT40= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20230305113008-0c11038e723f h1:Pz0DHeFij3XFhoBRGUDPzSJ+w2UcK5/0JvF8DRI58r8= github.com/go-git/go-git/v5 v5.9.0 h1:cD9SFA7sHVRdJ7AYck1ZaAa/yeuBvGPxwXDL8cxrObY= github.com/go-git/go-git/v5 v5.9.0/go.mod h1:RKIqga24sWdMGZF+1Ekv9kylsDz6LzdTSI2s/OsZWE0= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-toolsmith/astcast v1.1.0 h1:+JN9xZV1A+Re+95pgnMgDboWNVnIMMQXwfBwLRPgSC8= github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU= github.com/go-toolsmith/astcopy v1.1.0 h1:YGwBN0WM+ekI/6SS6+52zLDEf8Yvp3n2seZITCUBt5s= github.com/go-toolsmith/astcopy v1.1.0/go.mod h1:hXM6gan18VA1T/daUEHCFcYiW8Ai1tIwIzHY6srfEAw= github.com/go-toolsmith/astequal v1.0.3/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4= github.com/go-toolsmith/astequal v1.1.0 h1:kHKm1AWqClYn15R0K1KKE4RG614D46n+nqUQ06E1dTw= github.com/go-toolsmith/astequal v1.1.0/go.mod h1:sedf7VIdCL22LD8qIvv7Nn9MuWJruQA/ysswh64lffQ= github.com/go-toolsmith/astfmt v1.1.0 h1:iJVPDPp6/7AaeLJEruMsBUlOYCmvg0MoCfJprsOmcco= github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlNMV634mhwuQ4= github.com/go-toolsmith/astp v1.1.0 h1:dXPuCl6u2llURjdPLLDxJeZInAeZ0/eZwFJmqZMnpQA= github.com/go-toolsmith/astp v1.1.0/go.mod h1:0T1xFGz9hicKs8Z5MfAqSUitoUYS30pDMsRVIDHs8CA= github.com/go-toolsmith/pkgload v1.2.2 h1:0CtmHq/02QhxcF7E9N5LIFcYFsMR5rdovfqTtRKkgIk= github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQiyP2Bvw= github.com/go-toolsmith/strparse v1.1.0/go.mod 
h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus= github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80U= github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod 
h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6J5HIP8ZtyMdiDscjMLfRBSPuzVVeo= github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ= github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e h1:ULcKCDV1LOZPFxGZaA6TlQbiM3J2GCPnkx/bGF6sX/g= github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e/go.mod h1:Pm5KhLPA8gSnQwrQ6ukebRcapGb/BG9iUkdaiCcGHJM= github.com/golangci/golangci-lint v1.55.2 h1:yllEIsSJ7MtlDBwDJ9IMBkyEUz2fYE0b5B8IUgO1oP8= github.com/golangci/golangci-lint v1.55.2/go.mod h1:H60CZ0fuqoTwlTvnbyjhpZPWp7KmsjwV2yupIMiMXbM= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= github.com/golangci/misspell v0.4.1 h1:+y73iSicVy2PqyX7kmUefHusENlrP9YwuHZHPLGQj/g= github.com/golangci/misspell v0.4.1/go.mod h1:9mAN1quEo3DlpbaIKKyEvRxK1pwqR9s/Sea1bJCtlNI= github.com/golangci/revgrep v0.5.2 h1:EndcWoRhcnfj2NHQ+28hyuXpLMF+dQmCN+YaeeIl4FU= github.com/golangci/revgrep v0.5.2/go.mod h1:bjAMA+Sh/QUfTDcHzxfyHxr4xKvllVr/0sCv2e7jJHA= github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys= github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmdtest v0.4.1-0.20220921163831-55ab3332a786 h1:rcv+Ippz6RAtvaGgKxc+8FQIpxHgsF+HBzPyYL2cyVU= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE= github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601 h1:mrEEilTAUmaAORhssPPkxj84TsHrPMLBGW2Z4SoTxm8= github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= github.com/gostaticanalysis/comment v1.4.2 h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q= github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= github.com/gostaticanalysis/forcetypeassert v0.1.0 h1:6eUflI3DiGusXGK6X7cCcIgVCpZ2CiZ1Q7jl6ZxNV70= 
github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk= github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/itchyny/gojq v0.12.13 h1:IxyYlHYIlspQHHTE0f3cJF0NKDMfajxViuhBLnHd/QU= github.com/itchyny/gojq v0.12.13/go.mod h1:JzwzAqenfhrPUuwbmEz3nu3JQmFLlQTQMUcOdnu/Sf4= github.com/itchyny/timefmt-go v0.1.5 h1:G0INE2la8S6ru/ZI5JecgyzbbJNs5lG1RcBqa7Jm6GE= github.com/itchyny/timefmt-go v0.1.5/go.mod h1:nEP7L+2YmAbT2kZ2HfSs1d8Xtw9LY8D2stDBckWakZ8= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jcchavezs/porto v0.6.0 h1:AgQLGwsXaxDkPj4Y+paFkVGLAR4n/1RRF0xV5UKinwg= github.com/jcchavezs/porto v0.6.0/go.mod h1:fESH0gzDHiutHRdX2hv27ojnOVFco37hg1W6E9EZF4A= github.com/jgautheron/goconst v1.6.0 h1:gbMLWKRMkzAc6kYsQL6/TxaoBUg3Jm9LSF/Ih1ADWGA= github.com/jgautheron/goconst v1.6.0/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= github.com/jpillora/backoff v1.0.0/go.mod 
h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY= github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/errcheck v1.6.3 h1:dEKh+GLHcWm2oN34nMvDzn1sqI0i0WxPvrgiJA5JuM8= github.com/kisielk/errcheck v1.6.3/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw= github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkHAIKE/contextcheck v1.1.4 h1:B6zAaLhOEEcjvUgIYEqystmnFk1Oemn8bvJhbt0GMb8= github.com/kkHAIKE/contextcheck v1.1.4/go.mod h1:1+i/gWqokIa+dm31mqGLZhZJ7Uh44DJGZVmr6QRBNJg= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs= github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= github.com/kunwardeep/paralleltest v1.0.8 h1:Ul2KsqtzFxTlSU7IP0JusWlLiNqQaloB9vguyjbE558= github.com/kunwardeep/paralleltest v1.0.8/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY= github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ= github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA= github.com/ldez/gomoddirectives v0.2.3 h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUcJwlhA= github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= github.com/ldez/tagliatelle v0.5.0 h1:epgfuYt9v0CG3fms0pEgIMNPuFf/LpPIfjk4kyqSioo= github.com/ldez/tagliatelle v0.5.0/go.mod h1:rj1HmWiL1MiKQuOONhd09iySTEkUuE/8+5jtPYz9xa4= github.com/leonklingele/grouper v1.1.1 
h1:suWXRU57D4/Enn6pXR0QVqqWWrnJ9Osrz+5rjt8ivzU= github.com/leonklingele/grouper v1.1.1/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY= github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM= github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= github.com/macabu/inamedparam v0.1.2 h1:RR5cnayM6Q7cDhQol32DE2BGAPGMnffJ31LFE+UklaU= github.com/macabu/inamedparam v0.1.2/go.mod h1:Xg25QvY7IBRl1KLPV9Rbml8JOMZtF/iAkNkmV7eQgjw= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s93SLMxb2vI= github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE= github.com/maratori/testpackage v1.1.1 h1:S58XVV5AD7HADMmD0fNnziNHqKvSdDuEKdPD1rNTU04= github.com/maratori/testpackage v1.1.1/go.mod h1:s4gRK/ym6AMrqpOa/kEbQTV4Q4jb7WeLZzVhVVVOQMc= github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 h1:gWg6ZQ4JhDfJPqlo2srm/LN17lpybq15AryXIRcWYLE= github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo= github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= github.com/mgechev/revive v1.3.4 h1:k/tO3XTaWY4DEHal9tWBkkUMJYO/dLDVyMmAQxmIMDc= github.com/mgechev/revive v1.3.4/go.mod h1:W+pZCMu9qj8Uhfs1iJMQsEFLRozUfvwFwqVvRbSNLVw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/moricho/tparallel v0.3.1 h1:fQKD4U1wRMAYNngDonW5XupoB/ZGJHdpzrWqgyg9krA= github.com/moricho/tparallel v0.3.1/go.mod h1:leENX2cUv7Sv2qDgdi0D0fCftN8fRC67Bcn8pqzeYNI= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U= github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= github.com/nishanths/exhaustive v0.11.0 h1:T3I8nUGhl/Cwu5Z2hfc92l0e04D2GEW6e0l8pzda2l0= github.com/nishanths/exhaustive v0.11.0/go.mod h1:RqwDsZ1xY0dNdqHho2z6X+bgzizwbLYOWnZbbl2wLB4= github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= github.com/nunnatsa/ginkgolinter v0.14.1 h1:khx0CqR5U4ghsscjJ+lZVthp3zjIFytRXPTaQ/TMiyA= github.com/nunnatsa/ginkgolinter v0.14.1/go.mod h1:nY0pafUSst7v7F637e7fymaMlQqI9c0Wka2fGsDkzWg= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= github.com/onsi/gomega v1.28.1 h1:MijcGUbfYuznzK/5R4CPNoUP/9Xvuo20sXfEm6XxoTA= github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= github.com/otiai10/copy v1.12.0 h1:cLMgSQnXBs1eehF0Wy/FAGsgDTDmAqFR7rQylBb1nDY= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polyfloyd/go-errorlint v1.4.5 h1:70YWmMy4FgRHehGNOUask3HtSFSOLKgmDn7ryNe7LqI= github.com/polyfloyd/go-errorlint v1.4.5/go.mod h1:sIZEbFoDOCnTYYZoVkjc4hTnM459tuWA9H/EkdXwsKk= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= 
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= github.com/quasilyte/go-ruleguard v0.4.0 h1:DyM6r+TKL+xbKB4Nm7Afd1IQh9kEUKQs2pboWGKtvQo= github.com/quasilyte/go-ruleguard v0.4.0/go.mod h1:Eu76Z/R8IXtViWUIHkE3p8gdH3/PKk1eh3YGfaEof10= github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo= github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng= github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl980XxGFEZSS6KlBGIV0diGdySzxATTWoqaU= github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs= github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= 
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryancurrah/gomodguard v1.3.0 h1:q15RT/pd6UggBXVBuLps8BXRvl5GPBcwVA7BJHMLuTw= github.com/ryancurrah/gomodguard v1.3.0/go.mod h1:ggBxb3luypPEzqVtq33ee7YSN35V28XeGnid8dnni50= github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU= github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ= github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/NsJFwFK7Uxc= github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw= github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= github.com/sashamelentyev/usestdlibvars v1.24.0 h1:MKNzmXtGh5N0y74Z/CIaJh4GlB364l0K1RUT08WSWAc= github.com/sashamelentyev/usestdlibvars v1.24.0/go.mod h1:9cYkq+gYJ+a5W2RPdhfaSCnTVUC1OQP/bSiiBhq3OZE= github.com/securego/gosec/v2 v2.18.2 h1:DkDt3wCiOtAHf1XkiXZBhQ6m6mK/b9T/wD257R3/c+I= github.com/securego/gosec/v2 v2.18.2/go.mod h1:xUuqSF6i0So56Y2wwohWAmB07EdBkUN6crbLlHwbyJs= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+Wwfd0XE= github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= github.com/sivchari/nosnakecase v1.7.0 h1:7QkpWIRMe8x25gckkFd2A5Pi6Ymo0qgr4JrhGt95do8= github.com/sivchari/nosnakecase v1.7.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvRbFSgJx2Gs+QY= github.com/sivchari/tenv v1.7.1 h1:PSpuD4bu6fSmtWMxSGWcvqUUgIn7k3yOJhOIzVWn8Ak= github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= github.com/skeema/knownhosts v1.2.0 h1:h9r9cf0+u7wSE+M183ZtMGgOJKiL96brpaz5ekfJCpM= github.com/skeema/knownhosts v1.2.0/go.mod h1:g4fPeYpque7P0xefxtGzV81ihjC8sX2IqpAoNkjxbMo= github.com/sonatard/noctx v0.0.2 h1:L7Dz4De2zDQhW8S0t+KUjY0MAQJd6SgVwhzNIc4ok00= github.com/sonatard/noctx v0.0.2/go.mod h1:kzFz+CzWSjQ2OzIm46uJZoXuBpa2+0y3T36U18dWqIo= github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0= 
github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc= github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg= github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc= github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c h1:+aPplBwWcHBo6q9xrfWdMrT9o4kltkmmvpemgIjep/8= github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk= github.com/tdakkota/asciicheck v0.2.0 h1:o8jvnUANo0qXtnslk2d3nMKTFNlOnJjRrNcj0j9qkHM= github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg= github.com/tenntenn/modver v1.0.1 
h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= github.com/tetafro/godot v1.4.15 h1:QzdIs+XB8q+U1WmQEWKHQbKmCw06QuQM7gLx/dky2RM= github.com/tetafro/godot v1.4.15/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio= github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 h1:quvGphlmUVU+nhpFa4gg4yJyTRJ13reZMDHrKwYw53M= github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ= github.com/timonwong/loggercheck v0.9.4 h1:HKKhqrjcVj8sxL7K77beXh0adEm6DLjV/QOGeMXEVi4= github.com/timonwong/loggercheck v0.9.4/go.mod h1:caz4zlPcgvpEkXgVnAJGowHAMW2NwHaNlpS8xDbVhTg= github.com/tomarrell/wrapcheck/v2 v2.8.1 h1:HxSqDSN0sAt0yJYsrcYVoEeyM4aI9yAm3KQpIXDJRhQ= github.com/tomarrell/wrapcheck/v2 v2.8.1/go.mod h1:/n2Q3NZ4XFT50ho6Hbxg+RV1uyo2Uow/Vdm9NQcl5SE= github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw= github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= github.com/ultraware/funlen v0.1.0 h1:BuqclbkY6pO+cvxoq7OsktIXZpgBSkYTQtmwhAK81vI= github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4= github.com/ultraware/whitespace v0.0.5 h1:hh+/cpIcopyMYbZNVov9iSxvJU3OYQg78Sfaqzi/CzI= github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= github.com/uudashr/gocognit v1.1.2 h1:l6BAEKJqQH2UpKAPKdMfZf5kE4W/2xk8pfU1OVLvniI= github.com/uudashr/gocognit v1.1.2/go.mod h1:aAVdLURqcanke8h3vg35BC++eseDm66Z7KmchI5et4k= github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad h1:W0LEBv82YCGEtcmPA3uNZBI33/qF//HAAs3MawDjRa0= github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad/go.mod h1:Hy8o65+MXnS6EwGElrSRjUzQDLXreJlzYLlWiHtt8hM= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xen0n/gosmopolitan v1.2.2 h1:/p2KTnMzwRexIW8GlKawsTWOxn7UHA+jCMF/V8HHtvU= github.com/xen0n/gosmopolitan v1.2.2/go.mod h1:7XX7Mj61uLYrj0qmeN0zi7XDon9JRAEhYQqAPLVNTeg= github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM= github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= github.com/yeya24/promlinter v0.2.0 h1:xFKDQ82orCU5jQujdaD8stOHiv8UN68BSdn2a8u8Y3o= github.com/yeya24/promlinter v0.2.0/go.mod h1:u54lkmBOZrpEbQQ6gox2zWKKLKu2SGe+2KOiextY+IA= github.com/ykadowak/zerologlint v0.1.3 h1:TLy1dTW3Nuc+YE3bYRPToG1Q9Ej78b5UUN6bjbGdxPE= github.com/ykadowak/zerologlint v0.1.3/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod 
h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= gitlab.com/bosi/decorder v0.4.1 h1:VdsdfxhstabyhZovHafFw+9eJ6eU0d2CkFNJcZz/NU4= gitlab.com/bosi/decorder v0.4.1/go.mod h1:jecSqWUew6Yle1pCr2eLWTensJMmsxHsBwt+PVbkAqA= go-simpler.org/assert v0.6.0 h1:QxSrXa4oRuo/1eHMXSBFHKvJIpWABayzKldqZyugG7E= go-simpler.org/sloglint v0.1.2 h1:IjdhF8NPxyn0Ckn2+fuIof7ntSnVUAqBFcQRrnG9AiM= go-simpler.org/sloglint v0.1.2/go.mod h1:2LL+QImPfTslD5muNPydAEYmpXIj6o/WYcqnJjLi4o4= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opentelemetry.io/build-tools v0.12.0 h1:ZqK1GuqBp9Mf1RthYO3/jjf9tPWzeHMcVDo0itFi/lI= go.opentelemetry.io/build-tools v0.12.0/go.mod h1:I76Qvv9cN055XJfTHw9t257EUd5Yp0EofeTMESlZuRU= go.opentelemetry.io/build-tools/crosslink v0.12.0 h1:GNJQURuabE5rAkIbnrqndIKyXrr7wFy54e/8ujkgjHg= go.opentelemetry.io/build-tools/crosslink v0.12.0/go.mod h1:QE8Kxf4Ygg2ltSHE+Vdys/67jtQM26j7spJLyjNA2DU= go.opentelemetry.io/build-tools/dbotconf v0.12.0 h1:I+oaEtAMK+nd660l//r14d3AI1A8BB3A4hKArvUX/n4= go.opentelemetry.io/build-tools/dbotconf v0.12.0/go.mod h1:K0Xszcb11bbFtVpjieY8gzGWLw9SNarDKvFW1Ti7w4U= go.opentelemetry.io/build-tools/gotmpl v0.12.0 h1:ysCtNFkoJddyaAdemtdbI6Qn7nb7GYn2WbHmajTW+pM= go.opentelemetry.io/build-tools/gotmpl v0.12.0/go.mod h1:FzweYUfAJC1i5ATrtFI4KJggnO9QQGPdSVKWA8RHjdE= go.opentelemetry.io/build-tools/multimod v0.12.0 h1:DKi+A+4EaKrOZDTNDDZz3ijiAduEQDo8j1rzWUaGUHo= go.opentelemetry.io/build-tools/multimod v0.12.0/go.mod h1:w03q3WgZs7reoBNnmfdClkKdTIA/IHM8ric5E2jEDD0= go.opentelemetry.io/build-tools/semconvgen v0.12.0 h1:AsjYFwo8sSLAjwjklj+yVwm2xogJUxRf5pxflATg9N0= go.opentelemetry.io/build-tools/semconvgen v0.12.0/go.mod h1:SRmou8pp+7gBmf1AvdxOTwVts74Syyrgm1/Qx7R8mis= go.tmz.dev/musttag v0.7.2 h1:1J6S9ipDbalBSODNT5jCep8dhZyMr4ttnjQagmGYR5s= go.tmz.dev/musttag v0.7.2/go.mod h1:m6q5NiiSKMnQYokefa2xGoyoXnrswCbJ0AWYzf4Zs28= go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= 
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA= golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea h1:vLCWI/yYrdEHyN2JzIzPO3aaQJHQdp89IZBA/+azVC4= golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833 h1:jWGQJV4niP+CCmFW9ekjA9Zx8vYORzOUH2/Nl5WPuLQ= golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod 
h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 
v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time 
v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= 
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8= golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk= golang.org/x/vuln v1.0.1 h1:KUas02EjQK5LTuIx1OylBQdKKZ9jeugs+HiqO5HormU= golang.org/x/vuln v1.0.1/go.mod h1:bb2hMwln/tqxg32BNY4CcxHWtHXuYa3SbIBmtsyjxtM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7/go.mod 
h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= 
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.4.6 h1:oFEHCKeID7to/3autwsWfnuv69j3NsfcXbvJKuIcep8= honnef.co/go/tools v0.4.6/go.mod h1:+rnGS1THNh8zMwnd2oVOTL9QF6vmfyG6ZXBULae2uc0= mvdan.cc/gofumpt v0.5.0 h1:0EQ+Z56k8tXjj/6TQD25BFNKQXpCvT0rnansIc7Ug5E= mvdan.cc/gofumpt v0.5.0/go.mod h1:HBeVDtMKRZpXyxFciAirzdKklDlGu8aAy1wEbH5Y9js= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= mvdan.cc/unparam v0.0.0-20230312165513-e84e2d14e3b8 h1:VuJo4Mt0EVPychre4fNlDWDuE5AjXtPJpRUWqZDQhaI= mvdan.cc/unparam v0.0.0-20230312165513-e84e2d14e3b8/go.mod h1:Oh/d7dEtzsNHGOq1Cdv8aMm3KdKhVvPbRQcM8WFpBR8= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod 
h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= opentelemetry-go-1.21.0/internal/tools/semconvkit/000077500000000000000000000000001452547353200222425ustar00rootroot00000000000000opentelemetry-go-1.21.0/internal/tools/semconvkit/main.go000066400000000000000000000041471452547353200235230ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package semconvkit is used to generate opentelemetry-go specific semantic // convention code. It is expected to be used in with the semconvgen utility // (go.opentelemetry.io/build-tools/semconvgen) to completely generate // versioned sub-packages of go.opentelemetry.io/otel/semconv. package main import ( "embed" "flag" "log" "os" "path/filepath" "strings" "text/template" ) var ( out = flag.String("output", "./", "output directory") tag = flag.String("tag", "", "OpenTelemetry tagged version") //go:embed templates/*.tmpl rootFS embed.FS ) // SemanticConventions are information about the semantic conventions being // generated. type SemanticConventions struct { // TagVer is the tagged version (i.e. v1.7.0 and not 1.7.0). TagVer string } func (sc SemanticConventions) SemVer() string { return strings.TrimPrefix(*tag, "v") } // render renders all templates to the dest directory using the data. func render(src, dest string, data *SemanticConventions) error { tmpls, err := template.ParseFS(rootFS, src) if err != nil { return err } for _, tmpl := range tmpls.Templates() { target := filepath.Join(dest, strings.TrimSuffix(tmpl.Name(), ".tmpl")) wr, err := os.Create(target) if err != nil { return err } err = tmpl.Execute(wr, data) if err != nil { return err } } return nil } func main() { flag.Parse() if *tag == "" { log.Fatalf("invalid tag: %q", *tag) } sc := &SemanticConventions{TagVer: *tag} if err := render("templates/*.tmpl", *out, sc); err != nil { log.Fatal(err) } } opentelemetry-go-1.21.0/internal/tools/semconvkit/templates/000077500000000000000000000000001452547353200242405ustar00rootroot00000000000000opentelemetry-go-1.21.0/internal/tools/semconvkit/templates/doc.go.tmpl000066400000000000000000000016741452547353200263170ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package semconv implements OpenTelemetry semantic conventions. // // OpenTelemetry semantic conventions are agreed standardized naming // patterns for OpenTelemetry things. 
This package represents the conventions // as of the {{.TagVer}} version of the OpenTelemetry specification. package semconv // import "go.opentelemetry.io/otel/semconv/{{.TagVer}}" opentelemetry-go-1.21.0/internal/tools/semconvkit/templates/exception.go.tmpl000066400000000000000000000014341452547353200275420ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/{{.TagVer}}" const ( // ExceptionEventName is the name of the Span event representing an exception. ExceptionEventName = "exception" ) opentelemetry-go-1.21.0/internal/tools/semconvkit/templates/schema.go.tmpl000066400000000000000000000017251452547353200270070ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/{{.TagVer}}" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ const SchemaURL = "https://opentelemetry.io/schemas/{{.SemVer}}" opentelemetry-go-1.21.0/internal/tools/tools.go000066400000000000000000000024361452547353200215540ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
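// Editorial sketch (not part of the upstream repository): the semconvkit
// program above renders templates such as schema.go.tmpl by executing them
// with a SemanticConventions value. The minimal, self-contained program below
// illustrates that templating mechanism with text/template; the identifiers
// demoSemconv and demoTmpl are hypothetical and exist only for this example.
package main

import (
	"os"
	"strings"
	"text/template"
)

// demoSemconv mirrors the shape of semconvkit's SemanticConventions type.
type demoSemconv struct {
	TagVer string // tagged version, e.g. "v1.21.0"
}

// SemVer strips the leading "v", as schema.go.tmpl expects via {{.SemVer}}.
func (d demoSemconv) SemVer() string { return strings.TrimPrefix(d.TagVer, "v") }

func main() {
	// A one-line stand-in for the {{.SemVer}} substitution semconvkit performs
	// when it generates a versioned semconv sub-package.
	const demoTmpl = `const SchemaURL = "https://opentelemetry.io/schemas/{{.SemVer}}"` + "\n"
	t := template.Must(template.New("demo").Parse(demoTmpl))
	// Prints: const SchemaURL = "https://opentelemetry.io/schemas/1.21.0"
	if err := t.Execute(os.Stdout, demoSemconv{TagVer: "v1.21.0"}); err != nil {
		panic(err)
	}
}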
//go:build tools // +build tools package tools // import "go.opentelemetry.io/otel/internal/tools" import ( _ "github.com/client9/misspell/cmd/misspell" _ "github.com/gogo/protobuf/protoc-gen-gogofast" _ "github.com/golangci/golangci-lint/cmd/golangci-lint" _ "github.com/itchyny/gojq" _ "github.com/jcchavezs/porto/cmd/porto" _ "github.com/wadey/gocovmerge" _ "go.opentelemetry.io/build-tools/crosslink" _ "go.opentelemetry.io/build-tools/dbotconf" _ "go.opentelemetry.io/build-tools/gotmpl" _ "go.opentelemetry.io/build-tools/multimod" _ "go.opentelemetry.io/build-tools/semconvgen" _ "golang.org/x/exp/cmd/gorelease" _ "golang.org/x/tools/cmd/stringer" _ "golang.org/x/vuln/cmd/govulncheck" ) opentelemetry-go-1.21.0/internal_logging.go000066400000000000000000000015431452547353200207600ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otel // import "go.opentelemetry.io/otel" import ( "github.com/go-logr/logr" "go.opentelemetry.io/otel/internal/global" ) // SetLogger configures the logger used internally to opentelemetry. func SetLogger(logger logr.Logger) { global.SetLogger(logger) } opentelemetry-go-1.21.0/internal_logging_test.go000066400000000000000000000014711452547353200220170ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otel_test import ( "log" "os" "github.com/go-logr/stdr" "go.opentelemetry.io/otel" ) func ExampleSetLogger() { logger := stdr.New(log.New(os.Stdout, "", log.LstdFlags|log.Lshortfile)) otel.SetLogger(logger) } opentelemetry-go-1.21.0/metric.go000066400000000000000000000042201452547353200167140ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otel // import "go.opentelemetry.io/otel" import ( "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/metric" ) // Meter returns a Meter from the global MeterProvider. 
The name must be the // name of the library providing instrumentation. This name may be the same as // the instrumented code only if that code provides built-in instrumentation. // If the name is empty, then a implementation defined default name will be // used instead. // // If this is called before a global MeterProvider is registered the returned // Meter will be a No-op implementation of a Meter. When a global MeterProvider // is registered for the first time, the returned Meter, and all the // instruments it has created or will create, are recreated automatically from // the new MeterProvider. // // This is short for GetMeterProvider().Meter(name). func Meter(name string, opts ...metric.MeterOption) metric.Meter { return GetMeterProvider().Meter(name, opts...) } // GetMeterProvider returns the registered global meter provider. // // If no global GetMeterProvider has been registered, a No-op GetMeterProvider // implementation is returned. When a global GetMeterProvider is registered for // the first time, the returned GetMeterProvider, and all the Meters it has // created or will create, are recreated automatically from the new // GetMeterProvider. func GetMeterProvider() metric.MeterProvider { return global.MeterProvider() } // SetMeterProvider registers mp as the global MeterProvider. func SetMeterProvider(mp metric.MeterProvider) { global.SetMeterProvider(mp) } opentelemetry-go-1.21.0/metric/000077500000000000000000000000001452547353200163675ustar00rootroot00000000000000opentelemetry-go-1.21.0/metric/asyncfloat64.go000066400000000000000000000235201452547353200212350ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric // import "go.opentelemetry.io/otel/metric" import ( "context" "go.opentelemetry.io/otel/metric/embedded" ) // Float64Observable describes a set of instruments used asynchronously to // record float64 measurements once per collection cycle. Observations of // these instruments are only made within a callback. // // Warning: Methods may be added to this interface in minor releases. type Float64Observable interface { Observable float64Observable() } // Float64ObservableCounter is an instrument used to asynchronously record // increasing float64 measurements once per collection cycle. Observations are // only made within a callback for this instrument. The value observed is // assumed the to be the cumulative sum of the count. // // Warning: Methods may be added to this interface in minor releases. See // package documentation on API implementation for information on how to set // default behavior for // unimplemented methods. type Float64ObservableCounter interface { // Users of the interface can ignore this. This embedded type is only used // by implementations of this interface. See the "API Implementations" // section of the package documentation for more information. 
embedded.Float64ObservableCounter Float64Observable } // Float64ObservableCounterConfig contains options for asynchronous counter // instruments that record float64 values. type Float64ObservableCounterConfig struct { description string unit string callbacks []Float64Callback } // NewFloat64ObservableCounterConfig returns a new // [Float64ObservableCounterConfig] with all opts applied. func NewFloat64ObservableCounterConfig(opts ...Float64ObservableCounterOption) Float64ObservableCounterConfig { var config Float64ObservableCounterConfig for _, o := range opts { config = o.applyFloat64ObservableCounter(config) } return config } // Description returns the configured description. func (c Float64ObservableCounterConfig) Description() string { return c.description } // Unit returns the configured unit. func (c Float64ObservableCounterConfig) Unit() string { return c.unit } // Callbacks returns the configured callbacks. func (c Float64ObservableCounterConfig) Callbacks() []Float64Callback { return c.callbacks } // Float64ObservableCounterOption applies options to a // [Float64ObservableCounterConfig]. See [Float64ObservableOption] and // [InstrumentOption] for other options that can be used as a // Float64ObservableCounterOption. type Float64ObservableCounterOption interface { applyFloat64ObservableCounter(Float64ObservableCounterConfig) Float64ObservableCounterConfig } // Float64ObservableUpDownCounter is an instrument used to asynchronously // record float64 measurements once per collection cycle. Observations are only // made within a callback for this instrument. The value observed is assumed // to be the cumulative sum of the count. // // Warning: Methods may be added to this interface in minor releases. See // package documentation on API implementation for information on how to set // default behavior for unimplemented methods. type Float64ObservableUpDownCounter interface { // Users of the interface can ignore this. This embedded type is only used // by implementations of this interface. See the "API Implementations" // section of the package documentation for more information. embedded.Float64ObservableUpDownCounter Float64Observable } // Float64ObservableUpDownCounterConfig contains options for asynchronous // counter instruments that record float64 values. type Float64ObservableUpDownCounterConfig struct { description string unit string callbacks []Float64Callback } // NewFloat64ObservableUpDownCounterConfig returns a new // [Float64ObservableUpDownCounterConfig] with all opts applied. func NewFloat64ObservableUpDownCounterConfig(opts ...Float64ObservableUpDownCounterOption) Float64ObservableUpDownCounterConfig { var config Float64ObservableUpDownCounterConfig for _, o := range opts { config = o.applyFloat64ObservableUpDownCounter(config) } return config } // Description returns the configured description. func (c Float64ObservableUpDownCounterConfig) Description() string { return c.description } // Unit returns the configured unit. func (c Float64ObservableUpDownCounterConfig) Unit() string { return c.unit } // Callbacks returns the configured callbacks. func (c Float64ObservableUpDownCounterConfig) Callbacks() []Float64Callback { return c.callbacks } // Float64ObservableUpDownCounterOption applies options to a // [Float64ObservableUpDownCounterConfig]. See [Float64ObservableOption] and // [InstrumentOption] for other options that can be used as a // Float64ObservableUpDownCounterOption. 
type Float64ObservableUpDownCounterOption interface { applyFloat64ObservableUpDownCounter(Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig } // Float64ObservableGauge is an instrument used to asynchronously record // instantaneous float64 measurements once per collection cycle. Observations // are only made within a callback for this instrument. // // Warning: Methods may be added to this interface in minor releases. See // package documentation on API implementation for information on how to set // default behavior for unimplemented methods. type Float64ObservableGauge interface { // Users of the interface can ignore this. This embedded type is only used // by implementations of this interface. See the "API Implementations" // section of the package documentation for more information. embedded.Float64ObservableGauge Float64Observable } // Float64ObservableGaugeConfig contains options for asynchronous gauge // instruments that record float64 values. type Float64ObservableGaugeConfig struct { description string unit string callbacks []Float64Callback } // NewFloat64ObservableGaugeConfig returns a new [Float64ObservableGaugeConfig] // with all opts applied. func NewFloat64ObservableGaugeConfig(opts ...Float64ObservableGaugeOption) Float64ObservableGaugeConfig { var config Float64ObservableGaugeConfig for _, o := range opts { config = o.applyFloat64ObservableGauge(config) } return config } // Description returns the configured description. func (c Float64ObservableGaugeConfig) Description() string { return c.description } // Unit returns the configured unit. func (c Float64ObservableGaugeConfig) Unit() string { return c.unit } // Callbacks returns the configured callbacks. func (c Float64ObservableGaugeConfig) Callbacks() []Float64Callback { return c.callbacks } // Float64ObservableGaugeOption applies options to a // [Float64ObservableGaugeConfig]. See [Float64ObservableOption] and // [InstrumentOption] for other options that can be used as a // Float64ObservableGaugeOption. type Float64ObservableGaugeOption interface { applyFloat64ObservableGauge(Float64ObservableGaugeConfig) Float64ObservableGaugeConfig } // Float64Observer is a recorder of float64 measurements. // // Warning: Methods may be added to this interface in minor releases. See // package documentation on API implementation for information on how to set // default behavior for unimplemented methods. type Float64Observer interface { // Users of the interface can ignore this. This embedded type is only used // by implementations of this interface. See the "API Implementations" // section of the package documentation for more information. embedded.Float64Observer // Observe records the float64 value. // // Use the WithAttributeSet (or, if performance is not a concern, // the WithAttributes) option to include measurement attributes. Observe(value float64, options ...ObserveOption) } // Float64Callback is a function registered with a Meter that makes // observations for a Float64Observable instrument it is registered with. // Calls to the Float64Observer record measurement values for the // Float64Observable. // // The function needs to complete in a finite amount of time and the deadline // of the passed context is expected to be honored. // // The function needs to make unique observations across all registered // Float64Callbacks. Meaning, it should not report measurements with the same // attributes as another Float64Callback also registered for the same // instrument. 
// // The function needs to be concurrent safe. type Float64Callback func(context.Context, Float64Observer) error // Float64ObservableOption applies options to float64 Observer instruments. type Float64ObservableOption interface { Float64ObservableCounterOption Float64ObservableUpDownCounterOption Float64ObservableGaugeOption } type float64CallbackOpt struct { cback Float64Callback } func (o float64CallbackOpt) applyFloat64ObservableCounter(cfg Float64ObservableCounterConfig) Float64ObservableCounterConfig { cfg.callbacks = append(cfg.callbacks, o.cback) return cfg } func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(cfg Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { cfg.callbacks = append(cfg.callbacks, o.cback) return cfg } func (o float64CallbackOpt) applyFloat64ObservableGauge(cfg Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { cfg.callbacks = append(cfg.callbacks, o.cback) return cfg } // WithFloat64Callback adds callback to be called for an instrument. func WithFloat64Callback(callback Float64Callback) Float64ObservableOption { return float64CallbackOpt{callback} } opentelemetry-go-1.21.0/metric/asyncfloat64_test.go000066400000000000000000000044301452547353200222730ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric // import "go.opentelemetry.io/otel/metric" import ( "context" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/metric/embedded" ) func TestFloat64ObservableConfiguration(t *testing.T) { const ( token float64 = 43 desc = "Instrument description." uBytes = "By" ) run := func(got float64ObservableConfig) func(*testing.T) { return func(t *testing.T) { assert.Equal(t, desc, got.Description(), "description") assert.Equal(t, uBytes, got.Unit(), "unit") // Functions are not comparable. 
cBacks := got.Callbacks() require.Len(t, cBacks, 1, "callbacks") o := &float64Observer{} err := cBacks[0](context.Background(), o) require.NoError(t, err) assert.Equal(t, token, o.got, "callback not set") } } cback := func(ctx context.Context, obsrv Float64Observer) error { obsrv.Observe(token) return nil } t.Run("Float64ObservableCounter", run( NewFloat64ObservableCounterConfig( WithDescription(desc), WithUnit(uBytes), WithFloat64Callback(cback), ), )) t.Run("Float64ObservableUpDownCounter", run( NewFloat64ObservableUpDownCounterConfig( WithDescription(desc), WithUnit(uBytes), WithFloat64Callback(cback), ), )) t.Run("Float64ObservableGauge", run( NewFloat64ObservableGaugeConfig( WithDescription(desc), WithUnit(uBytes), WithFloat64Callback(cback), ), )) } type float64ObservableConfig interface { Description() string Unit() string Callbacks() []Float64Callback } type float64Observer struct { embedded.Float64Observer Observable got float64 } func (o *float64Observer) Observe(v float64, _ ...ObserveOption) { o.got = v } opentelemetry-go-1.21.0/metric/asyncint64.go000066400000000000000000000231261452547353200207240ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric // import "go.opentelemetry.io/otel/metric" import ( "context" "go.opentelemetry.io/otel/metric/embedded" ) // Int64Observable describes a set of instruments used asynchronously to record // int64 measurements once per collection cycle. Observations of these // instruments are only made within a callback. // // Warning: Methods may be added to this interface in minor releases. type Int64Observable interface { Observable int64Observable() } // Int64ObservableCounter is an instrument used to asynchronously record // increasing int64 measurements once per collection cycle. Observations are // only made within a callback for this instrument. The value observed is // assumed the to be the cumulative sum of the count. // // Warning: Methods may be added to this interface in minor releases. See // package documentation on API implementation for information on how to set // default behavior for unimplemented methods. type Int64ObservableCounter interface { // Users of the interface can ignore this. This embedded type is only used // by implementations of this interface. See the "API Implementations" // section of the package documentation for more information. embedded.Int64ObservableCounter Int64Observable } // Int64ObservableCounterConfig contains options for asynchronous counter // instruments that record int64 values. type Int64ObservableCounterConfig struct { description string unit string callbacks []Int64Callback } // NewInt64ObservableCounterConfig returns a new [Int64ObservableCounterConfig] // with all opts applied. 
func NewInt64ObservableCounterConfig(opts ...Int64ObservableCounterOption) Int64ObservableCounterConfig { var config Int64ObservableCounterConfig for _, o := range opts { config = o.applyInt64ObservableCounter(config) } return config } // Description returns the configured description. func (c Int64ObservableCounterConfig) Description() string { return c.description } // Unit returns the configured unit. func (c Int64ObservableCounterConfig) Unit() string { return c.unit } // Callbacks returns the configured callbacks. func (c Int64ObservableCounterConfig) Callbacks() []Int64Callback { return c.callbacks } // Int64ObservableCounterOption applies options to a // [Int64ObservableCounterConfig]. See [Int64ObservableOption] and // [InstrumentOption] for other options that can be used as an // Int64ObservableCounterOption. type Int64ObservableCounterOption interface { applyInt64ObservableCounter(Int64ObservableCounterConfig) Int64ObservableCounterConfig } // Int64ObservableUpDownCounter is an instrument used to asynchronously record // int64 measurements once per collection cycle. Observations are only made // within a callback for this instrument. The value observed is assumed the to // be the cumulative sum of the count. // // Warning: Methods may be added to this interface in minor releases. See // package documentation on API implementation for information on how to set // default behavior for unimplemented methods. type Int64ObservableUpDownCounter interface { // Users of the interface can ignore this. This embedded type is only used // by implementations of this interface. See the "API Implementations" // section of the package documentation for more information. embedded.Int64ObservableUpDownCounter Int64Observable } // Int64ObservableUpDownCounterConfig contains options for asynchronous counter // instruments that record int64 values. type Int64ObservableUpDownCounterConfig struct { description string unit string callbacks []Int64Callback } // NewInt64ObservableUpDownCounterConfig returns a new // [Int64ObservableUpDownCounterConfig] with all opts applied. func NewInt64ObservableUpDownCounterConfig(opts ...Int64ObservableUpDownCounterOption) Int64ObservableUpDownCounterConfig { var config Int64ObservableUpDownCounterConfig for _, o := range opts { config = o.applyInt64ObservableUpDownCounter(config) } return config } // Description returns the configured description. func (c Int64ObservableUpDownCounterConfig) Description() string { return c.description } // Unit returns the configured unit. func (c Int64ObservableUpDownCounterConfig) Unit() string { return c.unit } // Callbacks returns the configured callbacks. func (c Int64ObservableUpDownCounterConfig) Callbacks() []Int64Callback { return c.callbacks } // Int64ObservableUpDownCounterOption applies options to a // [Int64ObservableUpDownCounterConfig]. See [Int64ObservableOption] and // [InstrumentOption] for other options that can be used as an // Int64ObservableUpDownCounterOption. type Int64ObservableUpDownCounterOption interface { applyInt64ObservableUpDownCounter(Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig } // Int64ObservableGauge is an instrument used to asynchronously record // instantaneous int64 measurements once per collection cycle. Observations are // only made within a callback for this instrument. // // Warning: Methods may be added to this interface in minor releases. See // package documentation on API implementation for information on how to set // default behavior for unimplemented methods. 
type Int64ObservableGauge interface { // Users of the interface can ignore this. This embedded type is only used // by implementations of this interface. See the "API Implementations" // section of the package documentation for more information. embedded.Int64ObservableGauge Int64Observable } // Int64ObservableGaugeConfig contains options for asynchronous counter // instruments that record int64 values. type Int64ObservableGaugeConfig struct { description string unit string callbacks []Int64Callback } // NewInt64ObservableGaugeConfig returns a new [Int64ObservableGaugeConfig] // with all opts applied. func NewInt64ObservableGaugeConfig(opts ...Int64ObservableGaugeOption) Int64ObservableGaugeConfig { var config Int64ObservableGaugeConfig for _, o := range opts { config = o.applyInt64ObservableGauge(config) } return config } // Description returns the configured description. func (c Int64ObservableGaugeConfig) Description() string { return c.description } // Unit returns the configured unit. func (c Int64ObservableGaugeConfig) Unit() string { return c.unit } // Callbacks returns the configured callbacks. func (c Int64ObservableGaugeConfig) Callbacks() []Int64Callback { return c.callbacks } // Int64ObservableGaugeOption applies options to a // [Int64ObservableGaugeConfig]. See [Int64ObservableOption] and // [InstrumentOption] for other options that can be used as an // Int64ObservableGaugeOption. type Int64ObservableGaugeOption interface { applyInt64ObservableGauge(Int64ObservableGaugeConfig) Int64ObservableGaugeConfig } // Int64Observer is a recorder of int64 measurements. // // Warning: Methods may be added to this interface in minor releases. See // package documentation on API implementation for information on how to set // default behavior for unimplemented methods. type Int64Observer interface { // Users of the interface can ignore this. This embedded type is only used // by implementations of this interface. See the "API Implementations" // section of the package documentation for more information. embedded.Int64Observer // Observe records the int64 value. // // Use the WithAttributeSet (or, if performance is not a concern, // the WithAttributes) option to include measurement attributes. Observe(value int64, options ...ObserveOption) } // Int64Callback is a function registered with a Meter that makes observations // for an Int64Observerable instrument it is registered with. Calls to the // Int64Observer record measurement values for the Int64Observable. // // The function needs to complete in a finite amount of time and the deadline // of the passed context is expected to be honored. // // The function needs to make unique observations across all registered // Int64Callbacks. Meaning, it should not report measurements with the same // attributes as another Int64Callbacks also registered for the same // instrument. // // The function needs to be concurrent safe. type Int64Callback func(context.Context, Int64Observer) error // Int64ObservableOption applies options to int64 Observer instruments. 
type Int64ObservableOption interface { Int64ObservableCounterOption Int64ObservableUpDownCounterOption Int64ObservableGaugeOption } type int64CallbackOpt struct { cback Int64Callback } func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounterConfig) Int64ObservableCounterConfig { cfg.callbacks = append(cfg.callbacks, o.cback) return cfg } func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(cfg Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { cfg.callbacks = append(cfg.callbacks, o.cback) return cfg } func (o int64CallbackOpt) applyInt64ObservableGauge(cfg Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { cfg.callbacks = append(cfg.callbacks, o.cback) return cfg } // WithInt64Callback adds callback to be called for an instrument. func WithInt64Callback(callback Int64Callback) Int64ObservableOption { return int64CallbackOpt{callback} } opentelemetry-go-1.21.0/metric/asyncint64_test.go000066400000000000000000000043521452547353200217630ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric // import "go.opentelemetry.io/otel/metric" import ( "context" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/metric/embedded" ) func TestInt64ObservableConfiguration(t *testing.T) { const ( token int64 = 43 desc = "Instrument description." uBytes = "By" ) run := func(got int64ObservableConfig) func(*testing.T) { return func(t *testing.T) { assert.Equal(t, desc, got.Description(), "description") assert.Equal(t, uBytes, got.Unit(), "unit") // Functions are not comparable. cBacks := got.Callbacks() require.Len(t, cBacks, 1, "callbacks") o := &int64Observer{} err := cBacks[0](context.Background(), o) require.NoError(t, err) assert.Equal(t, token, o.got, "callback not set") } } cback := func(ctx context.Context, obsrv Int64Observer) error { obsrv.Observe(token) return nil } t.Run("Int64ObservableCounter", run( NewInt64ObservableCounterConfig( WithDescription(desc), WithUnit(uBytes), WithInt64Callback(cback), ), )) t.Run("Int64ObservableUpDownCounter", run( NewInt64ObservableUpDownCounterConfig( WithDescription(desc), WithUnit(uBytes), WithInt64Callback(cback), ), )) t.Run("Int64ObservableGauge", run( NewInt64ObservableGaugeConfig( WithDescription(desc), WithUnit(uBytes), WithInt64Callback(cback), ), )) } type int64ObservableConfig interface { Description() string Unit() string Callbacks() []Int64Callback } type int64Observer struct { embedded.Int64Observer Observable got int64 } func (o *int64Observer) Observe(v int64, _ ...ObserveOption) { o.got = v } opentelemetry-go-1.21.0/metric/config.go000066400000000000000000000055221452547353200201670ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric // import "go.opentelemetry.io/otel/metric" import "go.opentelemetry.io/otel/attribute" // MeterConfig contains options for Meters. type MeterConfig struct { instrumentationVersion string schemaURL string attrs attribute.Set // Ensure forward compatibility by explicitly making this not comparable. noCmp [0]func() //nolint: unused // This is indeed used. } // InstrumentationVersion returns the version of the library providing // instrumentation. func (cfg MeterConfig) InstrumentationVersion() string { return cfg.instrumentationVersion } // InstrumentationAttributes returns the attributes associated with the library // providing instrumentation. func (cfg MeterConfig) InstrumentationAttributes() attribute.Set { return cfg.attrs } // SchemaURL is the schema_url of the library providing instrumentation. func (cfg MeterConfig) SchemaURL() string { return cfg.schemaURL } // MeterOption is an interface for applying Meter options. type MeterOption interface { // applyMeter is used to set a MeterOption value of a MeterConfig. applyMeter(MeterConfig) MeterConfig } // NewMeterConfig creates a new MeterConfig and applies // all the given options. func NewMeterConfig(opts ...MeterOption) MeterConfig { var config MeterConfig for _, o := range opts { config = o.applyMeter(config) } return config } type meterOptionFunc func(MeterConfig) MeterConfig func (fn meterOptionFunc) applyMeter(cfg MeterConfig) MeterConfig { return fn(cfg) } // WithInstrumentationVersion sets the instrumentation version. func WithInstrumentationVersion(version string) MeterOption { return meterOptionFunc(func(config MeterConfig) MeterConfig { config.instrumentationVersion = version return config }) } // WithInstrumentationAttributes sets the instrumentation attributes. // // The passed attributes will be de-duplicated. func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption { return meterOptionFunc(func(config MeterConfig) MeterConfig { config.attrs = attribute.NewSet(attr...) return config }) } // WithSchemaURL sets the schema URL. func WithSchemaURL(schemaURL string) MeterOption { return meterOptionFunc(func(config MeterConfig) MeterConfig { config.schemaURL = schemaURL return config }) } opentelemetry-go-1.21.0/metric/config_test.go000066400000000000000000000025211452547353200212220ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
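// Editorial sketch (not part of the upstream repository): how the MeterOption
// values defined in config.go above are typically supplied when acquiring a
// Meter from the global provider. The instrumentation name
// "example.com/hypothetical/instrumentation" and the attribute values are
// placeholders, not conventions defined by this module.
package main

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	// Each option below is applied to the MeterConfig built by NewMeterConfig
	// inside the registered MeterProvider implementation.
	meter := otel.Meter(
		"example.com/hypothetical/instrumentation",
		metric.WithInstrumentationVersion("v0.1.0"),
		metric.WithSchemaURL("https://opentelemetry.io/schemas/1.21.0"),
		metric.WithInstrumentationAttributes(attribute.String("tenant", "example")),
	)
	// Until a global MeterProvider is registered with otel.SetMeterProvider,
	// the returned Meter is the documented no-op implementation.
	_ = meter
}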
package metric_test import ( "testing" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" ) func TestConfig(t *testing.T) { version := "v1.1.1" schemaURL := "https://opentelemetry.io/schemas/1.0.0" attr := attribute.NewSet( attribute.String("user", "alice"), attribute.Bool("admin", true), ) c := metric.NewMeterConfig( metric.WithInstrumentationVersion(version), metric.WithSchemaURL(schemaURL), metric.WithInstrumentationAttributes(attr.ToSlice()...), ) assert.Equal(t, version, c.InstrumentationVersion(), "instrumentation version") assert.Equal(t, schemaURL, c.SchemaURL(), "schema URL") assert.Equal(t, attr, c.InstrumentationAttributes(), "instrumentation attributes") } opentelemetry-go-1.21.0/metric/doc.go000066400000000000000000000170331452547353200174670ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* Package metric provides the OpenTelemetry API used to measure metrics about source code operation. This API is separate from its implementation so the instrumentation built from it is reusable. See [go.opentelemetry.io/otel/sdk/metric] for the official OpenTelemetry implementation of this API. All measurements made with this package are made via instruments. These instruments are created by a [Meter] which itself is created by a [MeterProvider]. Applications need to accept a [MeterProvider] implementation as a starting point when instrumenting. This can be done directly, or by using the OpenTelemetry global MeterProvider via [GetMeterProvider]. Using an appropriately named [Meter] from the accepted [MeterProvider], instrumentation can then be built from the [Meter]'s instruments. # Instruments Each instrument is designed to make measurements of a particular type. Broadly, all instruments fall into two overlapping logical categories: asynchronous or synchronous, and int64 or float64. All synchronous instruments ([Int64Counter], [Int64UpDownCounter], [Int64Histogram], [Float64Counter], [Float64UpDownCounter], and [Float64Histogram]) are used to measure the operation and performance of source code during the source code execution. These instruments only make measurements when the source code they instrument is run. All asynchronous instruments ([Int64ObservableCounter], [Int64ObservableUpDownCounter], [Int64ObservableGauge], [Float64ObservableCounter], [Float64ObservableUpDownCounter], and [Float64ObservableGauge]) are used to measure metrics outside of the execution of source code. They are said to make "observations" via a callback function called once every measurement collection cycle. Each instrument is also grouped by the value type it measures. Either int64 or float64. The value being measured will dictate which instrument in these categories to use. Outside of these two broad categories, instruments are described by the function they are designed to serve. 
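As an illustrative sketch (the meter and instrument names below are
placeholders, not conventions defined by this package, and error handling is
elided), a synchronous counter and an asynchronous gauge could be created from
a Meter as follows:

	meter := otel.Meter("example.com/hypothetical/pkg")

	// Synchronous: measured inline as the instrumented code runs.
	requests, _ := meter.Int64Counter("example.request.count")
	requests.Add(context.Background(), 1)

	// Asynchronous: observed by a callback once per collection cycle.
	_, _ = meter.Float64ObservableGauge(
		"example.queue.utilization",
		metric.WithFloat64Callback(func(_ context.Context, o metric.Float64Observer) error {
			o.Observe(0.8) // the most recent value is reported
			return nil
		}),
	)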
All Counters ([Int64Counter], [Float64Counter], [Int64ObservableCounter], and [Float64ObservableCounter]) are designed to measure values that never decrease in value, but instead only incrementally increase in value. UpDownCounters ([Int64UpDownCounter], [Float64UpDownCounter], [Int64ObservableUpDownCounter], and [Float64ObservableUpDownCounter]) on the other hand, are designed to measure values that can increase and decrease. When more information needs to be conveyed about all the synchronous measurements made during a collection cycle, a Histogram ([Int64Histogram] and [Float64Histogram]) should be used. Finally, when just the most recent measurement needs to be conveyed about an asynchronous measurement, a Gauge ([Int64ObservableGauge] and [Float64ObservableGauge]) should be used. See the [OpenTelemetry documentation] for more information about instruments and their intended use. # Measurements Measurements are made by recording values and information about the values with an instrument. How these measurements are recorded depends on the instrument. Measurements for synchronous instruments ([Int64Counter], [Int64UpDownCounter], [Int64Histogram], [Float64Counter], [Float64UpDownCounter], and [Float64Histogram]) are recorded using the instrument methods directly. All counter instruments have an Add method that is used to measure an increment value, and all histogram instruments have a Record method to measure a data point. Asynchronous instruments ([Int64ObservableCounter], [Int64ObservableUpDownCounter], [Int64ObservableGauge], [Float64ObservableCounter], [Float64ObservableUpDownCounter], and [Float64ObservableGauge]) record measurements within a callback function. The callback is registered with the Meter which ensures the callback is called once per collection cycle. A callback can be registered two ways: during the instrument's creation using an option, or later using the RegisterCallback method of the [Meter] that created the instrument. If the following criteria are met, an option ([WithInt64Callback] or [WithFloat64Callback]) can be used during the asynchronous instrument's creation to register a callback ([Int64Callback] or [Float64Callback], respectively): - The measurement process is known when the instrument is created - Only that instrument will make a measurement within the callback - The callback never needs to be unregistered If the criteria are not met, use the RegisterCallback method of the [Meter] that created the instrument to register a [Callback]. # API Implementations This package does not conform to the standard Go versioning policy, all of its interfaces may have methods added to them without a package major version bump. This non-standard API evolution could surprise an uninformed implementation author. They could unknowingly build their implementation in a way that would result in a runtime panic for their users that update to the new API. The API is designed to help inform an instrumentation author about this non-standard API evolution. It requires them to choose a default behavior for unimplemented interface methods. There are three behavior choices they can make: - Compilation failure - Panic - Default to another implementation All interfaces in this API embed a corresponding interface from [go.opentelemetry.io/otel/metric/embedded]. 
If an author wants the default behavior of their implementations to be a compilation failure, signaling to their users they need to update to the latest version of that implementation, they need to embed the corresponding interface from [go.opentelemetry.io/otel/metric/embedded] in their implementation. For example, import "go.opentelemetry.io/otel/metric/embedded" type MeterProvider struct { embedded.MeterProvider // ... } If an author wants the default behavior of their implementations to a panic, they need to embed the API interface directly. import "go.opentelemetry.io/otel/metric" type MeterProvider struct { metric.MeterProvider // ... } This is not a recommended behavior as it could lead to publishing packages that contain runtime panics when users update other package that use newer versions of [go.opentelemetry.io/otel/metric]. Finally, an author can embed another implementation in theirs. The embedded implementation will be used for methods not defined by the author. For example, an author who wants to default to silently dropping the call can use [go.opentelemetry.io/otel/metric/noop]: import "go.opentelemetry.io/otel/metric/noop" type MeterProvider struct { noop.MeterProvider // ... } It is strongly recommended that authors only embed [go.opentelemetry.io/otel/metric/noop] if they choose this default behavior. That implementation is the only one OpenTelemetry authors can guarantee will fully implement all the API interfaces when a user updates their API. [OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/ [GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider */ package metric // import "go.opentelemetry.io/otel/metric" opentelemetry-go-1.21.0/metric/embedded/000077500000000000000000000000001452547353200201205ustar00rootroot00000000000000opentelemetry-go-1.21.0/metric/embedded/embedded.go000066400000000000000000000257241452547353200222120ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package embedded provides interfaces embedded within the [OpenTelemetry // metric API]. // // Implementers of the [OpenTelemetry metric API] can embed the relevant type // from this package into their implementation directly. Doing so will result // in a compilation error for users when the [OpenTelemetry metric API] is // extended (which is something that can happen without a major version bump of // the API package). // // [OpenTelemetry metric API]: https://pkg.go.dev/go.opentelemetry.io/otel/metric package embedded // import "go.opentelemetry.io/otel/metric/embedded" // MeterProvider is embedded in // [go.opentelemetry.io/otel/metric.MeterProvider]. 
// // Embed this interface in your implementation of the // [go.opentelemetry.io/otel/metric.MeterProvider] if you want users to // experience a compilation error, signaling they need to update to your latest // implementation, when the [go.opentelemetry.io/otel/metric.MeterProvider] // interface is extended (which is something that can happen without a major // version bump of the API package). type MeterProvider interface{ meterProvider() } // Meter is embedded in [go.opentelemetry.io/otel/metric.Meter]. // // Embed this interface in your implementation of the // [go.opentelemetry.io/otel/metric.Meter] if you want users to experience a // compilation error, signaling they need to update to your latest // implementation, when the [go.opentelemetry.io/otel/metric.Meter] interface // is extended (which is something that can happen without a major version bump // of the API package). type Meter interface{ meter() } // Float64Observer is embedded in // [go.opentelemetry.io/otel/metric.Float64Observer]. // // Embed this interface in your implementation of the // [go.opentelemetry.io/otel/metric.Float64Observer] if you want // users to experience a compilation error, signaling they need to update to // your latest implementation, when the // [go.opentelemetry.io/otel/metric.Float64Observer] interface is // extended (which is something that can happen without a major version bump of // the API package). type Float64Observer interface{ float64Observer() } // Int64Observer is embedded in // [go.opentelemetry.io/otel/metric.Int64Observer]. // // Embed this interface in your implementation of the // [go.opentelemetry.io/otel/metric.Int64Observer] if you want users // to experience a compilation error, signaling they need to update to your // latest implementation, when the // [go.opentelemetry.io/otel/metric.Int64Observer] interface is // extended (which is something that can happen without a major version bump of // the API package). type Int64Observer interface{ int64Observer() } // Observer is embedded in [go.opentelemetry.io/otel/metric.Observer]. // // Embed this interface in your implementation of the // [go.opentelemetry.io/otel/metric.Observer] if you want users to experience a // compilation error, signaling they need to update to your latest // implementation, when the [go.opentelemetry.io/otel/metric.Observer] // interface is extended (which is something that can happen without a major // version bump of the API package). type Observer interface{ observer() } // Registration is embedded in [go.opentelemetry.io/otel/metric.Registration]. // // Embed this interface in your implementation of the // [go.opentelemetry.io/otel/metric.Registration] if you want users to // experience a compilation error, signaling they need to update to your latest // implementation, when the [go.opentelemetry.io/otel/metric.Registration] // interface is extended (which is something that can happen without a major // version bump of the API package). type Registration interface{ registration() } // Float64Counter is embedded in // [go.opentelemetry.io/otel/metric.Float64Counter]. // // Embed this interface in your implementation of the // [go.opentelemetry.io/otel/metric.Float64Counter] if you want // users to experience a compilation error, signaling they need to update to // your latest implementation, when the // [go.opentelemetry.io/otel/metric.Float64Counter] interface is // extended (which is something that can happen without a major version bump of // the API package). 
type Float64Counter interface{ float64Counter() } // Float64Histogram is embedded in // [go.opentelemetry.io/otel/metric.Float64Histogram]. // // Embed this interface in your implementation of the // [go.opentelemetry.io/otel/metric.Float64Histogram] if you want // users to experience a compilation error, signaling they need to update to // your latest implementation, when the // [go.opentelemetry.io/otel/metric.Float64Histogram] interface is // extended (which is something that can happen without a major version bump of // the API package). type Float64Histogram interface{ float64Histogram() } // Float64ObservableCounter is embedded in // [go.opentelemetry.io/otel/metric.Float64ObservableCounter]. // // Embed this interface in your implementation of the // [go.opentelemetry.io/otel/metric.Float64ObservableCounter] if you // want users to experience a compilation error, signaling they need to update // to your latest implementation, when the // [go.opentelemetry.io/otel/metric.Float64ObservableCounter] // interface is extended (which is something that can happen without a major // version bump of the API package). type Float64ObservableCounter interface{ float64ObservableCounter() } // Float64ObservableGauge is embedded in // [go.opentelemetry.io/otel/metric.Float64ObservableGauge]. // // Embed this interface in your implementation of the // [go.opentelemetry.io/otel/metric.Float64ObservableGauge] if you // want users to experience a compilation error, signaling they need to update // to your latest implementation, when the // [go.opentelemetry.io/otel/metric.Float64ObservableGauge] // interface is extended (which is something that can happen without a major // version bump of the API package). type Float64ObservableGauge interface{ float64ObservableGauge() } // Float64ObservableUpDownCounter is embedded in // [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter]. // // Embed this interface in your implementation of the // [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] // if you want users to experience a compilation error, signaling they need to // update to your latest implementation, when the // [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] // interface is extended (which is something that can happen without a major // version bump of the API package). type Float64ObservableUpDownCounter interface{ float64ObservableUpDownCounter() } // Float64UpDownCounter is embedded in // [go.opentelemetry.io/otel/metric.Float64UpDownCounter]. // // Embed this interface in your implementation of the // [go.opentelemetry.io/otel/metric.Float64UpDownCounter] if you // want users to experience a compilation error, signaling they need to update // to your latest implementation, when the // [go.opentelemetry.io/otel/metric.Float64UpDownCounter] interface // is extended (which is something that can happen without a major version bump // of the API package). type Float64UpDownCounter interface{ float64UpDownCounter() } // Int64Counter is embedded in // [go.opentelemetry.io/otel/metric.Int64Counter]. // // Embed this interface in your implementation of the // [go.opentelemetry.io/otel/metric.Int64Counter] if you want users // to experience a compilation error, signaling they need to update to your // latest implementation, when the // [go.opentelemetry.io/otel/metric.Int64Counter] interface is // extended (which is something that can happen without a major version bump of // the API package). 
type Int64Counter interface{ int64Counter() } // Int64Histogram is embedded in // [go.opentelemetry.io/otel/metric.Int64Histogram]. // // Embed this interface in your implementation of the // [go.opentelemetry.io/otel/metric.Int64Histogram] if you want // users to experience a compilation error, signaling they need to update to // your latest implementation, when the // [go.opentelemetry.io/otel/metric.Int64Histogram] interface is // extended (which is something that can happen without a major version bump of // the API package). type Int64Histogram interface{ int64Histogram() } // Int64ObservableCounter is embedded in // [go.opentelemetry.io/otel/metric.Int64ObservableCounter]. // // Embed this interface in your implementation of the // [go.opentelemetry.io/otel/metric.Int64ObservableCounter] if you // want users to experience a compilation error, signaling they need to update // to your latest implementation, when the // [go.opentelemetry.io/otel/metric.Int64ObservableCounter] // interface is extended (which is something that can happen without a major // version bump of the API package). type Int64ObservableCounter interface{ int64ObservableCounter() } // Int64ObservableGauge is embedded in // [go.opentelemetry.io/otel/metric.Int64ObservableGauge]. // // Embed this interface in your implementation of the // [go.opentelemetry.io/otel/metric.Int64ObservableGauge] if you // want users to experience a compilation error, signaling they need to update // to your latest implementation, when the // [go.opentelemetry.io/otel/metric.Int64ObservableGauge] interface // is extended (which is something that can happen without a major version bump // of the API package). type Int64ObservableGauge interface{ int64ObservableGauge() } // Int64ObservableUpDownCounter is embedded in // [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter]. // // Embed this interface in your implementation of the // [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] if // you want users to experience a compilation error, signaling they need to // update to your latest implementation, when the // [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] // interface is extended (which is something that can happen without a major // version bump of the API package). type Int64ObservableUpDownCounter interface{ int64ObservableUpDownCounter() } // Int64UpDownCounter is embedded in // [go.opentelemetry.io/otel/metric.Int64UpDownCounter]. // // Embed this interface in your implementation of the // [go.opentelemetry.io/otel/metric.Int64UpDownCounter] if you want // users to experience a compilation error, signaling they need to update to // your latest implementation, when the // [go.opentelemetry.io/otel/metric.Int64UpDownCounter] interface is // extended (which is something that can happen without a major version bump of // the API package). type Int64UpDownCounter interface{ int64UpDownCounter() } opentelemetry-go-1.21.0/metric/example_test.go000066400000000000000000000173441452547353200214210ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package metric_test import ( "context" "database/sql" "fmt" "net/http" "runtime" "time" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" ) var meter = otel.Meter("my-service-meter") func ExampleMeter_synchronous() { // Create a histogram using the global MeterProvider. workDuration, err := meter.Int64Histogram( "workDuration", metric.WithUnit("ms")) if err != nil { fmt.Println("Failed to register instrument") panic(err) } startTime := time.Now() ctx := context.Background() // Do work // ... workDuration.Record(ctx, time.Since(startTime).Milliseconds()) } func ExampleMeter_asynchronous_single() { _, err := meter.Int64ObservableGauge( "DiskUsage", metric.WithUnit("By"), metric.WithInt64Callback(func(_ context.Context, obsrv metric.Int64Observer) error { // Do the real work here to get the real disk usage. For example, // // usage, err := GetDiskUsage(diskID) // if err != nil { // if retryable(err) { // // Retry the usage measurement. // } else { // return err // } // } // // For demonstration purpose, a static value is used here. usage := 75000 obsrv.Observe(int64(usage), metric.WithAttributes(attribute.Int("disk.id", 3))) return nil }), ) if err != nil { fmt.Println("failed to register instrument") panic(err) } } func ExampleMeter_asynchronous_multiple() { // This is just a sample of memory stats to record from the Memstats heapAlloc, err := meter.Int64ObservableUpDownCounter("heapAllocs") if err != nil { fmt.Println("failed to register updown counter for heapAllocs") panic(err) } gcCount, err := meter.Int64ObservableCounter("gcCount") if err != nil { fmt.Println("failed to register counter for gcCount") panic(err) } _, err = meter.RegisterCallback( func(_ context.Context, o metric.Observer) error { memStats := &runtime.MemStats{} // This call does work runtime.ReadMemStats(memStats) o.ObserveInt64(heapAlloc, int64(memStats.HeapAlloc)) o.ObserveInt64(gcCount, int64(memStats.NumGC)) return nil }, heapAlloc, gcCount, ) if err != nil { fmt.Println("Failed to register callback") panic(err) } } // Counters can be used to measure a non-negative, increasing value. // // Here's how you might report the number of calls for an HTTP handler. func ExampleMeter_counter() { apiCounter, err := meter.Int64Counter( "api.counter", metric.WithDescription("Number of API calls."), metric.WithUnit("{call}"), ) if err != nil { panic(err) } http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { apiCounter.Add(r.Context(), 1) // do some work in an API call }) } // UpDown counters can increment and decrement, allowing you to observe // a cumulative value that goes up or down. // // Here's how you might report the number of items of some collection. func ExampleMeter_upDownCounter() { var err error itemsCounter, err := meter.Int64UpDownCounter( "items.counter", metric.WithDescription("Number of items."), metric.WithUnit("{item}"), ) if err != nil { panic(err) } _ = func() { // code that adds an item to the collection itemsCounter.Add(context.Background(), 1) } _ = func() { // code that removes an item from the collection itemsCounter.Add(context.Background(), -1) } } // Histograms are used to measure a distribution of values over time. // // Here's how you might report a distribution of response times for an HTTP handler. 
func ExampleMeter_histogram() { histogram, err := meter.Float64Histogram( "task.duration", metric.WithDescription("The duration of task execution."), metric.WithUnit("s"), metric.WithExplicitBucketBoundaries(.005, .01, .025, .05, .075, .1, .25, .5, .75, 1, 2.5, 5, 7.5, 10), ) if err != nil { panic(err) } http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { start := time.Now() // do some work in an API call duration := time.Since(start) histogram.Record(r.Context(), duration.Seconds()) }) } // Observable counters can be used to measure an additive, non-negative, // monotonically increasing value. // // Here's how you might report time since the application started. func ExampleMeter_observableCounter() { start := time.Now() if _, err := meter.Float64ObservableCounter( "uptime", metric.WithDescription("The duration since the application started."), metric.WithUnit("s"), metric.WithFloat64Callback(func(_ context.Context, o metric.Float64Observer) error { o.Observe(float64(time.Since(start).Seconds())) return nil }), ); err != nil { panic(err) } } // Observable UpDown counters can increment and decrement, allowing you to measure // an additive, non-negative, non-monotonically increasing cumulative value. // // Here's how you might report some database metrics. func ExampleMeter_observableUpDownCounter() { // The function registers asynchronous metrics for the provided db. // Make sure to unregister metric.Registration before closing the provided db. _ = func(db *sql.DB, meter metric.Meter, poolName string) (metric.Registration, error) { max, err := meter.Int64ObservableUpDownCounter( "db.client.connections.max", metric.WithDescription("The maximum number of open connections allowed."), metric.WithUnit("{connection}"), ) if err != nil { return nil, err } waitTime, err := meter.Int64ObservableUpDownCounter( "db.client.connections.wait_time", metric.WithDescription("The time it took to obtain an open connection from the pool."), metric.WithUnit("ms"), ) if err != nil { return nil, err } reg, err := meter.RegisterCallback( func(_ context.Context, o metric.Observer) error { stats := db.Stats() o.ObserveInt64(max, int64(stats.MaxOpenConnections)) o.ObserveInt64(waitTime, int64(stats.WaitDuration)) return nil }, max, waitTime, ) if err != nil { return nil, err } return reg, nil } } // Observable Gauges should be used to measure non-additive values. // // Here's how you might report memory usage of the heap objects used // in application. func ExampleMeter_observableGauge() { if _, err := meter.Int64ObservableGauge( "memory.heap", metric.WithDescription( "Memory usage of the allocated heap objects.", ), metric.WithUnit("By"), metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error { var m runtime.MemStats runtime.ReadMemStats(&m) o.Observe(int64(m.HeapAlloc)) return nil }), ); err != nil { panic(err) } } // You can add Attributes by using the [WithAttributeSet] and [WithAttributes] options. // // Here's how you might add the HTTP status code attribute to your recordings. 
func ExampleMeter_attributes() { apiCounter, err := meter.Int64UpDownCounter( "api.finished.counter", metric.WithDescription("Number of finished API calls."), metric.WithUnit("{call}"), ) if err != nil { panic(err) } http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { // do some work in an API call and set the response HTTP status code statusCode := http.StatusOK apiCounter.Add(r.Context(), 1, metric.WithAttributes(semconv.HTTPStatusCode(statusCode))) }) } opentelemetry-go-1.21.0/metric/go.mod000066400000000000000000000007771452547353200175100ustar00rootroot00000000000000module go.opentelemetry.io/otel/metric go 1.20 require ( github.com/stretchr/testify v1.8.4 go.opentelemetry.io/otel v1.21.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/otel/trace v1.21.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) replace go.opentelemetry.io/otel => ../ replace go.opentelemetry.io/otel/trace => ../trace opentelemetry-go-1.21.0/metric/go.sum000066400000000000000000000025411452547353200175240ustar00rootroot00000000000000github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= opentelemetry-go-1.21.0/metric/instrument.go000066400000000000000000000241171452547353200211330ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric // import "go.opentelemetry.io/otel/metric" import "go.opentelemetry.io/otel/attribute" // Observable is used as a grouping mechanism for all instruments that are // updated within a Callback. 
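//
// For example, a hedged sketch (the instrument name and the queueLen helper
// are illustrative only): observable instruments created by a Meter are passed
// to RegisterCallback as Observables so the callback can report values for
// them.
//
//	gauge, _ := meter.Int64ObservableGauge("queue.length")
//	reg, _ := meter.RegisterCallback(
//		func(_ context.Context, o Observer) error {
//			o.ObserveInt64(gauge, int64(queueLen()))
//			return nil
//		},
//		gauge, // registered as an Observable
//	)
//	defer func() { _ = reg.Unregister() }()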
type Observable interface { observable() } // InstrumentOption applies options to all instruments. type InstrumentOption interface { Int64CounterOption Int64UpDownCounterOption Int64HistogramOption Int64ObservableCounterOption Int64ObservableUpDownCounterOption Int64ObservableGaugeOption Float64CounterOption Float64UpDownCounterOption Float64HistogramOption Float64ObservableCounterOption Float64ObservableUpDownCounterOption Float64ObservableGaugeOption } // HistogramOption applies options to histogram instruments. type HistogramOption interface { Int64HistogramOption Float64HistogramOption } type descOpt string func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { c.description = string(o) return c } func (o descOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig { c.description = string(o) return c } func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { c.description = string(o) return c } func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { c.description = string(o) return c } func (o descOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { c.description = string(o) return c } func (o descOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { c.description = string(o) return c } func (o descOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig { c.description = string(o) return c } func (o descOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig { c.description = string(o) return c } func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { c.description = string(o) return c } func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { c.description = string(o) return c } func (o descOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { c.description = string(o) return c } func (o descOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { c.description = string(o) return c } // WithDescription sets the instrument description. 
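//
// For example, a hedged sketch of setting a description at instrument
// creation (the meter variable and instrument name are illustrative):
//
//	counter, err := meter.Int64Counter(
//		"request.count",
//		WithDescription("Number of requests handled."),
//	)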
func WithDescription(desc string) InstrumentOption { return descOpt(desc) } type unitOpt string func (o unitOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { c.unit = string(o) return c } func (o unitOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig { c.unit = string(o) return c } func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { c.unit = string(o) return c } func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { c.unit = string(o) return c } func (o unitOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { c.unit = string(o) return c } func (o unitOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { c.unit = string(o) return c } func (o unitOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig { c.unit = string(o) return c } func (o unitOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig { c.unit = string(o) return c } func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { c.unit = string(o) return c } func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { c.unit = string(o) return c } func (o unitOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { c.unit = string(o) return c } func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { c.unit = string(o) return c } // WithUnit sets the instrument unit. // // The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code. func WithUnit(u string) InstrumentOption { return unitOpt(u) } // WithExplicitBucketBoundaries sets the instrument explicit bucket boundaries. // // This option is considered "advisory", and may be ignored by API implementations. func WithExplicitBucketBoundaries(bounds ...float64) HistogramOption { return bucketOpt(bounds) } type bucketOpt []float64 func (o bucketOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { c.explicitBucketBoundaries = o return c } func (o bucketOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { c.explicitBucketBoundaries = o return c } // AddOption applies options to an addition measurement. See // [MeasurementOption] for other options that can be used as an AddOption. type AddOption interface { applyAdd(AddConfig) AddConfig } // AddConfig contains options for an addition measurement. type AddConfig struct { attrs attribute.Set } // NewAddConfig returns a new [AddConfig] with all opts applied. func NewAddConfig(opts []AddOption) AddConfig { config := AddConfig{attrs: *attribute.EmptySet()} for _, o := range opts { config = o.applyAdd(config) } return config } // Attributes returns the configured attribute set. func (c AddConfig) Attributes() attribute.Set { return c.attrs } // RecordOption applies options to an addition measurement. See // [MeasurementOption] for other options that can be used as a RecordOption. type RecordOption interface { applyRecord(RecordConfig) RecordConfig } // RecordConfig contains options for a recorded measurement. type RecordConfig struct { attrs attribute.Set } // NewRecordConfig returns a new [RecordConfig] with all opts applied. 
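//
// A hedged sketch of how an SDK implementation might use it when recording a
// measurement (myHistogram is a hypothetical implementation type):
//
//	func (h *myHistogram) Record(ctx context.Context, v float64, opts ...RecordOption) {
//		attrs := NewRecordConfig(opts).Attributes()
//		// aggregate v under attrs ...
//	}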
func NewRecordConfig(opts []RecordOption) RecordConfig { config := RecordConfig{attrs: *attribute.EmptySet()} for _, o := range opts { config = o.applyRecord(config) } return config } // Attributes returns the configured attribute set. func (c RecordConfig) Attributes() attribute.Set { return c.attrs } // ObserveOption applies options to an observed measurement. See // [MeasurementOption] for other options that can be used as an ObserveOption. type ObserveOption interface { applyObserve(ObserveConfig) ObserveConfig } // ObserveConfig contains options for an observed measurement. type ObserveConfig struct { attrs attribute.Set } // NewObserveConfig returns a new [ObserveConfig] with all opts applied. func NewObserveConfig(opts []ObserveOption) ObserveConfig { config := ObserveConfig{attrs: *attribute.EmptySet()} for _, o := range opts { config = o.applyObserve(config) } return config } // Attributes returns the configured attribute set. func (c ObserveConfig) Attributes() attribute.Set { return c.attrs } // MeasurementOption applies options to all instrument measurements. type MeasurementOption interface { AddOption RecordOption ObserveOption } type attrOpt struct { set attribute.Set } // mergeSets returns the union of keys between a and b. Any duplicate keys will // use the value associated with b. func mergeSets(a, b attribute.Set) attribute.Set { // NewMergeIterator uses the first value for any duplicates. iter := attribute.NewMergeIterator(&b, &a) merged := make([]attribute.KeyValue, 0, a.Len()+b.Len()) for iter.Next() { merged = append(merged, iter.Attribute()) } return attribute.NewSet(merged...) } func (o attrOpt) applyAdd(c AddConfig) AddConfig { switch { case o.set.Len() == 0: case c.attrs.Len() == 0: c.attrs = o.set default: c.attrs = mergeSets(c.attrs, o.set) } return c } func (o attrOpt) applyRecord(c RecordConfig) RecordConfig { switch { case o.set.Len() == 0: case c.attrs.Len() == 0: c.attrs = o.set default: c.attrs = mergeSets(c.attrs, o.set) } return c } func (o attrOpt) applyObserve(c ObserveConfig) ObserveConfig { switch { case o.set.Len() == 0: case c.attrs.Len() == 0: c.attrs = o.set default: c.attrs = mergeSets(c.attrs, o.set) } return c } // WithAttributeSet sets the attribute Set a measurement is made with. // // If multiple WithAttributeSet or WithAttributes options are passed the // attributes will be merged together in the order they are passed. Attributes // with duplicate keys will use the last value passed. func WithAttributeSet(attributes attribute.Set) MeasurementOption { return attrOpt{set: attributes} } // WithAttributes converts attributes into an attribute Set and sets the Set to // be associated with a measurement. This is shorthand for: // // cp := make([]attribute.KeyValue, len(attributes)) // copy(cp, attributes) // WithAttributeSet(attribute.NewSet(cp...)) // // [attribute.NewSet] may modify the passed attributes so this will make a copy // of attributes before creating a set in order to ensure this function is // concurrent safe. This makes this option function less optimized in // comparison to [WithAttributeSet]. Therefore, [WithAttributeSet] should be // preferred for performance sensitive code. // // See [WithAttributeSet] for information about how multiple WithAttributes are // merged.
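//
// A hedged sketch of the performance-sensitive pattern described above (the
// attribute key and the counter variable are illustrative):
//
//	var routeAttrs = attribute.NewSet(attribute.String("route", "/items"))
//
//	// In the hot path, reuse the pre-computed set:
//	counter.Add(ctx, 1, WithAttributeSet(routeAttrs))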
func WithAttributes(attributes ...attribute.KeyValue) MeasurementOption { cp := make([]attribute.KeyValue, len(attributes)) copy(cp, attributes) return attrOpt{set: attribute.NewSet(cp...)} } opentelemetry-go-1.21.0/metric/instrument_test.go000066400000000000000000000076261452547353200222000ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric // import "go.opentelemetry.io/otel/metric" import ( "sync" "testing" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/attribute" ) type attrConf interface { Attributes() attribute.Set } func TestConfigAttrs(t *testing.T) { t.Run("AddConfig", testConfAttr(func(mo ...MeasurementOption) attrConf { opts := make([]AddOption, len(mo)) for i := range mo { opts[i] = mo[i].(AddOption) } return NewAddConfig(opts) })) t.Run("RecordConfig", testConfAttr(func(mo ...MeasurementOption) attrConf { opts := make([]RecordOption, len(mo)) for i := range mo { opts[i] = mo[i].(RecordOption) } return NewRecordConfig(opts) })) t.Run("ObserveConfig", testConfAttr(func(mo ...MeasurementOption) attrConf { opts := make([]ObserveOption, len(mo)) for i := range mo { opts[i] = mo[i].(ObserveOption) } return NewObserveConfig(opts) })) } func testConfAttr(newConf func(...MeasurementOption) attrConf) func(t *testing.T) { return func(t *testing.T) { t.Run("ZeroConfigEmpty", func(t *testing.T) { c := newConf() assert.Equal(t, *attribute.EmptySet(), c.Attributes()) }) t.Run("EmptySet", func(t *testing.T) { c := newConf(WithAttributeSet(*attribute.EmptySet())) assert.Equal(t, *attribute.EmptySet(), c.Attributes()) }) aliceAttr := attribute.String("user", "Alice") alice := attribute.NewSet(aliceAttr) t.Run("SingleWithAttributeSet", func(t *testing.T) { c := newConf(WithAttributeSet(alice)) assert.Equal(t, alice, c.Attributes()) }) t.Run("SingleWithAttributes", func(t *testing.T) { c := newConf(WithAttributes(aliceAttr)) assert.Equal(t, alice, c.Attributes()) }) bobAttr := attribute.String("user", "Bob") bob := attribute.NewSet(bobAttr) t.Run("MultiWithAttributeSet", func(t *testing.T) { c := newConf(WithAttributeSet(alice), WithAttributeSet(bob)) assert.Equal(t, bob, c.Attributes()) }) t.Run("MergedWithAttributes", func(t *testing.T) { c := newConf(WithAttributes(aliceAttr, bobAttr)) assert.Equal(t, bob, c.Attributes()) }) t.Run("MultiWithAttributeSet", func(t *testing.T) { c := newConf(WithAttributes(aliceAttr), WithAttributes(bobAttr)) assert.Equal(t, bob, c.Attributes()) }) t.Run("MergedEmpty", func(t *testing.T) { c := newConf(WithAttributeSet(alice), WithAttributeSet(*attribute.EmptySet())) assert.Equal(t, alice, c.Attributes()) }) } } func TestWithAttributesConcurrentSafe(t *testing.T) { attrs := []attribute.KeyValue{ attribute.String("user", "Alice"), attribute.Bool("admin", true), attribute.String("user", "Bob"), } var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() opt := []AddOption{WithAttributes(attrs...)} _ = NewAddConfig(opt) }() wg.Add(1) go func() { defer wg.Done() opt := 
[]AddOption{WithAttributes(attrs...)} _ = NewAddConfig(opt) }() wg.Add(1) go func() { defer wg.Done() opt := []RecordOption{WithAttributes(attrs...)} _ = NewRecordConfig(opt) }() wg.Add(1) go func() { defer wg.Done() opt := []RecordOption{WithAttributes(attrs...)} _ = NewRecordConfig(opt) }() wg.Add(1) go func() { defer wg.Done() opt := []ObserveOption{WithAttributes(attrs...)} _ = NewObserveConfig(opt) }() wg.Add(1) go func() { defer wg.Done() opt := []ObserveOption{WithAttributes(attrs...)} _ = NewObserveConfig(opt) }() wg.Wait() } opentelemetry-go-1.21.0/metric/meter.go000066400000000000000000000255201452547353200200360ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric // import "go.opentelemetry.io/otel/metric" import ( "context" "go.opentelemetry.io/otel/metric/embedded" ) // MeterProvider provides access to named Meter instances, for instrumenting // an application or package. // // Warning: Methods may be added to this interface in minor releases. See // package documentation on API implementation for information on how to set // default behavior for unimplemented methods. type MeterProvider interface { // Users of the interface can ignore this. This embedded type is only used // by implementations of this interface. See the "API Implementations" // section of the package documentation for more information. embedded.MeterProvider // Meter returns a new Meter with the provided name and configuration. // // A Meter should be scoped at most to a single package. The name needs to // be unique so it does not collide with other names used by // an application, nor other applications. To achieve this, the import path // of the instrumentation package is recommended to be used as name. // // If the name is empty, then an implementation defined default name will // be used instead. Meter(name string, opts ...MeterOption) Meter } // Meter provides access to instrument instances for recording metrics. // // Warning: Methods may be added to this interface in minor releases. See // package documentation on API implementation for information on how to set // default behavior for unimplemented methods. type Meter interface { // Users of the interface can ignore this. This embedded type is only used // by implementations of this interface. See the "API Implementations" // section of the package documentation for more information. embedded.Meter // Int64Counter returns a new Int64Counter instrument identified by name // and configured with options. The instrument is used to synchronously // record increasing int64 measurements during a computational operation. Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) // Int64UpDownCounter returns a new Int64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record int64 measurements during a computational // operation. 
Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) // Int64Histogram returns a new Int64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of int64 measurements during a // computational operation. Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) // Int64ObservableCounter returns a new Int64ObservableCounter identified // by name and configured with options. The instrument is used to // asynchronously record increasing int64 measurements once per a // measurement collection cycle. // // Measurements for the returned instrument are made via a callback. Use // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter // instrument identified by name and configured with options. The // instrument is used to asynchronously record int64 measurements once per // a measurement collection cycle. // // Measurements for the returned instrument are made via a callback. Use // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) // Int64ObservableGauge returns a new Int64ObservableGauge instrument // identified by name and configured with options. The instrument is used // to asynchronously record instantaneous int64 measurements once per a // measurement collection cycle. // // Measurements for the returned instrument are made via a callback. Use // the WithInt64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error) // Float64Counter returns a new Float64Counter instrument identified by // name and configured with options. The instrument is used to // synchronously record increasing float64 measurements during a // computational operation. Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error) // Float64UpDownCounter returns a new Float64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record float64 measurements during a computational // operation. Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) // Float64Histogram returns a new Float64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of float64 measurements during a // computational operation. Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) // Float64ObservableCounter returns a new Float64ObservableCounter // instrument identified by name and configured with options. 
The // instrument is used to asynchronously record increasing float64 // measurements once per a measurement collection cycle. // // Measurements for the returned instrument are made via a callback. Use // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) // Float64ObservableUpDownCounter returns a new // Float64ObservableUpDownCounter instrument identified by name and // configured with options. The instrument is used to asynchronously record // float64 measurements once per a measurement collection cycle. // // Measurements for the returned instrument are made via a callback. Use // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) // Float64ObservableGauge returns a new Float64ObservableGauge instrument // identified by name and configured with options. The instrument is used // to asynchronously record instantaneous float64 measurements once per a // measurement collection cycle. // // Measurements for the returned instrument are made via a callback. Use // the WithFloat64Callback option to register the callback here, or use the // RegisterCallback method of this Meter to register one later. See the // Measurements section of the package documentation for more information. Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error) // RegisterCallback registers f to be called during the collection of a // measurement cycle. // // If Unregister of the returned Registration is called, f needs to be // unregistered and not called during collection. // // The instruments f is registered with are the only instruments that f may // observe values for. // // If no instruments are passed, f should not be registered nor called // during collection. // // The function f needs to be concurrent safe. RegisterCallback(f Callback, instruments ...Observable) (Registration, error) } // Callback is a function registered with a Meter that makes observations for // the set of instruments it is registered with. The Observer parameter is used // to record measurement observations for these instruments. // // The function needs to complete in a finite amount of time and the deadline // of the passed context is expected to be honored. // // The function needs to make unique observations across all registered // Callbacks. Meaning, it should not report measurements for an instrument with // the same attributes as another Callback will report. // // The function needs to be concurrent safe. type Callback func(context.Context, Observer) error // Observer records measurements for multiple instruments in a Callback. // // Warning: Methods may be added to this interface in minor releases. See // package documentation on API implementation for information on how to set // default behavior for unimplemented methods. type Observer interface { // Users of the interface can ignore this. This embedded type is only used // by implementations of this interface. 
See the "API Implementations" // section of the package documentation for more information. embedded.Observer // ObserveFloat64 records the float64 value for obsrv. ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption) // ObserveInt64 records the int64 value for obsrv. ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption) } // Registration is a token representing the unique registration of a callback // for a set of instruments with a Meter. // // Warning: Methods may be added to this interface in minor releases. See // package documentation on API implementation for information on how to set // default behavior for unimplemented methods. type Registration interface { // Users of the interface can ignore this. This embedded type is only used // by implementations of this interface. See the "API Implementations" // section of the package documentation for more information. embedded.Registration // Unregister removes the callback registration from a Meter. // // This method needs to be idempotent and concurrent safe. Unregister() error } opentelemetry-go-1.21.0/metric/noop/000077500000000000000000000000001452547353200173425ustar00rootroot00000000000000opentelemetry-go-1.21.0/metric/noop/noop.go000066400000000000000000000250551452547353200206530ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package noop provides an implementation of the OpenTelemetry metric API that // produces no telemetry and minimizes the computational resources it uses. // // Using this package to implement the OpenTelemetry metric API will // effectively disable OpenTelemetry. // // This implementation can be embedded in other implementations of the // OpenTelemetry metric API. Doing so will mean the implementation defaults to // no operation for methods it does not implement. package noop // import "go.opentelemetry.io/otel/metric/noop" import ( "context" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/embedded" ) var ( // Compile-time check this implements the OpenTelemetry API.
_ metric.MeterProvider = MeterProvider{} _ metric.Meter = Meter{} _ metric.Observer = Observer{} _ metric.Registration = Registration{} _ metric.Int64Counter = Int64Counter{} _ metric.Float64Counter = Float64Counter{} _ metric.Int64UpDownCounter = Int64UpDownCounter{} _ metric.Float64UpDownCounter = Float64UpDownCounter{} _ metric.Int64Histogram = Int64Histogram{} _ metric.Float64Histogram = Float64Histogram{} _ metric.Int64ObservableCounter = Int64ObservableCounter{} _ metric.Float64ObservableCounter = Float64ObservableCounter{} _ metric.Int64ObservableGauge = Int64ObservableGauge{} _ metric.Float64ObservableGauge = Float64ObservableGauge{} _ metric.Int64ObservableUpDownCounter = Int64ObservableUpDownCounter{} _ metric.Float64ObservableUpDownCounter = Float64ObservableUpDownCounter{} _ metric.Int64Observer = Int64Observer{} _ metric.Float64Observer = Float64Observer{} ) // MeterProvider is an OpenTelemetry No-Op MeterProvider. type MeterProvider struct{ embedded.MeterProvider } // NewMeterProvider returns a MeterProvider that does not record any telemetry. func NewMeterProvider() MeterProvider { return MeterProvider{} } // Meter returns an OpenTelemetry Meter that does not record any telemetry. func (MeterProvider) Meter(string, ...metric.MeterOption) metric.Meter { return Meter{} } // Meter is an OpenTelemetry No-Op Meter. type Meter struct{ embedded.Meter } // Int64Counter returns a Counter used to record int64 measurements that // produces no telemetry. func (Meter) Int64Counter(string, ...metric.Int64CounterOption) (metric.Int64Counter, error) { return Int64Counter{}, nil } // Int64UpDownCounter returns an UpDownCounter used to record int64 // measurements that produces no telemetry. func (Meter) Int64UpDownCounter(string, ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { return Int64UpDownCounter{}, nil } // Int64Histogram returns a Histogram used to record int64 measurements that // produces no telemetry. func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { return Int64Histogram{}, nil } // Int64ObservableCounter returns an ObservableCounter used to record int64 // measurements that produces no telemetry. func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { return Int64ObservableCounter{}, nil } // Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to // record int64 measurements that produces no telemetry. func (Meter) Int64ObservableUpDownCounter(string, ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { return Int64ObservableUpDownCounter{}, nil } // Int64ObservableGauge returns an ObservableGauge used to record int64 // measurements that produces no telemetry. func (Meter) Int64ObservableGauge(string, ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { return Int64ObservableGauge{}, nil } // Float64Counter returns a Counter used to record int64 measurements that // produces no telemetry. func (Meter) Float64Counter(string, ...metric.Float64CounterOption) (metric.Float64Counter, error) { return Float64Counter{}, nil } // Float64UpDownCounter returns an UpDownCounter used to record int64 // measurements that produces no telemetry. 
func (Meter) Float64UpDownCounter(string, ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { return Float64UpDownCounter{}, nil } // Float64Histogram returns a Histogram used to record int64 measurements that // produces no telemetry. func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { return Float64Histogram{}, nil } // Float64ObservableCounter returns an ObservableCounter used to record int64 // measurements that produces no telemetry. func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { return Float64ObservableCounter{}, nil } // Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to // record int64 measurements that produces no telemetry. func (Meter) Float64ObservableUpDownCounter(string, ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { return Float64ObservableUpDownCounter{}, nil } // Float64ObservableGauge returns an ObservableGauge used to record int64 // measurements that produces no telemetry. func (Meter) Float64ObservableGauge(string, ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { return Float64ObservableGauge{}, nil } // RegisterCallback performs no operation. func (Meter) RegisterCallback(metric.Callback, ...metric.Observable) (metric.Registration, error) { return Registration{}, nil } // Observer acts as a recorder of measurements for multiple instruments in a // Callback, it performing no operation. type Observer struct{ embedded.Observer } // ObserveFloat64 performs no operation. func (Observer) ObserveFloat64(metric.Float64Observable, float64, ...metric.ObserveOption) { } // ObserveInt64 performs no operation. func (Observer) ObserveInt64(metric.Int64Observable, int64, ...metric.ObserveOption) { } // Registration is the registration of a Callback with a No-Op Meter. type Registration struct{ embedded.Registration } // Unregister unregisters the Callback the Registration represents with the // No-Op Meter. This will always return nil because the No-Op Meter performs no // operation, including hold any record of registrations. func (Registration) Unregister() error { return nil } // Int64Counter is an OpenTelemetry Counter used to record int64 measurements. // It produces no telemetry. type Int64Counter struct{ embedded.Int64Counter } // Add performs no operation. func (Int64Counter) Add(context.Context, int64, ...metric.AddOption) {} // Float64Counter is an OpenTelemetry Counter used to record float64 // measurements. It produces no telemetry. type Float64Counter struct{ embedded.Float64Counter } // Add performs no operation. func (Float64Counter) Add(context.Context, float64, ...metric.AddOption) {} // Int64UpDownCounter is an OpenTelemetry UpDownCounter used to record int64 // measurements. It produces no telemetry. type Int64UpDownCounter struct{ embedded.Int64UpDownCounter } // Add performs no operation. func (Int64UpDownCounter) Add(context.Context, int64, ...metric.AddOption) {} // Float64UpDownCounter is an OpenTelemetry UpDownCounter used to record // float64 measurements. It produces no telemetry. type Float64UpDownCounter struct{ embedded.Float64UpDownCounter } // Add performs no operation. func (Float64UpDownCounter) Add(context.Context, float64, ...metric.AddOption) {} // Int64Histogram is an OpenTelemetry Histogram used to record int64 // measurements. It produces no telemetry. 
type Int64Histogram struct{ embedded.Int64Histogram } // Record performs no operation. func (Int64Histogram) Record(context.Context, int64, ...metric.RecordOption) {} // Float64Histogram is an OpenTelemetry Histogram used to record float64 // measurements. It produces no telemetry. type Float64Histogram struct{ embedded.Float64Histogram } // Record performs no operation. func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {} // Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record // int64 measurements. It produces no telemetry. type Int64ObservableCounter struct { metric.Int64Observable embedded.Int64ObservableCounter } // Float64ObservableCounter is an OpenTelemetry ObservableCounter used to record // float64 measurements. It produces no telemetry. type Float64ObservableCounter struct { metric.Float64Observable embedded.Float64ObservableCounter } // Int64ObservableGauge is an OpenTelemetry ObservableGauge used to record // int64 measurements. It produces no telemetry. type Int64ObservableGauge struct { metric.Int64Observable embedded.Int64ObservableGauge } // Float64ObservableGauge is an OpenTelemetry ObservableGauge used to record // float64 measurements. It produces no telemetry. type Float64ObservableGauge struct { metric.Float64Observable embedded.Float64ObservableGauge } // Int64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter // used to record int64 measurements. It produces no telemetry. type Int64ObservableUpDownCounter struct { metric.Int64Observable embedded.Int64ObservableUpDownCounter } // Float64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter // used to record float64 measurements. It produces no telemetry. type Float64ObservableUpDownCounter struct { metric.Float64Observable embedded.Float64ObservableUpDownCounter } // Int64Observer is a recorder of int64 measurements that performs no operation. type Int64Observer struct{ embedded.Int64Observer } // Observe performs no operation. func (Int64Observer) Observe(int64, ...metric.ObserveOption) {} // Float64Observer is a recorder of float64 measurements that performs no // operation. type Float64Observer struct{ embedded.Float64Observer } // Observe performs no operation. func (Float64Observer) Observe(float64, ...metric.ObserveOption) {} opentelemetry-go-1.21.0/metric/noop/noop_test.go000066400000000000000000000113271452547353200217070ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package noop // import "go.opentelemetry.io/otel/metric/noop" import ( "reflect" "testing" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/metric" ) func TestImplementationNoPanics(t *testing.T) { // Check that if type has an embedded interface and that interface has // methods added to it than the No-Op implementation implements them. 
t.Run("MeterProvider", assertAllExportedMethodNoPanic( reflect.ValueOf(MeterProvider{}), reflect.TypeOf((*metric.MeterProvider)(nil)).Elem(), )) t.Run("Meter", assertAllExportedMethodNoPanic( reflect.ValueOf(Meter{}), reflect.TypeOf((*metric.Meter)(nil)).Elem(), )) t.Run("Observer", assertAllExportedMethodNoPanic( reflect.ValueOf(Observer{}), reflect.TypeOf((*metric.Observer)(nil)).Elem(), )) t.Run("Registration", assertAllExportedMethodNoPanic( reflect.ValueOf(Registration{}), reflect.TypeOf((*metric.Registration)(nil)).Elem(), )) t.Run("Int64Counter", assertAllExportedMethodNoPanic( reflect.ValueOf(Int64Counter{}), reflect.TypeOf((*metric.Int64Counter)(nil)).Elem(), )) t.Run("Float64Counter", assertAllExportedMethodNoPanic( reflect.ValueOf(Float64Counter{}), reflect.TypeOf((*metric.Float64Counter)(nil)).Elem(), )) t.Run("Int64UpDownCounter", assertAllExportedMethodNoPanic( reflect.ValueOf(Int64UpDownCounter{}), reflect.TypeOf((*metric.Int64UpDownCounter)(nil)).Elem(), )) t.Run("Float64UpDownCounter", assertAllExportedMethodNoPanic( reflect.ValueOf(Float64UpDownCounter{}), reflect.TypeOf((*metric.Float64UpDownCounter)(nil)).Elem(), )) t.Run("Int64Histogram", assertAllExportedMethodNoPanic( reflect.ValueOf(Int64Histogram{}), reflect.TypeOf((*metric.Int64Histogram)(nil)).Elem(), )) t.Run("Float64Histogram", assertAllExportedMethodNoPanic( reflect.ValueOf(Float64Histogram{}), reflect.TypeOf((*metric.Float64Histogram)(nil)).Elem(), )) t.Run("Int64ObservableCounter", assertAllExportedMethodNoPanic( reflect.ValueOf(Int64ObservableCounter{}), reflect.TypeOf((*metric.Int64ObservableCounter)(nil)).Elem(), )) t.Run("Float64ObservableCounter", assertAllExportedMethodNoPanic( reflect.ValueOf(Float64ObservableCounter{}), reflect.TypeOf((*metric.Float64ObservableCounter)(nil)).Elem(), )) t.Run("Int64ObservableGauge", assertAllExportedMethodNoPanic( reflect.ValueOf(Int64ObservableGauge{}), reflect.TypeOf((*metric.Int64ObservableGauge)(nil)).Elem(), )) t.Run("Float64ObservableGauge", assertAllExportedMethodNoPanic( reflect.ValueOf(Float64ObservableGauge{}), reflect.TypeOf((*metric.Float64ObservableGauge)(nil)).Elem(), )) t.Run("Int64ObservableUpDownCounter", assertAllExportedMethodNoPanic( reflect.ValueOf(Int64ObservableUpDownCounter{}), reflect.TypeOf((*metric.Int64ObservableUpDownCounter)(nil)).Elem(), )) t.Run("Float64ObservableUpDownCounter", assertAllExportedMethodNoPanic( reflect.ValueOf(Float64ObservableUpDownCounter{}), reflect.TypeOf((*metric.Float64ObservableUpDownCounter)(nil)).Elem(), )) t.Run("Int64Observer", assertAllExportedMethodNoPanic( reflect.ValueOf(Int64Observer{}), reflect.TypeOf((*metric.Int64Observer)(nil)).Elem(), )) t.Run("Float64Observer", assertAllExportedMethodNoPanic( reflect.ValueOf(Float64Observer{}), reflect.TypeOf((*metric.Float64Observer)(nil)).Elem(), )) } func assertAllExportedMethodNoPanic(rVal reflect.Value, rType reflect.Type) func(*testing.T) { return func(t *testing.T) { for n := 0; n < rType.NumMethod(); n++ { mType := rType.Method(n) if !mType.IsExported() { t.Logf("ignoring unexported %s", mType.Name) continue } m := rVal.MethodByName(mType.Name) if !m.IsValid() { t.Errorf("unknown method for %s: %s", rVal.Type().Name(), mType.Name) } numIn := mType.Type.NumIn() if mType.Type.IsVariadic() { numIn-- } args := make([]reflect.Value, numIn) for i := range args { aType := mType.Type.In(i) args[i] = reflect.New(aType).Elem() } assert.NotPanicsf(t, func() { _ = m.Call(args) }, "%s.%s", rVal.Type().Name(), mType.Name) } } } func TestNewMeterProvider(t *testing.T) { mp 
:= NewMeterProvider() assert.Equal(t, mp, MeterProvider{}) meter := mp.Meter("") assert.Equal(t, meter, Meter{}) } opentelemetry-go-1.21.0/metric/syncfloat64.go000066400000000000000000000145101452547353200210730ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric // import "go.opentelemetry.io/otel/metric" import ( "context" "go.opentelemetry.io/otel/metric/embedded" ) // Float64Counter is an instrument that records increasing float64 values. // // Warning: Methods may be added to this interface in minor releases. See // package documentation on API implementation for information on how to set // default behavior for unimplemented methods. type Float64Counter interface { // Users of the interface can ignore this. This embedded type is only used // by implementations of this interface. See the "API Implementations" // section of the package documentation for more information. embedded.Float64Counter // Add records a change to the counter. // // Use the WithAttributeSet (or, if performance is not a concern, // the WithAttributes) option to include measurement attributes. Add(ctx context.Context, incr float64, options ...AddOption) } // Float64CounterConfig contains options for synchronous counter instruments that // record int64 values. type Float64CounterConfig struct { description string unit string } // NewFloat64CounterConfig returns a new [Float64CounterConfig] with all opts // applied. func NewFloat64CounterConfig(opts ...Float64CounterOption) Float64CounterConfig { var config Float64CounterConfig for _, o := range opts { config = o.applyFloat64Counter(config) } return config } // Description returns the configured description. func (c Float64CounterConfig) Description() string { return c.description } // Unit returns the configured unit. func (c Float64CounterConfig) Unit() string { return c.unit } // Float64CounterOption applies options to a [Float64CounterConfig]. See // [InstrumentOption] for other options that can be used as a // Float64CounterOption. type Float64CounterOption interface { applyFloat64Counter(Float64CounterConfig) Float64CounterConfig } // Float64UpDownCounter is an instrument that records increasing or decreasing // float64 values. // // Warning: Methods may be added to this interface in minor releases. See // package documentation on API implementation for information on how to set // default behavior for unimplemented methods. type Float64UpDownCounter interface { // Users of the interface can ignore this. This embedded type is only used // by implementations of this interface. See the "API Implementations" // section of the package documentation for more information. embedded.Float64UpDownCounter // Add records a change to the counter. // // Use the WithAttributeSet (or, if performance is not a concern, // the WithAttributes) option to include measurement attributes. 
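//
// For example, a hedged sketch (the upDown variable and attribute key are
// illustrative); the delta may be positive or negative:
//
//	upDown.Add(ctx, -2.5, WithAttributes(attribute.String("pool", "db")))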
Add(ctx context.Context, incr float64, options ...AddOption) } // Float64UpDownCounterConfig contains options for synchronous counter // instruments that record int64 values. type Float64UpDownCounterConfig struct { description string unit string } // NewFloat64UpDownCounterConfig returns a new [Float64UpDownCounterConfig] // with all opts applied. func NewFloat64UpDownCounterConfig(opts ...Float64UpDownCounterOption) Float64UpDownCounterConfig { var config Float64UpDownCounterConfig for _, o := range opts { config = o.applyFloat64UpDownCounter(config) } return config } // Description returns the configured description. func (c Float64UpDownCounterConfig) Description() string { return c.description } // Unit returns the configured unit. func (c Float64UpDownCounterConfig) Unit() string { return c.unit } // Float64UpDownCounterOption applies options to a // [Float64UpDownCounterConfig]. See [InstrumentOption] for other options that // can be used as a Float64UpDownCounterOption. type Float64UpDownCounterOption interface { applyFloat64UpDownCounter(Float64UpDownCounterConfig) Float64UpDownCounterConfig } // Float64Histogram is an instrument that records a distribution of float64 // values. // // Warning: Methods may be added to this interface in minor releases. See // package documentation on API implementation for information on how to set // default behavior for unimplemented methods. type Float64Histogram interface { // Users of the interface can ignore this. This embedded type is only used // by implementations of this interface. See the "API Implementations" // section of the package documentation for more information. embedded.Float64Histogram // Record adds an additional value to the distribution. // // Use the WithAttributeSet (or, if performance is not a concern, // the WithAttributes) option to include measurement attributes. Record(ctx context.Context, incr float64, options ...RecordOption) } // Float64HistogramConfig contains options for synchronous counter instruments // that record int64 values. type Float64HistogramConfig struct { description string unit string explicitBucketBoundaries []float64 } // NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all // opts applied. func NewFloat64HistogramConfig(opts ...Float64HistogramOption) Float64HistogramConfig { var config Float64HistogramConfig for _, o := range opts { config = o.applyFloat64Histogram(config) } return config } // Description returns the configured description. func (c Float64HistogramConfig) Description() string { return c.description } // Unit returns the configured unit. func (c Float64HistogramConfig) Unit() string { return c.unit } // ExplicitBucketBoundaries returns the configured explicit bucket boundaries. func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 { return c.explicitBucketBoundaries } // Float64HistogramOption applies options to a [Float64HistogramConfig]. See // [InstrumentOption] for other options that can be used as a // Float64HistogramOption. type Float64HistogramOption interface { applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig } opentelemetry-go-1.21.0/metric/syncfloat64_test.go000066400000000000000000000032641452547353200221360ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric // import "go.opentelemetry.io/otel/metric" import ( "testing" "github.com/stretchr/testify/assert" ) func TestFloat64Configuration(t *testing.T) { const ( token float64 = 43 desc = "Instrument description." uBytes = "By" ) run := func(got float64Config) func(*testing.T) { return func(t *testing.T) { assert.Equal(t, desc, got.Description(), "description") assert.Equal(t, uBytes, got.Unit(), "unit") } } t.Run("Float64Counter", run( NewFloat64CounterConfig(WithDescription(desc), WithUnit(uBytes)), )) t.Run("Float64UpDownCounter", run( NewFloat64UpDownCounterConfig(WithDescription(desc), WithUnit(uBytes)), )) t.Run("Float64Histogram", run( NewFloat64HistogramConfig(WithDescription(desc), WithUnit(uBytes)), )) } type float64Config interface { Description() string Unit() string } func TestFloat64ExplicitBucketHistogramConfiguration(t *testing.T) { bounds := []float64{0.1, 0.5, 1.0} got := NewFloat64HistogramConfig(WithExplicitBucketBoundaries(bounds...)) assert.Equal(t, bounds, got.ExplicitBucketBoundaries(), "boundaries") } opentelemetry-go-1.21.0/metric/syncint64.go000066400000000000000000000142771452547353200205720ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric // import "go.opentelemetry.io/otel/metric" import ( "context" "go.opentelemetry.io/otel/metric/embedded" ) // Int64Counter is an instrument that records increasing int64 values. // // Warning: Methods may be added to this interface in minor releases. See // package documentation on API implementation for information on how to set // default behavior for unimplemented methods. type Int64Counter interface { // Users of the interface can ignore this. This embedded type is only used // by implementations of this interface. See the "API Implementations" // section of the package documentation for more information. embedded.Int64Counter // Add records a change to the counter. // // Use the WithAttributeSet (or, if performance is not a concern, // the WithAttributes) option to include measurement attributes. Add(ctx context.Context, incr int64, options ...AddOption) } // Int64CounterConfig contains options for synchronous counter instruments that // record int64 values. type Int64CounterConfig struct { description string unit string } // NewInt64CounterConfig returns a new [Int64CounterConfig] with all opts // applied. 
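//
// A brief sketch of typical use (the description and unit values are
// illustrative assumptions, not defaults of this package):
//
//	cfg := NewInt64CounterConfig(WithDescription("Number of requests served."), WithUnit("{request}"))
//	_ = cfg.Description() // "Number of requests served."
//	_ = cfg.Unit()        // "{request}"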
func NewInt64CounterConfig(opts ...Int64CounterOption) Int64CounterConfig { var config Int64CounterConfig for _, o := range opts { config = o.applyInt64Counter(config) } return config } // Description returns the configured description. func (c Int64CounterConfig) Description() string { return c.description } // Unit returns the configured unit. func (c Int64CounterConfig) Unit() string { return c.unit } // Int64CounterOption applies options to a [Int64CounterConfig]. See // [InstrumentOption] for other options that can be used as an // Int64CounterOption. type Int64CounterOption interface { applyInt64Counter(Int64CounterConfig) Int64CounterConfig } // Int64UpDownCounter is an instrument that records increasing or decreasing // int64 values. // // Warning: Methods may be added to this interface in minor releases. See // package documentation on API implementation for information on how to set // default behavior for unimplemented methods. type Int64UpDownCounter interface { // Users of the interface can ignore this. This embedded type is only used // by implementations of this interface. See the "API Implementations" // section of the package documentation for more information. embedded.Int64UpDownCounter // Add records a change to the counter. // // Use the WithAttributeSet (or, if performance is not a concern, // the WithAttributes) option to include measurement attributes. Add(ctx context.Context, incr int64, options ...AddOption) } // Int64UpDownCounterConfig contains options for synchronous counter // instruments that record int64 values. type Int64UpDownCounterConfig struct { description string unit string } // NewInt64UpDownCounterConfig returns a new [Int64UpDownCounterConfig] with // all opts applied. func NewInt64UpDownCounterConfig(opts ...Int64UpDownCounterOption) Int64UpDownCounterConfig { var config Int64UpDownCounterConfig for _, o := range opts { config = o.applyInt64UpDownCounter(config) } return config } // Description returns the configured description. func (c Int64UpDownCounterConfig) Description() string { return c.description } // Unit returns the configured unit. func (c Int64UpDownCounterConfig) Unit() string { return c.unit } // Int64UpDownCounterOption applies options to a [Int64UpDownCounterConfig]. // See [InstrumentOption] for other options that can be used as an // Int64UpDownCounterOption. type Int64UpDownCounterOption interface { applyInt64UpDownCounter(Int64UpDownCounterConfig) Int64UpDownCounterConfig } // Int64Histogram is an instrument that records a distribution of int64 // values. // // Warning: Methods may be added to this interface in minor releases. See // package documentation on API implementation for information on how to set // default behavior for unimplemented methods. type Int64Histogram interface { // Users of the interface can ignore this. This embedded type is only used // by implementations of this interface. See the "API Implementations" // section of the package documentation for more information. embedded.Int64Histogram // Record adds an additional value to the distribution. // // Use the WithAttributeSet (or, if performance is not a concern, // the WithAttributes) option to include measurement attributes. Record(ctx context.Context, incr int64, options ...RecordOption) } // Int64HistogramConfig contains options for synchronous counter instruments // that record int64 values. 
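//
// Bucket boundaries can be configured with [WithExplicitBucketBoundaries]
// (a sketch; the boundary values are illustrative):
//
//	cfg := NewInt64HistogramConfig(WithExplicitBucketBoundaries(0.1, 0.5, 1.0))
//	_ = cfg.ExplicitBucketBoundaries() // []float64{0.1, 0.5, 1.0}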
type Int64HistogramConfig struct { description string unit string explicitBucketBoundaries []float64 } // NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts // applied. func NewInt64HistogramConfig(opts ...Int64HistogramOption) Int64HistogramConfig { var config Int64HistogramConfig for _, o := range opts { config = o.applyInt64Histogram(config) } return config } // Description returns the configured description. func (c Int64HistogramConfig) Description() string { return c.description } // Unit returns the configured unit. func (c Int64HistogramConfig) Unit() string { return c.unit } // ExplicitBucketBoundaries returns the configured explicit bucket boundaries. func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 { return c.explicitBucketBoundaries } // Int64HistogramOption applies options to a [Int64HistogramConfig]. See // [InstrumentOption] for other options that can be used as an // Int64HistogramOption. type Int64HistogramOption interface { applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig } opentelemetry-go-1.21.0/metric/syncint64_test.go000066400000000000000000000032301452547353200216140ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric // import "go.opentelemetry.io/otel/metric" import ( "testing" "github.com/stretchr/testify/assert" ) func TestInt64Configuration(t *testing.T) { const ( token int64 = 43 desc = "Instrument description." uBytes = "By" ) run := func(got int64Config) func(*testing.T) { return func(t *testing.T) { assert.Equal(t, desc, got.Description(), "description") assert.Equal(t, uBytes, got.Unit(), "unit") } } t.Run("Int64Counter", run( NewInt64CounterConfig(WithDescription(desc), WithUnit(uBytes)), )) t.Run("Int64UpDownCounter", run( NewInt64UpDownCounterConfig(WithDescription(desc), WithUnit(uBytes)), )) t.Run("Int64Histogram", run( NewInt64HistogramConfig(WithDescription(desc), WithUnit(uBytes)), )) } type int64Config interface { Description() string Unit() string } func TestInt64ExplicitBucketHistogramConfiguration(t *testing.T) { bounds := []float64{0.1, 0.5, 1.0} got := NewInt64HistogramConfig(WithExplicitBucketBoundaries(bounds...)) assert.Equal(t, bounds, got.ExplicitBucketBoundaries(), "boundaries") } opentelemetry-go-1.21.0/metric_test.go000066400000000000000000000023761452547353200177650ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package otel // import "go.opentelemetry.io/otel" import ( "testing" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/embedded" "go.opentelemetry.io/otel/metric/noop" ) type testMeterProvider struct{ embedded.MeterProvider } var _ metric.MeterProvider = &testMeterProvider{} func (*testMeterProvider) Meter(_ string, _ ...metric.MeterOption) metric.Meter { return noop.NewMeterProvider().Meter("") } func TestMultipleGlobalMeterProvider(t *testing.T) { p1 := testMeterProvider{} p2 := noop.NewMeterProvider() SetMeterProvider(&p1) SetMeterProvider(p2) got := GetMeterProvider() assert.Equal(t, p2, got) } opentelemetry-go-1.21.0/propagation.go000066400000000000000000000022031452547353200177530ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otel // import "go.opentelemetry.io/otel" import ( "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/propagation" ) // GetTextMapPropagator returns the global TextMapPropagator. If none has been // set, a No-Op TextMapPropagator is returned. func GetTextMapPropagator() propagation.TextMapPropagator { return global.TextMapPropagator() } // SetTextMapPropagator sets propagator as the global TextMapPropagator. func SetTextMapPropagator(propagator propagation.TextMapPropagator) { global.SetTextMapPropagator(propagator) } opentelemetry-go-1.21.0/propagation/000077500000000000000000000000001452547353200174275ustar00rootroot00000000000000opentelemetry-go-1.21.0/propagation/baggage.go000066400000000000000000000033341452547353200213360ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package propagation // import "go.opentelemetry.io/otel/propagation" import ( "context" "go.opentelemetry.io/otel/baggage" ) const baggageHeader = "baggage" // Baggage is a propagator that supports the W3C Baggage format. // // This propagates user-defined baggage associated with a trace. The complete // specification is defined at https://www.w3.org/TR/baggage/. type Baggage struct{} var _ TextMapPropagator = Baggage{} // Inject sets baggage key-values from ctx into the carrier. func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) { bStr := baggage.FromContext(ctx).String() if bStr != "" { carrier.Set(baggageHeader, bStr) } } // Extract returns a copy of parent with the baggage from the carrier added. 
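//
// A minimal sketch (the baggage header content is illustrative):
//
//	carrier := propagation.MapCarrier{"baggage": "key1=val1,key2=val2"}
//	ctx := propagation.Baggage{}.Extract(context.Background(), carrier)
//	bag := baggage.FromContext(ctx) // contains key1 and key2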
func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context { bStr := carrier.Get(baggageHeader) if bStr == "" { return parent } bag, err := baggage.Parse(bStr) if err != nil { return parent } return baggage.ContextWithBaggage(parent, bag) } // Fields returns the keys who's values are set with Inject. func (b Baggage) Fields() []string { return []string{baggageHeader} } opentelemetry-go-1.21.0/propagation/baggage_test.go000066400000000000000000000135141452547353200223760ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package propagation_test import ( "context" "net/http" "net/url" "strings" "testing" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/baggage" "go.opentelemetry.io/otel/propagation" ) type property struct { Key, Value string } type member struct { Key, Value string Properties []property } func (m member) Member(t *testing.T) baggage.Member { props := make([]baggage.Property, 0, len(m.Properties)) for _, p := range m.Properties { p, err := baggage.NewKeyValueProperty(p.Key, p.Value) if err != nil { t.Fatal(err) } props = append(props, p) } bMember, err := baggage.NewMember(m.Key, url.QueryEscape(m.Value), props...) if err != nil { t.Fatal(err) } return bMember } type members []member func (m members) Baggage(t *testing.T) baggage.Baggage { bMembers := make([]baggage.Member, 0, len(m)) for _, mem := range m { bMembers = append(bMembers, mem.Member(t)) } bag, err := baggage.New(bMembers...) 
if err != nil { t.Fatal(err) } return bag } func TestExtractValidBaggageFromHTTPReq(t *testing.T) { prop := propagation.TextMapPropagator(propagation.Baggage{}) tests := []struct { name string header string want members }{ { name: "valid w3cHeader", header: "key1=val1,key2=val2", want: members{ {Key: "key1", Value: "val1"}, {Key: "key2", Value: "val2"}, }, }, { name: "valid w3cHeader with spaces", header: "key1 = val1, key2 =val2 ", want: members{ {Key: "key1", Value: "val1"}, {Key: "key2", Value: "val2"}, }, }, { name: "valid w3cHeader with properties", header: "key1=val1,key2=val2;prop=1", want: members{ {Key: "key1", Value: "val1"}, { Key: "key2", Value: "val2", Properties: []property{ {Key: "prop", Value: "1"}, }, }, }, }, { name: "valid header with an invalid header", header: "key1=val1,key2=val2,a,val3", want: members{}, }, { name: "valid header with no value", header: "key1=,key2=val2", want: members{ {Key: "key1", Value: ""}, {Key: "key2", Value: "val2"}, }, }, { name: "valid header with url encoded string", header: "key1=val%252", want: members{ {Key: "key1", Value: "val%2"}, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { req, _ := http.NewRequest("GET", "http://example.com", nil) req.Header.Set("baggage", tt.header) ctx := context.Background() ctx = prop.Extract(ctx, propagation.HeaderCarrier(req.Header)) expected := tt.want.Baggage(t) assert.Equal(t, expected, baggage.FromContext(ctx)) }) } } func TestExtractInvalidDistributedContextFromHTTPReq(t *testing.T) { prop := propagation.TextMapPropagator(propagation.Baggage{}) tests := []struct { name string header string has members }{ { name: "no key values", header: "header1", }, { name: "invalid header with existing context", header: "header2", has: members{ {Key: "key1", Value: "val1"}, {Key: "key2", Value: "val2"}, }, }, { name: "empty header value", header: "", has: members{ {Key: "key1", Value: "val1"}, {Key: "key2", Value: "val2"}, }, }, { name: "with properties", header: "key1=val1,key2=val2;prop=1", has: members{ {Key: "key1", Value: "val1"}, { Key: "key2", Value: "val2", Properties: []property{ {Key: "prop", Value: "1"}, }, }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { req, _ := http.NewRequest("GET", "http://example.com", nil) req.Header.Set("baggage", tt.header) expected := tt.has.Baggage(t) ctx := baggage.ContextWithBaggage(context.Background(), expected) ctx = prop.Extract(ctx, propagation.HeaderCarrier(req.Header)) assert.Equal(t, expected, baggage.FromContext(ctx)) }) } } func TestInjectBaggageToHTTPReq(t *testing.T) { propagator := propagation.Baggage{} tests := []struct { name string mems members wantInHeader []string }{ { name: "two simple values", mems: members{ {Key: "key1", Value: "val1"}, {Key: "key2", Value: "val2"}, }, wantInHeader: []string{"key1=val1", "key2=val2"}, }, { name: "values with escaped chars", mems: members{ {Key: "key2", Value: "val3=4"}, }, wantInHeader: []string{"key2=val3%3D4"}, }, { name: "with properties", mems: members{ {Key: "key1", Value: "val1"}, { Key: "key2", Value: "val2", Properties: []property{ {Key: "prop", Value: "1"}, }, }, }, wantInHeader: []string{ "key1=val1", "key2=val2;prop=1", }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { req, _ := http.NewRequest("GET", "http://example.com", nil) ctx := baggage.ContextWithBaggage(context.Background(), tt.mems.Baggage(t)) propagator.Inject(ctx, propagation.HeaderCarrier(req.Header)) got := strings.Split(req.Header.Get("baggage"), ",") assert.ElementsMatch(t, 
tt.wantInHeader, got) }) } } func TestBaggagePropagatorGetAllKeys(t *testing.T) { var propagator propagation.Baggage want := []string{"baggage"} got := propagator.Fields() if diff := cmp.Diff(got, want); diff != "" { t.Errorf("GetAllKeys: -got +want %s", diff) } } opentelemetry-go-1.21.0/propagation/doc.go000066400000000000000000000017771452547353200205370ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* Package propagation contains OpenTelemetry context propagators. OpenTelemetry propagators are used to extract and inject context data from and into messages exchanged by applications. The propagator supported by this package is the W3C Trace Context encoding (https://www.w3.org/TR/trace-context/), and W3C Baggage (https://www.w3.org/TR/baggage/). */ package propagation // import "go.opentelemetry.io/otel/propagation" opentelemetry-go-1.21.0/propagation/propagation.go000066400000000000000000000117211452547353200223030ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package propagation // import "go.opentelemetry.io/otel/propagation" import ( "context" "net/http" ) // TextMapCarrier is the storage medium used by a TextMapPropagator. type TextMapCarrier interface { // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. // Get returns the value associated with the passed key. Get(key string) string // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. // Set stores the key-value pair. Set(key string, value string) // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. // Keys lists the keys stored in this carrier. Keys() []string // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. } // MapCarrier is a TextMapCarrier that uses a map held in memory as a storage // medium for propagated key-value pairs. type MapCarrier map[string]string // Compile time check that MapCarrier implements the TextMapCarrier. var _ TextMapCarrier = MapCarrier{} // Get returns the value associated with the passed key. func (c MapCarrier) Get(key string) string { return c[key] } // Set stores the key-value pair. 
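//
// For example (a sketch; the header value is illustrative):
//
//	c := propagation.MapCarrier{}
//	c.Set("traceparent", "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01")
//	_ = c.Get("traceparent")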
func (c MapCarrier) Set(key, value string) { c[key] = value } // Keys lists the keys stored in this carrier. func (c MapCarrier) Keys() []string { keys := make([]string, 0, len(c)) for k := range c { keys = append(keys, k) } return keys } // HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface. type HeaderCarrier http.Header // Get returns the value associated with the passed key. func (hc HeaderCarrier) Get(key string) string { return http.Header(hc).Get(key) } // Set stores the key-value pair. func (hc HeaderCarrier) Set(key string, value string) { http.Header(hc).Set(key, value) } // Keys lists the keys stored in this carrier. func (hc HeaderCarrier) Keys() []string { keys := make([]string, 0, len(hc)) for k := range hc { keys = append(keys, k) } return keys } // TextMapPropagator propagates cross-cutting concerns as key-value text // pairs within a carrier that travels in-band across process boundaries. type TextMapPropagator interface { // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. // Inject set cross-cutting concerns from the Context into the carrier. Inject(ctx context.Context, carrier TextMapCarrier) // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. // Extract reads cross-cutting concerns from the carrier into a Context. Extract(ctx context.Context, carrier TextMapCarrier) context.Context // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. // Fields returns the keys whose values are set with Inject. Fields() []string // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. } type compositeTextMapPropagator []TextMapPropagator func (p compositeTextMapPropagator) Inject(ctx context.Context, carrier TextMapCarrier) { for _, i := range p { i.Inject(ctx, carrier) } } func (p compositeTextMapPropagator) Extract(ctx context.Context, carrier TextMapCarrier) context.Context { for _, i := range p { ctx = i.Extract(ctx, carrier) } return ctx } func (p compositeTextMapPropagator) Fields() []string { unique := make(map[string]struct{}) for _, i := range p { for _, k := range i.Fields() { unique[k] = struct{}{} } } fields := make([]string, 0, len(unique)) for k := range unique { fields = append(fields, k) } return fields } // NewCompositeTextMapPropagator returns a unified TextMapPropagator from the // group of passed TextMapPropagator. This allows different cross-cutting // concerns to be propagates in a unified manner. // // The returned TextMapPropagator will inject and extract cross-cutting // concerns in the order the TextMapPropagators were provided. Additionally, // the Fields method will return a de-duplicated slice of the keys that are // set with the Inject method. func NewCompositeTextMapPropagator(p ...TextMapPropagator) TextMapPropagator { return compositeTextMapPropagator(p) } opentelemetry-go-1.21.0/propagation/propagation_test.go000066400000000000000000000064131452547353200233440ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package propagation_test import ( "context" "sort" "strings" "testing" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/propagation" ) type ctxKeyType uint var ctxKey ctxKeyType type carrier []string func (c *carrier) Keys() []string { return nil } func (c *carrier) Get(string) string { return "" } func (c *carrier) Set(setter, _ string) { *c = append(*c, setter) } type propagator struct { Name string } func (p propagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) { carrier.Set(p.Name, "") } func (p propagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context { v := ctx.Value(ctxKey) if v == nil { ctx = context.WithValue(ctx, ctxKey, []string{p.Name}) } else { orig := v.([]string) ctx = context.WithValue(ctx, ctxKey, append(orig, p.Name)) } return ctx } func (p propagator) Fields() []string { return []string{p.Name} } func TestCompositeTextMapPropagatorFields(t *testing.T) { a, b1, b2 := propagator{"a"}, propagator{"b"}, propagator{"b"} want := map[string]struct{}{ "a": {}, "b": {}, } got := propagation.NewCompositeTextMapPropagator(a, b1, b2).Fields() if len(got) != len(want) { t.Fatalf("invalid fields from composite: %v (want %v)", got, want) } for _, v := range got { if _, ok := want[v]; !ok { t.Errorf("invalid field returned from composite: %q", v) } } } func TestCompositeTextMapPropagatorInject(t *testing.T) { a, b := propagator{"a"}, propagator{"b"} c := make(carrier, 0, 2) propagation.NewCompositeTextMapPropagator(a, b).Inject(context.Background(), &c) if got := strings.Join([]string(c), ","); got != "a,b" { t.Errorf("invalid inject order: %s", got) } } func TestCompositeTextMapPropagatorExtract(t *testing.T) { a, b := propagator{"a"}, propagator{"b"} ctx := context.Background() ctx = propagation.NewCompositeTextMapPropagator(a, b).Extract(ctx, nil) v := ctx.Value(ctxKey) if v == nil { t.Fatal("no composite extraction") } if got := strings.Join(v.([]string), ","); got != "a,b" { t.Errorf("invalid extract order: %s", got) } } func TestMapCarrierGet(t *testing.T) { carrier := propagation.MapCarrier{ "foo": "bar", "baz": "qux", } assert.Equal(t, carrier.Get("foo"), "bar") assert.Equal(t, carrier.Get("baz"), "qux") } func TestMapCarrierSet(t *testing.T) { carrier := make(propagation.MapCarrier) carrier.Set("foo", "bar") carrier.Set("baz", "qux") assert.Equal(t, carrier["foo"], "bar") assert.Equal(t, carrier["baz"], "qux") } func TestMapCarrierKeys(t *testing.T) { carrier := propagation.MapCarrier{ "foo": "bar", "baz": "qux", } keys := carrier.Keys() sort.Strings(keys) assert.Equal(t, []string{"baz", "foo"}, keys) } opentelemetry-go-1.21.0/propagation/propagators_test.go000066400000000000000000000064441452547353200233660ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package propagation_test import ( "context" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" ) const ( traceIDStr = "4bf92f3577b34da6a3ce929d0e0e4736" spanIDStr = "00f067aa0ba902b7" ) var ( traceID = mustTraceIDFromHex(traceIDStr) spanID = mustSpanIDFromHex(spanIDStr) ) func mustTraceIDFromHex(s string) (t trace.TraceID) { var err error t, err = trace.TraceIDFromHex(s) if err != nil { panic(err) } return } func mustSpanIDFromHex(s string) (t trace.SpanID) { var err error t, err = trace.SpanIDFromHex(s) if err != nil { panic(err) } return } type outOfThinAirPropagator struct { t *testing.T } var _ propagation.TextMapPropagator = outOfThinAirPropagator{} func (p outOfThinAirPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context { sc := trace.NewSpanContext(trace.SpanContextConfig{ TraceID: traceID, SpanID: spanID, TraceFlags: 0, }) require.True(p.t, sc.IsValid()) return trace.ContextWithRemoteSpanContext(ctx, sc) } func (outOfThinAirPropagator) Inject(context.Context, propagation.TextMapCarrier) {} func (outOfThinAirPropagator) Fields() []string { return nil } type nilCarrier struct{} var _ propagation.TextMapCarrier = nilCarrier{} func (nilCarrier) Keys() []string { return nil } func (nilCarrier) Get(key string) string { return "" } func (nilCarrier) Set(key string, value string) {} func TestMultiplePropagators(t *testing.T) { ootaProp := outOfThinAirPropagator{t: t} ns := nilCarrier{} testProps := []propagation.TextMapPropagator{ propagation.TraceContext{}, } bg := context.Background() // sanity check of oota propagator, ensuring that it really // generates the valid span context out of thin air { ctx := ootaProp.Extract(bg, ns) sc := trace.SpanContextFromContext(ctx) require.True(t, sc.IsValid(), "oota prop failed sanity check") require.True(t, sc.IsRemote(), "oota prop is remote") } // sanity check for real propagators, ensuring that they // really are not putting any valid span context into an empty // go context in absence of the HTTP headers. for _, prop := range testProps { ctx := prop.Extract(bg, ns) sc := trace.SpanContextFromContext(ctx) require.Falsef(t, sc.IsValid(), "%#v failed sanity check", prop) require.Falsef(t, sc.IsRemote(), "%#v prop set a remote", prop) } for _, prop := range testProps { props := propagation.NewCompositeTextMapPropagator(ootaProp, prop) ctx := props.Extract(bg, ns) sc := trace.SpanContextFromContext(ctx) assert.Truef(t, sc.IsRemote(), "%#v prop is remote", prop) assert.Truef(t, sc.IsValid(), "%#v clobbers span context", prop) } } opentelemetry-go-1.21.0/propagation/trace_context.go000066400000000000000000000107501452547353200226230ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package propagation // import "go.opentelemetry.io/otel/propagation" import ( "context" "encoding/hex" "fmt" "regexp" "go.opentelemetry.io/otel/trace" ) const ( supportedVersion = 0 maxVersion = 254 traceparentHeader = "traceparent" tracestateHeader = "tracestate" ) // TraceContext is a propagator that supports the W3C Trace Context format // (https://www.w3.org/TR/trace-context/) // // This propagator will propagate the traceparent and tracestate headers to // guarantee traces are not broken. It is up to the users of this propagator // to choose if they want to participate in a trace by modifying the // traceparent header and relevant parts of the tracestate header containing // their proprietary information. type TraceContext struct{} var ( _ TextMapPropagator = TraceContext{} traceCtxRegExp = regexp.MustCompile("^(?P[0-9a-f]{2})-(?P[a-f0-9]{32})-(?P[a-f0-9]{16})-(?P[a-f0-9]{2})(?:-.*)?$") ) // Inject set tracecontext from the Context into the carrier. func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { sc := trace.SpanContextFromContext(ctx) if !sc.IsValid() { return } if ts := sc.TraceState().String(); ts != "" { carrier.Set(tracestateHeader, ts) } // Clear all flags other than the trace-context supported sampling bit. flags := sc.TraceFlags() & trace.FlagsSampled h := fmt.Sprintf("%.2x-%s-%s-%s", supportedVersion, sc.TraceID(), sc.SpanID(), flags) carrier.Set(traceparentHeader, h) } // Extract reads tracecontext from the carrier into a returned Context. // // The returned Context will be a copy of ctx and contain the extracted // tracecontext as the remote SpanContext. If the extracted tracecontext is // invalid, the passed ctx will be returned directly instead. 
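//
// A minimal sketch (the traceparent value is illustrative):
//
//	carrier := propagation.MapCarrier{
//		"traceparent": "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01",
//	}
//	ctx := propagation.TraceContext{}.Extract(context.Background(), carrier)
//	sc := trace.SpanContextFromContext(ctx) // remote, sampled span context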
func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) context.Context { sc := tc.extract(carrier) if !sc.IsValid() { return ctx } return trace.ContextWithRemoteSpanContext(ctx, sc) } func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { h := carrier.Get(traceparentHeader) if h == "" { return trace.SpanContext{} } matches := traceCtxRegExp.FindStringSubmatch(h) if len(matches) == 0 { return trace.SpanContext{} } if len(matches) < 5 { // four subgroups plus the overall match return trace.SpanContext{} } if len(matches[1]) != 2 { return trace.SpanContext{} } ver, err := hex.DecodeString(matches[1]) if err != nil { return trace.SpanContext{} } version := int(ver[0]) if version > maxVersion { return trace.SpanContext{} } if version == 0 && len(matches) != 5 { // four subgroups plus the overall match return trace.SpanContext{} } if len(matches[2]) != 32 { return trace.SpanContext{} } var scc trace.SpanContextConfig scc.TraceID, err = trace.TraceIDFromHex(matches[2][:32]) if err != nil { return trace.SpanContext{} } if len(matches[3]) != 16 { return trace.SpanContext{} } scc.SpanID, err = trace.SpanIDFromHex(matches[3]) if err != nil { return trace.SpanContext{} } if len(matches[4]) != 2 { return trace.SpanContext{} } opts, err := hex.DecodeString(matches[4]) if err != nil || len(opts) < 1 || (version == 0 && opts[0] > 2) { return trace.SpanContext{} } // Clear all flags other than the trace-context supported sampling bit. scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled // Ignore the error returned here. Failure to parse tracestate MUST NOT // affect the parsing of traceparent according to the W3C tracecontext // specification. scc.TraceState, _ = trace.ParseTraceState(carrier.Get(tracestateHeader)) scc.Remote = true sc := trace.NewSpanContext(scc) if !sc.IsValid() { return trace.SpanContext{} } return sc } // Fields returns the keys who's values are set with Inject. func (tc TraceContext) Fields() []string { return []string{traceparentHeader, tracestateHeader} } opentelemetry-go-1.21.0/propagation/trace_context_benchmark_test.go000066400000000000000000000050421452547353200256720ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package propagation_test import ( "context" "net/http" "testing" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" ) func BenchmarkInject(b *testing.B) { var t propagation.TraceContext injectSubBenchmarks(b, func(ctx context.Context, b *testing.B) { h := http.Header{} b.ResetTimer() for i := 0; i < b.N; i++ { t.Inject(ctx, propagation.HeaderCarrier(h)) } }) } func injectSubBenchmarks(b *testing.B, fn func(context.Context, *testing.B)) { b.Run("SampledSpanContext", func(b *testing.B) { b.ReportAllocs() sc := trace.NewSpanContext(trace.SpanContextConfig{ TraceID: traceID, SpanID: spanID, TraceFlags: trace.FlagsSampled, }) ctx := trace.ContextWithRemoteSpanContext(context.Background(), sc) fn(ctx, b) }) b.Run("WithoutSpanContext", func(b *testing.B) { b.ReportAllocs() ctx := context.Background() fn(ctx, b) }) } func BenchmarkExtract(b *testing.B) { extractSubBenchmarks(b, func(b *testing.B, req *http.Request) { var propagator propagation.TraceContext ctx := context.Background() b.ResetTimer() for i := 0; i < b.N; i++ { propagator.Extract(ctx, propagation.HeaderCarrier(req.Header)) } }) } func extractSubBenchmarks(b *testing.B, fn func(*testing.B, *http.Request)) { b.Run("Sampled", func(b *testing.B) { req, _ := http.NewRequest("GET", "http://example.com", nil) req.Header.Set("traceparent", "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01") b.ReportAllocs() fn(b, req) }) b.Run("BogusVersion", func(b *testing.B) { req, _ := http.NewRequest("GET", "http://example.com", nil) req.Header.Set("traceparent", "qw-00000000000000000000000000000000-0000000000000000-01") b.ReportAllocs() fn(b, req) }) b.Run("FutureAdditionalData", func(b *testing.B) { req, _ := http.NewRequest("GET", "http://example.com", nil) req.Header.Set("traceparent", "02-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-09-XYZxsf09") b.ReportAllocs() fn(b, req) }) } opentelemetry-go-1.21.0/propagation/trace_context_example_test.go000066400000000000000000000015231452547353200253730ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package propagation_test import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/propagation" ) func ExampleTraceContext() { tc := propagation.TraceContext{} // Register the TraceContext propagator globally. otel.SetTextMapPropagator(tc) } opentelemetry-go-1.21.0/propagation/trace_context_test.go000066400000000000000000000225561452547353200236710ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package propagation_test import ( "context" "net/http" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" ) var ( traceparent = http.CanonicalHeaderKey("traceparent") tracestate = http.CanonicalHeaderKey("tracestate") prop = propagation.TraceContext{} ) type testcase struct { name string header http.Header sc trace.SpanContext } func TestExtractValidTraceContext(t *testing.T) { stateStr := "key1=value1,key2=value2" state, err := trace.ParseTraceState(stateStr) require.NoError(t, err) tests := []testcase{ { name: "not sampled", header: http.Header{ traceparent: []string{"00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-00"}, }, sc: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: traceID, SpanID: spanID, Remote: true, }), }, { name: "sampled", header: http.Header{ traceparent: []string{"00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"}, }, sc: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: traceID, SpanID: spanID, TraceFlags: trace.FlagsSampled, Remote: true, }), }, { name: "valid tracestate", header: http.Header{ traceparent: []string{"00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-00"}, tracestate: []string{stateStr}, }, sc: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: traceID, SpanID: spanID, TraceState: state, Remote: true, }), }, { name: "invalid tracestate preserves traceparent", header: http.Header{ traceparent: []string{"00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-00"}, tracestate: []string{"invalid$@#=invalid"}, }, sc: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: traceID, SpanID: spanID, Remote: true, }), }, { name: "future version not sampled", header: http.Header{ traceparent: []string{"02-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-00"}, }, sc: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: traceID, SpanID: spanID, Remote: true, }), }, { name: "future version sampled", header: http.Header{ traceparent: []string{"02-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"}, }, sc: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: traceID, SpanID: spanID, TraceFlags: trace.FlagsSampled, Remote: true, }), }, { name: "future version sample bit set", header: http.Header{ traceparent: []string{"02-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-09"}, }, sc: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: traceID, SpanID: spanID, TraceFlags: trace.FlagsSampled, Remote: true, }), }, { name: "future version sample bit not set", header: http.Header{ traceparent: []string{"02-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-08"}, }, sc: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: traceID, SpanID: spanID, Remote: true, }), }, { name: "future version additional data", header: http.Header{ traceparent: []string{"02-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-00-XYZxsf09"}, }, sc: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: traceID, SpanID: spanID, Remote: true, }), }, { name: "B3 format ending in dash", header: http.Header{ traceparent: []string{"00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-00-"}, }, sc: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: traceID, SpanID: spanID, Remote: true, }), }, { name: "future version B3 format ending in dash", header: http.Header{ traceparent: []string{"03-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-00-"}, }, sc: 
trace.NewSpanContext(trace.SpanContextConfig{ TraceID: traceID, SpanID: spanID, Remote: true, }), }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { ctx := context.Background() ctx = prop.Extract(ctx, propagation.HeaderCarrier(tc.header)) assert.Equal(t, tc.sc, trace.SpanContextFromContext(ctx)) }) } } func TestExtractInvalidTraceContextFromHTTPReq(t *testing.T) { tests := []struct { name string header string }{ { name: "wrong version length", header: "0000-00000000000000000000000000000000-0000000000000000-01", }, { name: "wrong trace ID length", header: "00-ab00000000000000000000000000000000-cd00000000000000-01", }, { name: "wrong span ID length", header: "00-ab000000000000000000000000000000-cd0000000000000000-01", }, { name: "wrong trace flag length", header: "00-ab000000000000000000000000000000-cd00000000000000-0100", }, { name: "bogus version", header: "qw-00000000000000000000000000000000-0000000000000000-01", }, { name: "bogus trace ID", header: "00-qw000000000000000000000000000000-cd00000000000000-01", }, { name: "bogus span ID", header: "00-ab000000000000000000000000000000-qw00000000000000-01", }, { name: "bogus trace flag", header: "00-ab000000000000000000000000000000-cd00000000000000-qw", }, { name: "upper case version", header: "A0-00000000000000000000000000000000-0000000000000000-01", }, { name: "upper case trace ID", header: "00-AB000000000000000000000000000000-cd00000000000000-01", }, { name: "upper case span ID", header: "00-ab000000000000000000000000000000-CD00000000000000-01", }, { name: "upper case trace flag", header: "00-ab000000000000000000000000000000-cd00000000000000-A1", }, { name: "zero trace ID and span ID", header: "00-00000000000000000000000000000000-0000000000000000-01", }, { name: "trace-flag unused bits set", header: "00-ab000000000000000000000000000000-cd00000000000000-09", }, { name: "missing options", header: "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7", }, { name: "empty options", header: "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-", }, } empty := trace.SpanContext{} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { h := http.Header{traceparent: []string{tt.header}} ctx := context.Background() ctx = prop.Extract(ctx, propagation.HeaderCarrier(h)) // Failure to extract needs to result in no SpanContext being set. // This cannot be directly measured, but we can check that an // zero-value SpanContext is returned from SpanContextFromContext. 
assert.Equal(t, empty, trace.SpanContextFromContext(ctx)) }) } } func TestInjectValidTraceContext(t *testing.T) { stateStr := "key1=value1,key2=value2" state, err := trace.ParseTraceState(stateStr) require.NoError(t, err) tests := []testcase{ { name: "not sampled", header: http.Header{ traceparent: []string{"00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-00"}, }, sc: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: traceID, SpanID: spanID, Remote: true, }), }, { name: "sampled", header: http.Header{ traceparent: []string{"00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"}, }, sc: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: traceID, SpanID: spanID, TraceFlags: trace.FlagsSampled, Remote: true, }), }, { name: "unsupported trace flag bits dropped", header: http.Header{ traceparent: []string{"00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"}, }, sc: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: traceID, SpanID: spanID, TraceFlags: 0xff, Remote: true, }), }, { name: "with tracestate", header: http.Header{ traceparent: []string{"00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-00"}, tracestate: []string{stateStr}, }, sc: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: traceID, SpanID: spanID, TraceState: state, Remote: true, }), }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { ctx := context.Background() ctx = trace.ContextWithRemoteSpanContext(ctx, tc.sc) h := http.Header{} prop.Inject(ctx, propagation.HeaderCarrier(h)) assert.Equal(t, tc.header, h) }) } } func TestInvalidSpanContextDropped(t *testing.T) { invalidSC := trace.SpanContext{} require.False(t, invalidSC.IsValid()) ctx := trace.ContextWithRemoteSpanContext(context.Background(), invalidSC) header := http.Header{} propagation.TraceContext{}.Inject(ctx, propagation.HeaderCarrier(header)) assert.Equal(t, "", header.Get("traceparent"), "injected invalid SpanContext") } func TestTraceContextFields(t *testing.T) { expected := []string{"traceparent", "tracestate"} assert.Equal(t, expected, propagation.TraceContext{}.Fields()) } opentelemetry-go-1.21.0/requirements.txt000066400000000000000000000000211452547353200203610ustar00rootroot00000000000000codespell==2.2.6 opentelemetry-go-1.21.0/schema/000077500000000000000000000000001452547353200163445ustar00rootroot00000000000000opentelemetry-go-1.21.0/schema/README.md000066400000000000000000000017621452547353200176310ustar00rootroot00000000000000# Telemetry Schema Files The `schema` module contains packages that help to parse and validate [schema files](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md). Each `major.minor` schema file format version is implemented as a separate package, with the name of the package in the `vmajor.minor` form. To parse a schema file, first decide what file format version you want to parse, then import the corresponding package and use the `Parse` or `ParseFile` functions like this: ```go import schema "go.opentelemetry.io/otel/schema/v1.1" // Load the schema from a file in v1.1.x file format. func loadSchemaFromFile() error { telSchema, err := schema.ParseFile("schema-file.yaml") if err != nil { return err } // Use telSchema struct here. } // Alternatively use schema.Parse to read the schema file from io.Reader. func loadSchemaFromReader(r io.Reader) error { telSchema, err := schema.Parse(r) if err != nil { return err } // Use telSchema struct here. 
} ``` opentelemetry-go-1.21.0/schema/go.mod000066400000000000000000000004211452547353200174470ustar00rootroot00000000000000module go.opentelemetry.io/otel/schema go 1.20 require ( github.com/Masterminds/semver/v3 v3.2.1 github.com/stretchr/testify v1.8.4 gopkg.in/yaml.v3 v3.0.1 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect ) opentelemetry-go-1.21.0/schema/go.sum000066400000000000000000000020501452547353200174740ustar00rootroot00000000000000github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= opentelemetry-go-1.21.0/schema/internal/000077500000000000000000000000001452547353200201605ustar00rootroot00000000000000opentelemetry-go-1.21.0/schema/internal/parser_checks.go000066400000000000000000000046721452547353200233340ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal // import "go.opentelemetry.io/otel/schema/internal" import ( "errors" "fmt" "net/url" "strconv" "strings" "github.com/Masterminds/semver/v3" ) // CheckFileFormatField validates the file format field according to the rules here: // https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md#schema-file-format-number func CheckFileFormatField(fileFormat string, supportedFormatMajor, supportedFormatMinor int) error { // Verify that the version number in the file is a semver. fileFormatParsed, err := semver.StrictNewVersion(fileFormat) if err != nil { return fmt.Errorf( "invalid schema file format version number %q (expected semver): %w", fileFormat, err, ) } // Check that the major version number in the file is the same as what we expect. if fileFormatParsed.Major() != uint64(supportedFormatMajor) { return fmt.Errorf( "this library cannot parse file formats with major version other than %v", supportedFormatMajor, ) } // Check that the file minor version number is not greater than // what is requested supports. 
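	// For example, with supportedFormatMajor=1 and supportedFormatMinor=0,
	// a file_format of "1.0.1" passes this check while "1.1.0" is rejected
	// (see the cases exercised in parser_checks_test.go).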
if fileFormatParsed.Minor() > uint64(supportedFormatMinor) { supportedFormatMajorMinor := strconv.Itoa(supportedFormatMajor) + "." + strconv.Itoa(supportedFormatMinor) // 1.0 return fmt.Errorf( "unsupported schema file format minor version number, expected no newer than %v, got %v", supportedFormatMajorMinor+".x", fileFormat, ) } // Patch, prerelease and metadata version number does not matter, so we don't check it. return nil } // CheckSchemaURL verifies that schemaURL is valid. func CheckSchemaURL(schemaURL string) error { if strings.TrimSpace(schemaURL) == "" { return errors.New("schema_url field is missing") } if _, err := url.Parse(schemaURL); err != nil { return fmt.Errorf("invalid URL specified in schema_url field: %w", err) } return nil } opentelemetry-go-1.21.0/schema/internal/parser_checks_test.go000066400000000000000000000030051452547353200243600ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal // import "go.opentelemetry.io/otel/schema/internal" import ( "testing" "github.com/stretchr/testify/assert" ) func TestCheckFileFormatField(t *testing.T) { // Invalid file format version numbers. assert.Error(t, CheckFileFormatField("not a semver", 1, 0)) assert.Error(t, CheckFileFormatField("2.0.0", 1, 0)) assert.Error(t, CheckFileFormatField("1.1.0", 1, 0)) assert.Error(t, CheckFileFormatField("1.2.0", 1, 1)) // Valid cases. assert.NoError(t, CheckFileFormatField("1.0.0", 1, 0)) assert.NoError(t, CheckFileFormatField("1.0.1", 1, 0)) assert.NoError(t, CheckFileFormatField("1.0.10000-alpha+4857", 1, 0)) assert.NoError(t, CheckFileFormatField("1.0.0", 1, 1)) assert.NoError(t, CheckFileFormatField("1.0.1", 1, 1)) assert.NoError(t, CheckFileFormatField("1.0.10000-alpha+4857", 1, 1)) assert.NoError(t, CheckFileFormatField("1.1.0", 1, 1)) assert.NoError(t, CheckFileFormatField("1.1.1", 1, 1)) } opentelemetry-go-1.21.0/schema/v1.0/000077500000000000000000000000001452547353200170305ustar00rootroot00000000000000opentelemetry-go-1.21.0/schema/v1.0/ast/000077500000000000000000000000001452547353200176175ustar00rootroot00000000000000opentelemetry-go-1.21.0/schema/v1.0/ast/ast_schema.go000066400000000000000000000042321452547353200222560ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package ast // import "go.opentelemetry.io/otel/schema/v1.0/ast" import "go.opentelemetry.io/otel/schema/v1.0/types" // Schema represents a Schema file in FileFormat 1.0.0 as defined in // https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md type Schema struct { // Schema file format. SHOULD be 1.0.0 for the current specification version. // See https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md#schema-file-format-number FileFormat string `yaml:"file_format"` // Schema URL is an identifier of a Schema. The URL specifies a location of this // Schema File that can be retrieved (so it is a URL and not just a URI) using HTTP // or HTTPS protocol. // See https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md#schema-url SchemaURL string `yaml:"schema_url"` // Versions section that lists changes that happened in each particular version. Versions map[types.TelemetryVersion]VersionDef } // VersionDef corresponds to a section representing one version under the "versions" // top-level key. type VersionDef struct { All Attributes Resources Attributes Spans Spans SpanEvents SpanEvents `yaml:"span_events"` Logs Logs Metrics Metrics } // Attributes corresponds to a section representing a list of changes that // happened in a particular version. type Attributes struct { Changes []AttributeChange } // AttributeChange corresponds to a section representing attribute changes. type AttributeChange struct { RenameAttributes *RenameAttributes `yaml:"rename_attributes"` } opentelemetry-go-1.21.0/schema/v1.0/ast/common.go000066400000000000000000000020761452547353200214430ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ast // import "go.opentelemetry.io/otel/schema/v1.0/ast" // RenameAttributes corresponds to a section that describes attribute renaming. type RenameAttributes struct { AttributeMap AttributeMap `yaml:"attribute_map"` } // AttributeMap corresponds to a section representing a mapping of attribute names. // The keys are the old attribute name used in the previous version, the values are the // new attribute name starting from this version. type AttributeMap map[string]string opentelemetry-go-1.21.0/schema/v1.0/ast/logs.go000066400000000000000000000017301452547353200211130ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package ast // import "go.opentelemetry.io/otel/schema/v1.0/ast" // Logs corresponds to a section representing a list of changes that happened // to logs schema in a particular version. type Logs struct { Changes []LogsChange } // LogsChange corresponds to a section representing logs change. type LogsChange struct { RenameAttributes *RenameAttributes `yaml:"rename_attributes"` } opentelemetry-go-1.21.0/schema/v1.0/ast/metrics.go000066400000000000000000000026321452547353200216170ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ast // import "go.opentelemetry.io/otel/schema/v1.0/ast" import "go.opentelemetry.io/otel/schema/v1.0/types" // Metrics corresponds to a section representing a list of changes that happened // to metrics schema in a particular version. type Metrics struct { Changes []MetricsChange } // MetricsChange corresponds to a section representing metrics change. type MetricsChange struct { RenameMetrics map[types.MetricName]types.MetricName `yaml:"rename_metrics"` RenameAttributes *AttributeMapForMetrics `yaml:"rename_attributes"` } // AttributeMapForMetrics corresponds to a section representing a translation of // attributes for specific metrics. type AttributeMapForMetrics struct { ApplyToMetrics []types.MetricName `yaml:"apply_to_metrics"` AttributeMap AttributeMap `yaml:"attribute_map"` } opentelemetry-go-1.21.0/schema/v1.0/ast/spans.go000066400000000000000000000043001452547353200212670ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ast // import "go.opentelemetry.io/otel/schema/v1.0/ast" import "go.opentelemetry.io/otel/schema/v1.0/types" // Spans corresponds to a section representing a list of changes that happened // to spans schema in a particular version. type Spans struct { Changes []SpansChange } // SpanEvents corresponds to a section representing a list of changes that happened // to span events schema in a particular version. type SpanEvents struct { Changes []SpanEventsChange } // SpansChange corresponds to a section representing spans change. type SpansChange struct { RenameAttributes *AttributeMapForSpans `yaml:"rename_attributes"` } // AttributeMapForSpans corresponds to a section representing a translation of // attributes for specific spans. 
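// For example, a spans change may rename the attribute "peer.service" to
// "peer.service.name" while applying only to spans named "HTTP GET".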
type AttributeMapForSpans struct { ApplyToSpans []types.SpanName `yaml:"apply_to_spans"` AttributeMap AttributeMap `yaml:"attribute_map"` } // SpanEventsChange corresponds to a section representing span events change. type SpanEventsChange struct { RenameEvents *RenameSpanEvents `yaml:"rename_events"` RenameAttributes *RenameSpanEventAttributes `yaml:"rename_attributes"` } // RenameSpanEvents corresponds to section representing a renaming of span events. type RenameSpanEvents struct { EventNameMap map[string]string `yaml:"name_map"` } // RenameSpanEventAttributes corresponds to section representing a renaming of // attributes of span events. type RenameSpanEventAttributes struct { ApplyToSpans []types.SpanName `yaml:"apply_to_spans"` ApplyToEvents []types.EventName `yaml:"apply_to_events"` AttributeMap AttributeMap `yaml:"attribute_map"` } opentelemetry-go-1.21.0/schema/v1.0/parser.go000066400000000000000000000033001452547353200206470ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package schema // import "go.opentelemetry.io/otel/schema/v1.0" import ( "io" "os" "gopkg.in/yaml.v3" "go.opentelemetry.io/otel/schema/internal" "go.opentelemetry.io/otel/schema/v1.0/ast" ) // Major file version number that this library supports. const supportedFormatMajor = 1 // Maximum minor version number that this library supports. const supportedFormatMinor = 0 // ParseFile a schema file. schemaFilePath is the file path. func ParseFile(schemaFilePath string) (*ast.Schema, error) { file, err := os.Open(schemaFilePath) if err != nil { return nil, err } return Parse(file) } // Parse a schema file. schemaFileContent is the readable content of the schema file. func Parse(schemaFileContent io.Reader) (*ast.Schema, error) { var ts ast.Schema d := yaml.NewDecoder(schemaFileContent) d.KnownFields(true) err := d.Decode(&ts) if err != nil { return nil, err } err = internal.CheckFileFormatField(ts.FileFormat, supportedFormatMajor, supportedFormatMinor) if err != nil { return nil, err } err = internal.CheckSchemaURL(ts.SchemaURL) if err != nil { return nil, err } return &ts, nil } opentelemetry-go-1.21.0/schema/v1.0/parser_test.go000066400000000000000000000126211452547353200217140ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package schema import ( "bytes" "testing" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/schema/v1.0/ast" "go.opentelemetry.io/otel/schema/v1.0/types" ) func TestParseSchemaFile(t *testing.T) { ts, err := ParseFile("testdata/valid-example.yaml") assert.NoError(t, err) assert.NotNil(t, ts) assert.EqualValues( t, &ast.Schema{ FileFormat: "1.0.0", SchemaURL: "https://opentelemetry.io/schemas/1.1.0", Versions: map[types.TelemetryVersion]ast.VersionDef{ "1.0.0": {}, "1.1.0": { All: ast.Attributes{ Changes: []ast.AttributeChange{ { RenameAttributes: &ast.RenameAttributes{ AttributeMap: ast.AttributeMap{ "k8s.cluster.name": "kubernetes.cluster.name", "k8s.namespace.name": "kubernetes.namespace.name", "k8s.node.name": "kubernetes.node.name", "k8s.node.uid": "kubernetes.node.uid", "k8s.pod.name": "kubernetes.pod.name", "k8s.pod.uid": "kubernetes.pod.uid", "k8s.container.name": "kubernetes.container.name", "k8s.replicaset.name": "kubernetes.replicaset.name", "k8s.replicaset.uid": "kubernetes.replicaset.uid", "k8s.cronjob.name": "kubernetes.cronjob.name", "k8s.cronjob.uid": "kubernetes.cronjob.uid", "k8s.job.name": "kubernetes.job.name", "k8s.job.uid": "kubernetes.job.uid", "k8s.statefulset.name": "kubernetes.statefulset.name", "k8s.statefulset.uid": "kubernetes.statefulset.uid", "k8s.daemonset.name": "kubernetes.daemonset.name", "k8s.daemonset.uid": "kubernetes.daemonset.uid", "k8s.deployment.name": "kubernetes.deployment.name", "k8s.deployment.uid": "kubernetes.deployment.uid", "service.namespace": "service.namespace.name", }, }, }, }, }, Resources: ast.Attributes{ Changes: []ast.AttributeChange{ { RenameAttributes: &ast.RenameAttributes{ AttributeMap: ast.AttributeMap{ "telemetry.auto.version": "telemetry.auto_instr.version", }, }, }, }, }, Spans: ast.Spans{ Changes: []ast.SpansChange{ { RenameAttributes: &ast.AttributeMapForSpans{ AttributeMap: ast.AttributeMap{ "peer.service": "peer.service.name", }, ApplyToSpans: []types.SpanName{"HTTP GET"}, }, }, }, }, SpanEvents: ast.SpanEvents{ Changes: []ast.SpanEventsChange{ { RenameEvents: &ast.RenameSpanEvents{ EventNameMap: map[string]string{ "exception.stacktrace": "exception.stack_trace", }, }, }, { RenameAttributes: &ast.RenameSpanEventAttributes{ ApplyToEvents: []types.EventName{"exception.stack_trace"}, AttributeMap: ast.AttributeMap{ "peer.service": "peer.service.name", }, }, }, }, }, Logs: ast.Logs{ Changes: []ast.LogsChange{ { RenameAttributes: &ast.RenameAttributes{ AttributeMap: map[string]string{ "process.executable_name": "process.executable.name", }, }, }, }, }, Metrics: ast.Metrics{ Changes: []ast.MetricsChange{ { RenameAttributes: &ast.AttributeMapForMetrics{ AttributeMap: map[string]string{ "http.status_code": "http.response_status_code", }, }, }, { RenameMetrics: map[types.MetricName]types.MetricName{ "container.cpu.usage.total": "cpu.usage.total", "container.memory.usage.max": "memory.usage.max", }, }, { RenameAttributes: &ast.AttributeMapForMetrics{ ApplyToMetrics: []types.MetricName{ "system.cpu.utilization", "system.memory.usage", "system.memory.utilization", "system.paging.usage", }, AttributeMap: map[string]string{ "status": "state", }, }, }, }, }, }, }, }, ts, ) } func TestFailParseSchemaFile(t *testing.T) { ts, err := ParseFile("testdata/unsupported-file-format.yaml") assert.Error(t, err) assert.Nil(t, ts) ts, err = ParseFile("testdata/invalid-schema-url.yaml") assert.Error(t, err) assert.Nil(t, ts) ts, err = ParseFile("testdata/unknown-field.yaml") assert.ErrorContains(t, err, "field Resources not found in type 
ast.VersionDef") assert.Nil(t, ts) } func TestFailParseSchema(t *testing.T) { _, err := Parse(bytes.NewReader([]byte(""))) assert.Error(t, err) _, err = Parse(bytes.NewReader([]byte("invalid yaml"))) assert.Error(t, err) _, err = Parse(bytes.NewReader([]byte("file_format: 1.0.0"))) assert.Error(t, err) } opentelemetry-go-1.21.0/schema/v1.0/testdata/000077500000000000000000000000001452547353200206415ustar00rootroot00000000000000opentelemetry-go-1.21.0/schema/v1.0/testdata/invalid-schema-url.yaml000066400000000000000000000000761452547353200252140ustar00rootroot00000000000000file_format: 1.0.0 schema_url: http://invalid url versions: opentelemetry-go-1.21.0/schema/v1.0/testdata/unknown-field.yaml000066400000000000000000000005721452547353200243110ustar00rootroot00000000000000file_format: 1.0.0 schema_url: https://opentelemetry.io/schemas/1.0.0 versions: 1.1.0: all: # Valid entry. changes: - rename_attributes: k8s.cluster.name: kubernetes.cluster.name Resources: # Invalid uppercase. changes: - rename_attributes: attribute_map: browser.user_agent: user_agent.original 1.0.0: opentelemetry-go-1.21.0/schema/v1.0/testdata/unsupported-file-format.yaml000066400000000000000000000003161452547353200263200ustar00rootroot00000000000000file_format: 1.1.0 schema_url: https://opentelemetry.io/schemas/1.1.0 versions: 1.1.0: all: changes: - rename_attributes: k8s.cluster.name: kubernetes.cluster.name 1.0.0: opentelemetry-go-1.21.0/schema/v1.0/testdata/valid-example.yaml000066400000000000000000000135321452547353200242610ustar00rootroot00000000000000file_format: 1.0.0 schema_url: https://opentelemetry.io/schemas/1.1.0 versions: 1.1.0: # Section "all" applies to attributes names for all data types: resources, spans, logs, # span events, metric labels. # # The translations in "all" section are performed first (for each particular version). # Only after that the translations in the specific section ("resources", "traces", # "metrics" or "logs") that corresponds to the data type are applied. # # The only translation possible in section "all" is renaming of attributes in # versions. For human readability versions are listed in reverse chronological # order, however note that the translations are applied in the order defined by # semver ordering. all: changes: - rename_attributes: attribute_map: # Mapping of attribute names (label names for metrics). The key is the old name # used prior to this version, the value is the new name starting from this version. # Rename k8s.* to kubernetes.* k8s.cluster.name: kubernetes.cluster.name k8s.namespace.name: kubernetes.namespace.name k8s.node.name: kubernetes.node.name k8s.node.uid: kubernetes.node.uid k8s.pod.name: kubernetes.pod.name k8s.pod.uid: kubernetes.pod.uid k8s.container.name: kubernetes.container.name k8s.replicaset.name: kubernetes.replicaset.name k8s.replicaset.uid: kubernetes.replicaset.uid k8s.cronjob.name: kubernetes.cronjob.name k8s.cronjob.uid: kubernetes.cronjob.uid k8s.job.name: kubernetes.job.name k8s.job.uid: kubernetes.job.uid k8s.statefulset.name: kubernetes.statefulset.name k8s.statefulset.uid: kubernetes.statefulset.uid k8s.daemonset.name: kubernetes.daemonset.name k8s.daemonset.uid: kubernetes.daemonset.uid k8s.deployment.name: kubernetes.deployment.name k8s.deployment.uid: kubernetes.deployment.uid service.namespace: service.namespace.name # Like "all" the "resources" section may contain only attribute renaming translations. # The only translation possible in this section is renaming of attributes in # versions. 
resources: changes: - rename_attributes: attribute_map: # Mapping of attribute names. The key is the old name # used prior to this version, the value is the new name starting from this version. telemetry.auto.version: telemetry.auto_instr.version spans: changes: # Sequence of translations to apply to convert the schema from a prior version # to this version. The order in this sequence is important. Translations are # applied from top to bottom in the listed order. - rename_attributes: # Rename attributes of all spans, regardless of span name. # The keys are the old attribute name used prior to this version, the values are # the new attribute name starting from this version. attribute_map: peer.service: peer.service.name apply_to_spans: # apply only to spans named "HTTP GET" - "HTTP GET" span_events: changes: # Sequence of translations to apply to convert the schema from a prior version # to this version. The order in this sequence is important. Translations are # applied from top to bottom in the listed order. - rename_events: # Rename events. The keys are old event names, the values are the new event names. name_map: {exception.stacktrace: exception.stack_trace} - rename_attributes: # Rename attributes of events. # The keys are the old attribute name used prior to this version, the values are # the new attribute name starting from this version. attribute_map: peer.service: peer.service.name apply_to_events: # Optional event names to apply to. If empty applies to all events. # Conditions in apply_to_spans and apply_to_events are logical AND-ed, # both should match for transformation to be applied. - exception.stack_trace metrics: changes: # Sequence of translations to apply to convert the schema from a prior version # to this version. The order in this sequence is important. Translations are # applied from top to bottom in the listed order. - rename_attributes: # Rename attributes of all metrics, regardless of metric name. # The keys are the old attribute name used prior to this version, the values are # the new attribute name starting from this version. attribute_map: http.status_code: http.response_status_code - rename_metrics: # Rename metrics. The keys are old metric names, the values are the new metric names. container.cpu.usage.total: cpu.usage.total container.memory.usage.max: memory.usage.max - rename_attributes: apply_to_metrics: # Name of the metric to apply this rule to. If empty the rule applies to all metrics. - system.cpu.utilization - system.memory.usage - system.memory.utilization - system.paging.usage attribute_map: # The keys are the old attribute name used prior to this version, the values are # the new attribute name starting from this version. status: state logs: changes: - rename_attributes: attribute_map: process.executable_name: process.executable.name 1.0.0: opentelemetry-go-1.21.0/schema/v1.0/types/000077500000000000000000000000001452547353200201745ustar00rootroot00000000000000opentelemetry-go-1.21.0/schema/v1.0/types/types.go000066400000000000000000000016731452547353200216760ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package types // import "go.opentelemetry.io/otel/schema/v1.0/types" // TelemetryVersion is a version number key in the schema file (e.g. "1.7.0"). type TelemetryVersion string // SpanName is span name string. type SpanName string // EventName is an event name string. type EventName string // MetricName is a metric name string. type MetricName string opentelemetry-go-1.21.0/schema/v1.1/000077500000000000000000000000001452547353200170315ustar00rootroot00000000000000opentelemetry-go-1.21.0/schema/v1.1/ast/000077500000000000000000000000001452547353200176205ustar00rootroot00000000000000opentelemetry-go-1.21.0/schema/v1.1/ast/ast_schema.go000066400000000000000000000041221452547353200222550ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ast // import "go.opentelemetry.io/otel/schema/v1.1/ast" import ( ast10 "go.opentelemetry.io/otel/schema/v1.0/ast" "go.opentelemetry.io/otel/schema/v1.1/types" ) // Schema represents a Schema file in FileFormat 1.1.0 as defined in // https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md type Schema struct { // Schema file format. SHOULD be 1.1.0 for the current specification version. // See https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md#schema-file-format-number FileFormat string `yaml:"file_format"` // Schema URL is an identifier of a Schema. The URL specifies a location of this // Schema File that can be retrieved (so it is a URL and not just a URI) using HTTP // or HTTPS protocol. // See https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md#schema-url SchemaURL string `yaml:"schema_url"` // Versions section that lists changes that happened in each particular version. Versions map[types.TelemetryVersion]VersionDef } // VersionDef corresponds to a section representing one version under the "versions" // top-level key. // Note that most of the fields are the same as in ast 1.0 package, only Metrics are defined // differently, since only that field has changed from 1.0 to 1.1 of schema file format. type VersionDef struct { All ast10.Attributes Resources ast10.Attributes Spans ast10.Spans SpanEvents ast10.SpanEvents `yaml:"span_events"` Logs ast10.Logs Metrics Metrics } opentelemetry-go-1.21.0/schema/v1.1/ast/metrics.go000066400000000000000000000043441452547353200216220ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ast // import "go.opentelemetry.io/otel/schema/v1.1/ast" import ( ast10 "go.opentelemetry.io/otel/schema/v1.0/ast" types10 "go.opentelemetry.io/otel/schema/v1.0/types" types11 "go.opentelemetry.io/otel/schema/v1.1/types" ) // Metrics corresponds to a section representing a list of changes that happened // to metrics schema in a particular version. type Metrics struct { Changes []MetricsChange } // MetricsChange corresponds to a section representing metrics change. type MetricsChange struct { RenameMetrics map[types10.MetricName]types10.MetricName `yaml:"rename_metrics"` RenameAttributes *ast10.AttributeMapForMetrics `yaml:"rename_attributes"` Split *SplitMetric `yaml:"split"` } // SplitMetric corresponds to a section representing a splitting of a metric // into multiple metrics by eliminating an attribute. // SplitMetrics is introduced in schema file format 1.1, // see https://github.com/open-telemetry/opentelemetry-specification/pull/2653 type SplitMetric struct { // Name of the old metric to split. ApplyToMetric types10.MetricName `yaml:"apply_to_metric"` // Name of attribute in the old metric to use for splitting. The attribute will be // eliminated, the new metric will not have it. ByAttribute types11.AttributeName `yaml:"by_attribute"` // Names of new metrics to create, one for each possible value of attribute. // map of key/values. The keys are the new metric name starting from this version, // the values are old attribute value used in the previous version. MetricsFromAttributes map[types10.MetricName]types11.AttributeValue `yaml:"metrics_from_attributes"` } opentelemetry-go-1.21.0/schema/v1.1/parser.go000066400000000000000000000033001452547353200206500ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package schema // import "go.opentelemetry.io/otel/schema/v1.1" import ( "io" "os" "gopkg.in/yaml.v3" "go.opentelemetry.io/otel/schema/internal" "go.opentelemetry.io/otel/schema/v1.1/ast" ) // Major file version number that this library supports. const supportedFormatMajor = 1 // Maximum minor version number that this library supports. const supportedFormatMinor = 1 // ParseFile a schema file. schemaFilePath is the file path. func ParseFile(schemaFilePath string) (*ast.Schema, error) { file, err := os.Open(schemaFilePath) if err != nil { return nil, err } return Parse(file) } // Parse a schema file. schemaFileContent is the readable content of the schema file. 
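// Decoding is strict: because KnownFields is enabled on the YAML decoder,
// fields not defined by this file format version cause Parse to return an error.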
func Parse(schemaFileContent io.Reader) (*ast.Schema, error) { var ts ast.Schema d := yaml.NewDecoder(schemaFileContent) d.KnownFields(true) err := d.Decode(&ts) if err != nil { return nil, err } err = internal.CheckFileFormatField(ts.FileFormat, supportedFormatMajor, supportedFormatMinor) if err != nil { return nil, err } err = internal.CheckSchemaURL(ts.SchemaURL) if err != nil { return nil, err } return &ts, nil } opentelemetry-go-1.21.0/schema/v1.1/parser_test.go000066400000000000000000000132371452547353200217210ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package schema import ( "testing" "github.com/stretchr/testify/assert" ast10 "go.opentelemetry.io/otel/schema/v1.0/ast" types10 "go.opentelemetry.io/otel/schema/v1.0/types" ast11 "go.opentelemetry.io/otel/schema/v1.1/ast" types11 "go.opentelemetry.io/otel/schema/v1.1/types" ) func TestParseSchemaFile(t *testing.T) { ts, err := ParseFile("testdata/valid-example.yaml") assert.NoError(t, err) assert.NotNil(t, ts) assert.EqualValues( t, &ast11.Schema{ FileFormat: "1.1.0", SchemaURL: "https://opentelemetry.io/schemas/1.1.0", Versions: map[types11.TelemetryVersion]ast11.VersionDef{ "1.0.0": {}, "1.1.0": { All: ast10.Attributes{ Changes: []ast10.AttributeChange{ { RenameAttributes: &ast10.RenameAttributes{ AttributeMap: ast10.AttributeMap{ "k8s.cluster.name": "kubernetes.cluster.name", "k8s.namespace.name": "kubernetes.namespace.name", "k8s.node.name": "kubernetes.node.name", "k8s.node.uid": "kubernetes.node.uid", "k8s.pod.name": "kubernetes.pod.name", "k8s.pod.uid": "kubernetes.pod.uid", "k8s.container.name": "kubernetes.container.name", "k8s.replicaset.name": "kubernetes.replicaset.name", "k8s.replicaset.uid": "kubernetes.replicaset.uid", "k8s.cronjob.name": "kubernetes.cronjob.name", "k8s.cronjob.uid": "kubernetes.cronjob.uid", "k8s.job.name": "kubernetes.job.name", "k8s.job.uid": "kubernetes.job.uid", "k8s.statefulset.name": "kubernetes.statefulset.name", "k8s.statefulset.uid": "kubernetes.statefulset.uid", "k8s.daemonset.name": "kubernetes.daemonset.name", "k8s.daemonset.uid": "kubernetes.daemonset.uid", "k8s.deployment.name": "kubernetes.deployment.name", "k8s.deployment.uid": "kubernetes.deployment.uid", "service.namespace": "service.namespace.name", }, }, }, }, }, Resources: ast10.Attributes{ Changes: []ast10.AttributeChange{ { RenameAttributes: &ast10.RenameAttributes{ AttributeMap: ast10.AttributeMap{ "telemetry.auto.version": "telemetry.auto_instr.version", }, }, }, }, }, Spans: ast10.Spans{ Changes: []ast10.SpansChange{ { RenameAttributes: &ast10.AttributeMapForSpans{ AttributeMap: ast10.AttributeMap{ "peer.service": "peer.service.name", }, ApplyToSpans: []types10.SpanName{"HTTP GET"}, }, }, }, }, SpanEvents: ast10.SpanEvents{ Changes: []ast10.SpanEventsChange{ { RenameEvents: &ast10.RenameSpanEvents{ EventNameMap: map[string]string{ "exception.stacktrace": "exception.stack_trace", }, }, }, { RenameAttributes: &ast10.RenameSpanEventAttributes{ ApplyToEvents: 
[]types10.EventName{"exception.stack_trace"}, AttributeMap: ast10.AttributeMap{ "peer.service": "peer.service.name", }, }, }, }, }, Logs: ast10.Logs{ Changes: []ast10.LogsChange{ { RenameAttributes: &ast10.RenameAttributes{ AttributeMap: map[string]string{ "process.executable_name": "process.executable.name", }, }, }, }, }, Metrics: ast11.Metrics{ Changes: []ast11.MetricsChange{ { RenameAttributes: &ast10.AttributeMapForMetrics{ AttributeMap: map[string]string{ "http.status_code": "http.response_status_code", }, }, }, { RenameMetrics: map[types10.MetricName]types10.MetricName{ "container.cpu.usage.total": "cpu.usage.total", "container.memory.usage.max": "memory.usage.max", }, }, { RenameAttributes: &ast10.AttributeMapForMetrics{ ApplyToMetrics: []types10.MetricName{ "system.cpu.utilization", "system.memory.usage", "system.memory.utilization", "system.paging.usage", }, AttributeMap: map[string]string{ "status": "state", }, }, }, { Split: &ast11.SplitMetric{ ApplyToMetric: "system.paging.operations", ByAttribute: "direction", MetricsFromAttributes: map[types10.MetricName]types11.AttributeValue{ "system.paging.operations.in": "in", "system.paging.operations.out": "out", }, }, }, }, }, }, }, }, ts, ) } func TestFailParseFileUnsupportedFileFormat(t *testing.T) { ts, err := ParseFile("testdata/unsupported-file-format.yaml") assert.ErrorContains(t, err, "unsupported schema file format minor version number") assert.Nil(t, ts) } func TestFailParseFileUnknownField(t *testing.T) { ts, err := ParseFile("testdata/unknown-field.yaml") assert.ErrorContains(t, err, "field Resources not found in type ast.VersionDef") assert.Nil(t, ts) } opentelemetry-go-1.21.0/schema/v1.1/testdata/000077500000000000000000000000001452547353200206425ustar00rootroot00000000000000opentelemetry-go-1.21.0/schema/v1.1/testdata/unknown-field.yaml000066400000000000000000000005721452547353200243120ustar00rootroot00000000000000file_format: 1.1.0 schema_url: https://opentelemetry.io/schemas/1.1.0 versions: 1.1.0: all: # Valid entry. changes: - rename_attributes: k8s.cluster.name: kubernetes.cluster.name Resources: # Invalid uppercase. changes: - rename_attributes: attribute_map: browser.user_agent: user_agent.original 1.0.0: opentelemetry-go-1.21.0/schema/v1.1/testdata/unsupported-file-format.yaml000066400000000000000000000003531452547353200263220ustar00rootroot00000000000000file_format: 1.2.0 schema_url: https://opentelemetry.io/schemas/1.1.0 versions: 1.1.0: all: changes: - rename_attributes: attribute_map: k8s.cluster.name: kubernetes.cluster.name 1.0.0: opentelemetry-go-1.21.0/schema/v1.1/testdata/valid-example.yaml000066400000000000000000000153041452547353200242610ustar00rootroot00000000000000file_format: 1.1.0 schema_url: https://opentelemetry.io/schemas/1.1.0 versions: 1.1.0: # Section "all" applies to attribute names for all data types: resources, spans, logs, # span events, metric labels. # # The translations in "all" section are performed first (for each particular version). # Only after that the translations in the specific section ("resources", "traces", # "metrics" or "logs") that corresponds to the data type are applied. # # The only translation possible in section "all" is renaming of attributes in # versions. For human readability versions are listed in reverse chronological # order, however note that the translations are applied in the order defined by # semver ordering. all: changes: - rename_attributes: attribute_map: # Mapping of attribute names (label names for metrics). 
The key is the old name # used prior to this version, the value is the new name starting from this version. # Rename k8s.* to kubernetes.* k8s.cluster.name: kubernetes.cluster.name k8s.namespace.name: kubernetes.namespace.name k8s.node.name: kubernetes.node.name k8s.node.uid: kubernetes.node.uid k8s.pod.name: kubernetes.pod.name k8s.pod.uid: kubernetes.pod.uid k8s.container.name: kubernetes.container.name k8s.replicaset.name: kubernetes.replicaset.name k8s.replicaset.uid: kubernetes.replicaset.uid k8s.cronjob.name: kubernetes.cronjob.name k8s.cronjob.uid: kubernetes.cronjob.uid k8s.job.name: kubernetes.job.name k8s.job.uid: kubernetes.job.uid k8s.statefulset.name: kubernetes.statefulset.name k8s.statefulset.uid: kubernetes.statefulset.uid k8s.daemonset.name: kubernetes.daemonset.name k8s.daemonset.uid: kubernetes.daemonset.uid k8s.deployment.name: kubernetes.deployment.name k8s.deployment.uid: kubernetes.deployment.uid service.namespace: service.namespace.name # Like "all" the "resources" section may contain only attribute renaming translations. # The only translation possible in this section is renaming of attributes in # versions. resources: changes: - rename_attributes: attribute_map: # Mapping of attribute names. The key is the old name # used prior to this version, the value is the new name starting from this version. telemetry.auto.version: telemetry.auto_instr.version spans: changes: # Sequence of translations to apply to convert the schema from a prior version # to this version. The order in this sequence is important. Translations are # applied from top to bottom in the listed order. - rename_attributes: # Rename attributes of all spans, regardless of span name. # The keys are the old attribute name used prior to this version, the values are # the new attribute name starting from this version. attribute_map: peer.service: peer.service.name apply_to_spans: # apply only to spans named "HTTP GET" - "HTTP GET" span_events: changes: # Sequence of translations to apply to convert the schema from a prior version # to this version. The order in this sequence is important. Translations are # applied from top to bottom in the listed order. - rename_events: # Rename events. The keys are old event names, the values are the new event names. name_map: {exception.stacktrace: exception.stack_trace} - rename_attributes: # Rename attributes of events. # The keys are the old attribute name used prior to this version, the values are # the new attribute name starting from this version. attribute_map: peer.service: peer.service.name apply_to_events: # Optional event names to apply to. If empty applies to all events. # Conditions in apply_to_spans and apply_to_events are logical AND-ed, # both should match for transformation to be applied. - exception.stack_trace metrics: changes: # Sequence of translations to apply to convert the schema from a prior version # to this version. The order in this sequence is important. Translations are # applied from top to bottom in the listed order. - rename_attributes: # Rename attributes of all metrics, regardless of metric name. # The keys are the old attribute name used prior to this version, the values are # the new attribute name starting from this version. attribute_map: http.status_code: http.response_status_code - rename_metrics: # Rename metrics. The keys are old metric names, the values are the new metric names. 
container.cpu.usage.total: cpu.usage.total container.memory.usage.max: memory.usage.max - rename_attributes: apply_to_metrics: # Name of the metric to apply this rule to. If empty the rule applies to all metrics. - system.cpu.utilization - system.memory.usage - system.memory.utilization - system.paging.usage attribute_map: # The keys are the old attribute name used prior to this version, the values are # the new attribute name starting from this version. status: state - split: # Rules to split a metric into several metrics using an attribute for split. # This example rule implements the change done by # https://github.com/open-telemetry/opentelemetry-specification/pull/2617 # Name of old metric to split. apply_to_metric: system.paging.operations # Name of attribute in the old metric to use for splitting. The attribute will be # eliminated, the new metric will not have it. by_attribute: direction # Names of new metrics to create, one for each possible value of the attribute. metrics_from_attributes: # If "direction" attribute equals "in" create a new metric called "system.paging.operations.in". system.paging.operations.in: in system.paging.operations.out: out logs: changes: - rename_attributes: attribute_map: process.executable_name: process.executable.name 1.0.0: opentelemetry-go-1.21.0/schema/v1.1/types/000077500000000000000000000000001452547353200201755ustar00rootroot00000000000000opentelemetry-go-1.21.0/schema/v1.1/types/types.go000066400000000000000000000017521452547353200216750ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package types // import "go.opentelemetry.io/otel/schema/v1.1/types" import types10 "go.opentelemetry.io/otel/schema/v1.0/types" // TelemetryVersion is a version number key in the schema file (e.g. "1.7.0"). type TelemetryVersion types10.TelemetryVersion // AttributeName is an attribute name string. type AttributeName string // AttributeValue is an attribute value. 
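// In schema files this is a YAML scalar, e.g. the values "in" and "out" used
// by the 1.1 metric split rule to map attribute values to new metric names.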
type AttributeValue interface{} opentelemetry-go-1.21.0/sdk/000077500000000000000000000000001452547353200156655ustar00rootroot00000000000000opentelemetry-go-1.21.0/sdk/go.mod000066400000000000000000000012121452547353200167670ustar00rootroot00000000000000module go.opentelemetry.io/otel/sdk go 1.20 replace go.opentelemetry.io/otel => ../ require ( github.com/go-logr/logr v1.3.0 github.com/google/go-cmp v0.6.0 github.com/stretchr/testify v1.8.4 go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/trace v1.21.0 golang.org/x/sys v0.14.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) replace go.opentelemetry.io/otel/trace => ../trace replace go.opentelemetry.io/otel/metric => ../metric opentelemetry-go-1.21.0/sdk/go.sum000066400000000000000000000031211452547353200170150ustar00rootroot00000000000000github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= opentelemetry-go-1.21.0/sdk/instrumentation/000077500000000000000000000000001452547353200211305ustar00rootroot00000000000000opentelemetry-go-1.21.0/sdk/instrumentation/doc.go000066400000000000000000000022241452547353200222240ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// Package instrumentation provides types to represent the code libraries that // provide OpenTelemetry instrumentation. These types are used in the // OpenTelemetry signal pipelines to identify the source of telemetry. // // See // https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0083-component.md // and // https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0201-scope-attributes.md // for more information. package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" opentelemetry-go-1.21.0/sdk/instrumentation/library.go000066400000000000000000000014311452547353200231220ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" // Library represents the instrumentation library. // Deprecated: please use Scope instead. type Library = Scope opentelemetry-go-1.21.0/sdk/instrumentation/scope.go000066400000000000000000000017631452547353200225770ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" // Scope represents the instrumentation scope. type Scope struct { // Name is the name of the instrumentation scope. This should be the // Go package name of that scope. Name string // Version is the version of the instrumentation scope. Version string // SchemaURL of the telemetry emitted by the scope. SchemaURL string } opentelemetry-go-1.21.0/sdk/internal/000077500000000000000000000000001452547353200175015ustar00rootroot00000000000000opentelemetry-go-1.21.0/sdk/internal/env/000077500000000000000000000000001452547353200202715ustar00rootroot00000000000000opentelemetry-go-1.21.0/sdk/internal/env/env.go000066400000000000000000000147371452547353200214240ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package env // import "go.opentelemetry.io/otel/sdk/internal/env" import ( "os" "strconv" "go.opentelemetry.io/otel/internal/global" ) // Environment variable names. const ( // BatchSpanProcessorScheduleDelayKey is the delay interval between two // consecutive exports (i.e. 5000). BatchSpanProcessorScheduleDelayKey = "OTEL_BSP_SCHEDULE_DELAY" // BatchSpanProcessorExportTimeoutKey is the maximum allowed time to // export data (i.e. 3000). BatchSpanProcessorExportTimeoutKey = "OTEL_BSP_EXPORT_TIMEOUT" // BatchSpanProcessorMaxQueueSizeKey is the maximum queue size (i.e. 2048). BatchSpanProcessorMaxQueueSizeKey = "OTEL_BSP_MAX_QUEUE_SIZE" // BatchSpanProcessorMaxExportBatchSizeKey is the maximum batch size (i.e. // 512). Note: it must be less than or equal to // EnvBatchSpanProcessorMaxQueueSize. BatchSpanProcessorMaxExportBatchSizeKey = "OTEL_BSP_MAX_EXPORT_BATCH_SIZE" // AttributeValueLengthKey is the maximum allowed attribute value size. AttributeValueLengthKey = "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT" // AttributeCountKey is the maximum allowed span attribute count. AttributeCountKey = "OTEL_ATTRIBUTE_COUNT_LIMIT" // SpanAttributeValueLengthKey is the maximum allowed attribute value size // for a span. SpanAttributeValueLengthKey = "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT" // SpanAttributeCountKey is the maximum allowed span attribute count for a // span. SpanAttributeCountKey = "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT" // SpanEventCountKey is the maximum allowed span event count. SpanEventCountKey = "OTEL_SPAN_EVENT_COUNT_LIMIT" // SpanEventAttributeCountKey is the maximum allowed attribute per span // event count. SpanEventAttributeCountKey = "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT" // SpanLinkCountKey is the maximum allowed span link count. SpanLinkCountKey = "OTEL_SPAN_LINK_COUNT_LIMIT" // SpanLinkAttributeCountKey is the maximum allowed attribute per span // link count. SpanLinkAttributeCountKey = "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT" ) // firstInt returns the value of the first matching environment variable from // keys. If the value is not an integer or no match is found, defaultValue is // returned. func firstInt(defaultValue int, keys ...string) int { for _, key := range keys { value := os.Getenv(key) if value == "" { continue } intValue, err := strconv.Atoi(value) if err != nil { global.Info("Got invalid value, number value expected.", key, value) return defaultValue } return intValue } return defaultValue } // IntEnvOr returns the int value of the environment variable with name key if // it exists, it is not empty, and the value is an int. Otherwise, defaultValue is returned. func IntEnvOr(key string, defaultValue int) int { value := os.Getenv(key) if value == "" { return defaultValue } intValue, err := strconv.Atoi(value) if err != nil { global.Info("Got invalid value, number value expected.", key, value) return defaultValue } return intValue } // BatchSpanProcessorScheduleDelay returns the environment variable value for // the OTEL_BSP_SCHEDULE_DELAY key if it exists, otherwise defaultValue is // returned. func BatchSpanProcessorScheduleDelay(defaultValue int) int { return IntEnvOr(BatchSpanProcessorScheduleDelayKey, defaultValue) } // BatchSpanProcessorExportTimeout returns the environment variable value for // the OTEL_BSP_EXPORT_TIMEOUT key if it exists, otherwise defaultValue is // returned. 
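// For example, with OTEL_BSP_EXPORT_TIMEOUT=30000 set in the environment this
// returns 30000, while an unset, empty, or non-numeric value yields defaultValue.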
func BatchSpanProcessorExportTimeout(defaultValue int) int { return IntEnvOr(BatchSpanProcessorExportTimeoutKey, defaultValue) } // BatchSpanProcessorMaxQueueSize returns the environment variable value for // the OTEL_BSP_MAX_QUEUE_SIZE key if it exists, otherwise defaultValue is // returned. func BatchSpanProcessorMaxQueueSize(defaultValue int) int { return IntEnvOr(BatchSpanProcessorMaxQueueSizeKey, defaultValue) } // BatchSpanProcessorMaxExportBatchSize returns the environment variable value for // the OTEL_BSP_MAX_EXPORT_BATCH_SIZE key if it exists, otherwise defaultValue // is returned. func BatchSpanProcessorMaxExportBatchSize(defaultValue int) int { return IntEnvOr(BatchSpanProcessorMaxExportBatchSizeKey, defaultValue) } // SpanAttributeValueLength returns the environment variable value for the // OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the // environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT is // returned or defaultValue if that is not set. func SpanAttributeValueLength(defaultValue int) int { return firstInt(defaultValue, SpanAttributeValueLengthKey, AttributeValueLengthKey) } // SpanAttributeCount returns the environment variable value for the // OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the // environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT is returned or // defaultValue if that is not set. func SpanAttributeCount(defaultValue int) int { return firstInt(defaultValue, SpanAttributeCountKey, AttributeCountKey) } // SpanEventCount returns the environment variable value for the // OTEL_SPAN_EVENT_COUNT_LIMIT key if it exists, otherwise defaultValue is // returned. func SpanEventCount(defaultValue int) int { return IntEnvOr(SpanEventCountKey, defaultValue) } // SpanEventAttributeCount returns the environment variable value for the // OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue // is returned. func SpanEventAttributeCount(defaultValue int) int { return IntEnvOr(SpanEventAttributeCountKey, defaultValue) } // SpanLinkCount returns the environment variable value for the // OTEL_SPAN_LINK_COUNT_LIMIT key if it exists, otherwise defaultValue is // returned. func SpanLinkCount(defaultValue int) int { return IntEnvOr(SpanLinkCountKey, defaultValue) } // SpanLinkAttributeCount returns the environment variable value for the // OTEL_LINK_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue is // returned. func SpanLinkAttributeCount(defaultValue int) int { return IntEnvOr(SpanLinkAttributeCountKey, defaultValue) } opentelemetry-go-1.21.0/sdk/internal/env/env_test.go000066400000000000000000000057771452547353200224670ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package env import ( "os" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ottest "go.opentelemetry.io/otel/sdk/internal/internaltest" ) func TestEnvParse(t *testing.T) { testCases := []struct { name string keys []string f func(int) int }{ { name: "BatchSpanProcessorScheduleDelay", keys: []string{BatchSpanProcessorScheduleDelayKey}, f: BatchSpanProcessorScheduleDelay, }, { name: "BatchSpanProcessorExportTimeout", keys: []string{BatchSpanProcessorExportTimeoutKey}, f: BatchSpanProcessorExportTimeout, }, { name: "BatchSpanProcessorMaxQueueSize", keys: []string{BatchSpanProcessorMaxQueueSizeKey}, f: BatchSpanProcessorMaxQueueSize, }, { name: "BatchSpanProcessorMaxExportBatchSize", keys: []string{BatchSpanProcessorMaxExportBatchSizeKey}, f: BatchSpanProcessorMaxExportBatchSize, }, { name: "SpanAttributeValueLength", keys: []string{SpanAttributeValueLengthKey, AttributeValueLengthKey}, f: SpanAttributeValueLength, }, { name: "SpanAttributeCount", keys: []string{SpanAttributeCountKey, AttributeCountKey}, f: SpanAttributeCount, }, { name: "SpanEventCount", keys: []string{SpanEventCountKey}, f: SpanEventCount, }, { name: "SpanEventAttributeCount", keys: []string{SpanEventAttributeCountKey}, f: SpanEventAttributeCount, }, { name: "SpanLinkCount", keys: []string{SpanLinkCountKey}, f: SpanLinkCount, }, { name: "SpanLinkAttributeCount", keys: []string{SpanLinkAttributeCountKey}, f: SpanLinkAttributeCount, }, } const ( defVal = 500 envVal = 2500 envValStr = "2500" invalid = "localhost" empty = "" ) for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { for _, key := range tc.keys { t.Run(key, func(t *testing.T) { envStore := ottest.NewEnvStore() t.Cleanup(func() { require.NoError(t, envStore.Restore()) }) envStore.Record(key) assert.Equal(t, defVal, tc.f(defVal), "environment variable unset") require.NoError(t, os.Setenv(key, envValStr)) assert.Equal(t, envVal, tc.f(defVal), "environment variable set/valid") require.NoError(t, os.Setenv(key, invalid)) assert.Equal(t, defVal, tc.f(defVal), "invalid value") require.NoError(t, os.Setenv(key, empty)) assert.Equal(t, defVal, tc.f(defVal), "empty value") }) } }) } } opentelemetry-go-1.21.0/sdk/internal/gen.go000066400000000000000000000043711452547353200206060ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package internal // import "go.opentelemetry.io/otel/sdk/internal" //go:generate gotmpl --body=../../internal/shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go //go:generate gotmpl --body=../../internal/shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go //go:generate gotmpl --body=../../internal/shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go //go:generate gotmpl --body=../../internal/shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go //go:generate gotmpl --body=../../internal/shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go //go:generate gotmpl --body=../../internal/shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go //go:generate gotmpl --body=../../internal/shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go //go:generate gotmpl --body=../../internal/shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/sdk/internal/matchers\"}" --out=internaltest/harness.go //go:generate gotmpl --body=../../internal/shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go //go:generate gotmpl --body=../../internal/shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go //go:generate gotmpl --body=../../internal/shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go //go:generate gotmpl --body=../../internal/shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go opentelemetry-go-1.21.0/sdk/internal/internal.go000066400000000000000000000020431452547353200216430ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal // import "go.opentelemetry.io/otel/sdk/internal" import "time" // MonotonicEndTime returns the end time at present // but offset from start, monotonically. // // The monotonic clock is used in subtractions hence // the duration since start added back to start gives // end as a monotonic time. // See https://golang.org/pkg/time/#hdr-Monotonic_Clocks func MonotonicEndTime(start time.Time) time.Time { return start.Add(time.Since(start)) } opentelemetry-go-1.21.0/sdk/internal/internaltest/000077500000000000000000000000001452547353200222155ustar00rootroot00000000000000opentelemetry-go-1.21.0/sdk/internal/internaltest/alignment.go000066400000000000000000000045211452547353200245240ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/alignment.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest // import "go.opentelemetry.io/otel/sdk/internal/internaltest" /* This file contains common utilities and objects to validate memory alignment of Go types. The primary use of this functionality is intended to ensure `struct` fields that need to be 64-bit aligned so they can be passed as arguments to 64-bit atomic operations. The common workflow is to define a slice of `FieldOffset` and pass them to the `Aligned8Byte` function from within a `TestMain` function from a package's tests. It is important to make this call from the `TestMain` function prior to running the rest of the test suit as it can provide useful diagnostics about field alignment instead of ambiguous nil pointer dereference and runtime panic. For more information: https://github.com/open-telemetry/opentelemetry-go/issues/341 */ import ( "fmt" "io" ) // FieldOffset is a preprocessor representation of a struct field alignment. type FieldOffset struct { // Name of the field. Name string // Offset of the field in bytes. // // To compute this at compile time use unsafe.Offsetof. Offset uintptr } // Aligned8Byte returns if all fields are aligned modulo 8-bytes. // // Error messaging is printed to out for any field determined misaligned. func Aligned8Byte(fields []FieldOffset, out io.Writer) bool { misaligned := make([]FieldOffset, 0) for _, f := range fields { if f.Offset%8 != 0 { misaligned = append(misaligned, f) } } if len(misaligned) == 0 { return true } fmt.Fprintln(out, "struct fields not aligned for 64-bit atomic operations:") for _, f := range misaligned { fmt.Fprintf(out, " %s: %d-byte offset\n", f.Name, f.Offset) } return false } opentelemetry-go-1.21.0/sdk/internal/internaltest/env.go000066400000000000000000000041201452547353200233310ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/env.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest // import "go.opentelemetry.io/otel/sdk/internal/internaltest" import ( "os" ) type Env struct { Name string Value string Exists bool } // EnvStore stores and recovers environment variables. type EnvStore interface { // Records the environment variable into the store. Record(key string) // Restore recovers the environment variables in the store. 
Restore() error } var _ EnvStore = (*envStore)(nil) type envStore struct { store map[string]Env } func (s *envStore) add(env Env) { s.store[env.Name] = env } func (s *envStore) Restore() error { var err error for _, v := range s.store { if v.Exists { err = os.Setenv(v.Name, v.Value) } else { err = os.Unsetenv(v.Name) } if err != nil { return err } } return nil } func (s *envStore) setEnv(key, value string) error { s.Record(key) err := os.Setenv(key, value) if err != nil { return err } return nil } func (s *envStore) Record(key string) { originValue, exists := os.LookupEnv(key) s.add(Env{ Name: key, Value: originValue, Exists: exists, }) } func NewEnvStore() EnvStore { return newEnvStore() } func newEnvStore() *envStore { return &envStore{store: make(map[string]Env)} } func SetEnvVariables(env map[string]string) (EnvStore, error) { envStore := newEnvStore() for k, v := range env { err := envStore.setEnv(k, v) if err != nil { return nil, err } } return envStore, nil } opentelemetry-go-1.21.0/sdk/internal/internaltest/env_test.go000066400000000000000000000114731452547353200244010ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/env_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
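// Illustrative sketch (not part of the upstream 1.21.0 sources): the intended
// record/mutate/restore pattern for EnvStore, written as if it lived in this
// internaltest package. The variable name OTEL_EXAMPLE_VAR is an assumption for
// illustration only.
func exampleEnvStoreUsage() error {
	// SetEnvVariables records the prior state of each variable before mutating it.
	store, err := SetEnvVariables(map[string]string{"OTEL_EXAMPLE_VAR": "42"})
	if err != nil {
		return err
	}
	// ... exercise code that reads OTEL_EXAMPLE_VAR here ...
	// Restore puts the environment back exactly as it was recorded, re-setting
	// variables that existed before and unsetting ones that did not.
	return store.Restore()
}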
package internaltest import ( "os" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" ) type EnvStoreTestSuite struct { suite.Suite } func (s *EnvStoreTestSuite) Test_add() { envStore := newEnvStore() e := Env{ Name: "name", Value: "value", Exists: true, } envStore.add(e) envStore.add(e) s.Assert().Len(envStore.store, 1) } func (s *EnvStoreTestSuite) TestRecord() { testCases := []struct { name string env Env expectedEnvStore *envStore }{ { name: "record exists env", env: Env{ Name: "name", Value: "value", Exists: true, }, expectedEnvStore: &envStore{store: map[string]Env{ "name": { Name: "name", Value: "value", Exists: true, }, }}, }, { name: "record exists env, but its value is empty", env: Env{ Name: "name", Value: "", Exists: true, }, expectedEnvStore: &envStore{store: map[string]Env{ "name": { Name: "name", Value: "", Exists: true, }, }}, }, { name: "record not exists env", env: Env{ Name: "name", Exists: false, }, expectedEnvStore: &envStore{store: map[string]Env{ "name": { Name: "name", Exists: false, }, }}, }, } for _, tc := range testCases { s.Run(tc.name, func() { if tc.env.Exists { s.Assert().NoError(os.Setenv(tc.env.Name, tc.env.Value)) } envStore := newEnvStore() envStore.Record(tc.env.Name) s.Assert().Equal(tc.expectedEnvStore, envStore) if tc.env.Exists { s.Assert().NoError(os.Unsetenv(tc.env.Name)) } }) } } func (s *EnvStoreTestSuite) TestRestore() { testCases := []struct { name string env Env expectedEnvValue string expectedEnvExists bool }{ { name: "exists env", env: Env{ Name: "name", Value: "value", Exists: true, }, expectedEnvValue: "value", expectedEnvExists: true, }, { name: "no exists env", env: Env{ Name: "name", Exists: false, }, expectedEnvExists: false, }, } for _, tc := range testCases { s.Run(tc.name, func() { envStore := newEnvStore() envStore.add(tc.env) // Backup backup := newEnvStore() backup.Record(tc.env.Name) s.Require().NoError(os.Unsetenv(tc.env.Name)) s.Assert().NoError(envStore.Restore()) v, exists := os.LookupEnv(tc.env.Name) s.Assert().Equal(tc.expectedEnvValue, v) s.Assert().Equal(tc.expectedEnvExists, exists) // Restore s.Require().NoError(backup.Restore()) }) } } func (s *EnvStoreTestSuite) Test_setEnv() { testCases := []struct { name string key string value string expectedEnvStore *envStore expectedEnvValue string expectedEnvExists bool }{ { name: "normal", key: "name", value: "value", expectedEnvStore: &envStore{store: map[string]Env{ "name": { Name: "name", Value: "other value", Exists: true, }, }}, expectedEnvValue: "value", expectedEnvExists: true, }, } for _, tc := range testCases { s.Run(tc.name, func() { envStore := newEnvStore() // Backup backup := newEnvStore() backup.Record(tc.key) s.Require().NoError(os.Setenv(tc.key, "other value")) s.Assert().NoError(envStore.setEnv(tc.key, tc.value)) s.Assert().Equal(tc.expectedEnvStore, envStore) v, exists := os.LookupEnv(tc.key) s.Assert().Equal(tc.expectedEnvValue, v) s.Assert().Equal(tc.expectedEnvExists, exists) // Restore s.Require().NoError(backup.Restore()) }) } } func TestEnvStoreTestSuite(t *testing.T) { suite.Run(t, new(EnvStoreTestSuite)) } func TestSetEnvVariables(t *testing.T) { envs := map[string]string{ "name1": "value1", "name2": "value2", } // Backup backup := newEnvStore() for k := range envs { backup.Record(k) } defer func() { require.NoError(t, backup.Restore()) }() store, err := SetEnvVariables(envs) assert.NoError(t, err) require.IsType(t, &envStore{}, store) concreteStore := store.(*envStore) 
assert.Len(t, concreteStore.store, 2) assert.Equal(t, backup, concreteStore) } opentelemetry-go-1.21.0/sdk/internal/internaltest/errors.go000066400000000000000000000016651452547353200240700ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/errors.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest // import "go.opentelemetry.io/otel/sdk/internal/internaltest" type TestError string var _ error = TestError("") func NewTestError(s string) error { return TestError(s) } func (e TestError) Error() string { return string(e) } opentelemetry-go-1.21.0/sdk/internal/internaltest/harness.go000066400000000000000000000216611452547353200242150ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/harness.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest // import "go.opentelemetry.io/otel/sdk/internal/internaltest" import ( "context" "fmt" "sync" "testing" "time" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/sdk/internal/matchers" "go.opentelemetry.io/otel/trace" ) // Harness is a testing harness used to test implementations of the // OpenTelemetry API. type Harness struct { t *testing.T } // NewHarness returns an instantiated *Harness using t. func NewHarness(t *testing.T) *Harness { return &Harness{ t: t, } } // TestTracerProvider runs validation tests for an implementation of the OpenTelemetry // TracerProvider API. 
func (h *Harness) TestTracerProvider(subjectFactory func() trace.TracerProvider) { h.t.Run("#Start", func(t *testing.T) { t.Run("allow creating an arbitrary number of TracerProvider instances", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) tp1 := subjectFactory() tp2 := subjectFactory() e.Expect(tp1).NotToEqual(tp2) }) t.Run("all methods are safe to be called concurrently", func(t *testing.T) { t.Parallel() runner := func(tp trace.TracerProvider) <-chan struct{} { done := make(chan struct{}) go func(tp trace.TracerProvider) { var wg sync.WaitGroup for i := 0; i < 20; i++ { wg.Add(1) go func(name, version string) { _ = tp.Tracer(name, trace.WithInstrumentationVersion(version)) wg.Done() }(fmt.Sprintf("tracer %d", i%5), fmt.Sprintf("%d", i)) } wg.Wait() done <- struct{}{} }(tp) return done } matchers.NewExpecter(t).Expect(func() { // Run with multiple TracerProvider to ensure they encapsulate // their own Tracers. tp1 := subjectFactory() tp2 := subjectFactory() done1 := runner(tp1) done2 := runner(tp2) <-done1 <-done2 }).NotToPanic() }) }) } // TestTracer runs validation tests for an implementation of the OpenTelemetry // Tracer API. func (h *Harness) TestTracer(subjectFactory func() trace.Tracer) { h.t.Run("#Start", func(t *testing.T) { t.Run("propagates the original context", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() ctxKey := testCtxKey{} ctxValue := "ctx value" ctx := context.WithValue(context.Background(), ctxKey, ctxValue) ctx, _ = subject.Start(ctx, "test") e.Expect(ctx.Value(ctxKey)).ToEqual(ctxValue) }) t.Run("returns a span containing the expected properties", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() _, span := subject.Start(context.Background(), "test") e.Expect(span).NotToBeNil() e.Expect(span.SpanContext().IsValid()).ToBeTrue() }) t.Run("stores the span on the provided context", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() ctx, span := subject.Start(context.Background(), "test") e.Expect(span).NotToBeNil() e.Expect(span.SpanContext()).NotToEqual(trace.SpanContext{}) e.Expect(trace.SpanFromContext(ctx)).ToEqual(span) }) t.Run("starts spans with unique trace and span IDs", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() _, span1 := subject.Start(context.Background(), "span1") _, span2 := subject.Start(context.Background(), "span2") sc1 := span1.SpanContext() sc2 := span2.SpanContext() e.Expect(sc1.TraceID()).NotToEqual(sc2.TraceID()) e.Expect(sc1.SpanID()).NotToEqual(sc2.SpanID()) }) t.Run("propagates a parent's trace ID through the context", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() ctx, parent := subject.Start(context.Background(), "parent") _, child := subject.Start(ctx, "child") psc := parent.SpanContext() csc := child.SpanContext() e.Expect(csc.TraceID()).ToEqual(psc.TraceID()) e.Expect(csc.SpanID()).NotToEqual(psc.SpanID()) }) t.Run("ignores parent's trace ID when new root is requested", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() ctx, parent := subject.Start(context.Background(), "parent") _, child := subject.Start(ctx, "child", trace.WithNewRoot()) psc := parent.SpanContext() csc := child.SpanContext() e.Expect(csc.TraceID()).NotToEqual(psc.TraceID()) e.Expect(csc.SpanID()).NotToEqual(psc.SpanID()) }) t.Run("propagates remote parent's trace ID through the context", func(t 
*testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() _, remoteParent := subject.Start(context.Background(), "remote parent") parentCtx := trace.ContextWithRemoteSpanContext(context.Background(), remoteParent.SpanContext()) _, child := subject.Start(parentCtx, "child") psc := remoteParent.SpanContext() csc := child.SpanContext() e.Expect(csc.TraceID()).ToEqual(psc.TraceID()) e.Expect(csc.SpanID()).NotToEqual(psc.SpanID()) }) t.Run("ignores remote parent's trace ID when new root is requested", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) subject := subjectFactory() _, remoteParent := subject.Start(context.Background(), "remote parent") parentCtx := trace.ContextWithRemoteSpanContext(context.Background(), remoteParent.SpanContext()) _, child := subject.Start(parentCtx, "child", trace.WithNewRoot()) psc := remoteParent.SpanContext() csc := child.SpanContext() e.Expect(csc.TraceID()).NotToEqual(psc.TraceID()) e.Expect(csc.SpanID()).NotToEqual(psc.SpanID()) }) t.Run("all methods are safe to be called concurrently", func(t *testing.T) { t.Parallel() e := matchers.NewExpecter(t) tracer := subjectFactory() ctx, parent := tracer.Start(context.Background(), "span") runner := func(tp trace.Tracer) <-chan struct{} { done := make(chan struct{}) go func(tp trace.Tracer) { var wg sync.WaitGroup for i := 0; i < 20; i++ { wg.Add(1) go func(name string) { defer wg.Done() _, child := tp.Start(ctx, name) psc := parent.SpanContext() csc := child.SpanContext() e.Expect(csc.TraceID()).ToEqual(psc.TraceID()) e.Expect(csc.SpanID()).NotToEqual(psc.SpanID()) }(fmt.Sprintf("span %d", i)) } wg.Wait() done <- struct{}{} }(tp) return done } e.Expect(func() { done := runner(tracer) <-done }).NotToPanic() }) }) h.testSpan(subjectFactory) } func (h *Harness) testSpan(tracerFactory func() trace.Tracer) { methods := map[string]func(span trace.Span){ "#End": func(span trace.Span) { span.End() }, "#AddEvent": func(span trace.Span) { span.AddEvent("test event") }, "#AddEventWithTimestamp": func(span trace.Span) { span.AddEvent("test event", trace.WithTimestamp(time.Now().Add(1*time.Second))) }, "#SetStatus": func(span trace.Span) { span.SetStatus(codes.Error, "internal") }, "#SetName": func(span trace.Span) { span.SetName("new name") }, "#SetAttributes": func(span trace.Span) { span.SetAttributes(attribute.String("key1", "value"), attribute.Int("key2", 123)) }, } mechanisms := map[string]func() trace.Span{ "Span created via Tracer#Start": func() trace.Span { tracer := tracerFactory() _, subject := tracer.Start(context.Background(), "test") return subject }, "Span created via span.TracerProvider()": func() trace.Span { ctx, spanA := tracerFactory().Start(context.Background(), "span1") _, spanB := spanA.TracerProvider().Tracer("second").Start(ctx, "span2") return spanB }, } for mechanismName, mechanism := range mechanisms { h.t.Run(mechanismName, func(t *testing.T) { for methodName, method := range methods { t.Run(methodName, func(t *testing.T) { t.Run("is thread-safe", func(t *testing.T) { t.Parallel() span := mechanism() wg := &sync.WaitGroup{} wg.Add(2) go func() { defer wg.Done() method(span) }() go func() { defer wg.Done() method(span) }() wg.Wait() }) }) } t.Run("#End", func(t *testing.T) { t.Run("can be called multiple times", func(t *testing.T) { t.Parallel() span := mechanism() span.End() span.End() }) }) }) } } type testCtxKey struct{} 
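// Illustrative sketch (not part of the upstream 1.21.0 sources): how a test elsewhere in
// this module might drive the harness above against the trace SDK. The package name,
// test name, and the choice of the default SDK TracerProvider are assumptions; because
// internaltest is an internal package, this only compiles from within the
// go.opentelemetry.io/otel/sdk module.
package trace_test

import (
	"testing"

	"go.opentelemetry.io/otel/sdk/internal/internaltest"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/trace"
)

func TestTracerConformance(t *testing.T) {
	h := internaltest.NewHarness(t)
	// Each factory must return an independent implementation instance.
	h.TestTracerProvider(func() trace.TracerProvider { return sdktrace.NewTracerProvider() })
	h.TestTracer(func() trace.Tracer { return sdktrace.NewTracerProvider().Tracer("harness") })
}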
opentelemetry-go-1.21.0/sdk/internal/internaltest/text_map_carrier.go000066400000000000000000000073051452547353200261010ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/text_map_carrier.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest // import "go.opentelemetry.io/otel/sdk/internal/internaltest" import ( "sync" "testing" "go.opentelemetry.io/otel/propagation" ) // TextMapCarrier is a storage medium for a TextMapPropagator used in testing. // The methods of a TextMapCarrier are concurrent safe. type TextMapCarrier struct { mtx sync.Mutex gets []string sets [][2]string data map[string]string } var _ propagation.TextMapCarrier = (*TextMapCarrier)(nil) // NewTextMapCarrier returns a new *TextMapCarrier populated with data. func NewTextMapCarrier(data map[string]string) *TextMapCarrier { copied := make(map[string]string, len(data)) for k, v := range data { copied[k] = v } return &TextMapCarrier{data: copied} } // Keys returns the keys for which this carrier has a value. func (c *TextMapCarrier) Keys() []string { c.mtx.Lock() defer c.mtx.Unlock() result := make([]string, 0, len(c.data)) for k := range c.data { result = append(result, k) } return result } // Get returns the value associated with the passed key. func (c *TextMapCarrier) Get(key string) string { c.mtx.Lock() defer c.mtx.Unlock() c.gets = append(c.gets, key) return c.data[key] } // GotKey tests if c.Get has been called for key. func (c *TextMapCarrier) GotKey(t *testing.T, key string) bool { c.mtx.Lock() defer c.mtx.Unlock() for _, k := range c.gets { if k == key { return true } } t.Errorf("TextMapCarrier.Get(%q) has not been called", key) return false } // GotN tests if n calls to c.Get have been made. func (c *TextMapCarrier) GotN(t *testing.T, n int) bool { c.mtx.Lock() defer c.mtx.Unlock() if len(c.gets) != n { t.Errorf("TextMapCarrier.Get was called %d times, not %d", len(c.gets), n) return false } return true } // Set stores the key-value pair. func (c *TextMapCarrier) Set(key, value string) { c.mtx.Lock() defer c.mtx.Unlock() c.sets = append(c.sets, [2]string{key, value}) c.data[key] = value } // SetKeyValue tests if c.Set has been called for the key-value pair. func (c *TextMapCarrier) SetKeyValue(t *testing.T, key, value string) bool { c.mtx.Lock() defer c.mtx.Unlock() var vals []string for _, pair := range c.sets { if key == pair[0] { if value == pair[1] { return true } vals = append(vals, pair[1]) } } if len(vals) > 0 { t.Errorf("TextMapCarrier.Set called with %q and %v values, but not %s", key, vals, value) } t.Errorf("TextMapCarrier.Set(%q,%q) has not been called", key, value) return false } // SetN tests if n calls to c.Set have been made. 
func (c *TextMapCarrier) SetN(t *testing.T, n int) bool { c.mtx.Lock() defer c.mtx.Unlock() if len(c.sets) != n { t.Errorf("TextMapCarrier.Set was called %d times, not %d", len(c.sets), n) return false } return true } // Reset zeros out the recording state and sets the carried values to data. func (c *TextMapCarrier) Reset(data map[string]string) { copied := make(map[string]string, len(data)) for k, v := range data { copied[k] = v } c.mtx.Lock() defer c.mtx.Unlock() c.gets = nil c.sets = nil c.data = copied } opentelemetry-go-1.21.0/sdk/internal/internaltest/text_map_carrier_test.go000066400000000000000000000046051452547353200271400ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/text_map_carrier_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest import ( "reflect" "testing" ) var key, value = "test", "true" func TestTextMapCarrierKeys(t *testing.T) { tmc := NewTextMapCarrier(map[string]string{key: value}) expected, actual := []string{key}, tmc.Keys() if !reflect.DeepEqual(actual, expected) { t.Errorf("expected tmc.Keys() to be %v but it was %v", expected, actual) } } func TestTextMapCarrierGet(t *testing.T) { tmc := NewTextMapCarrier(map[string]string{key: value}) tmc.GotN(t, 0) if got := tmc.Get("empty"); got != "" { t.Errorf("TextMapCarrier.Get returned %q for an empty key", got) } tmc.GotKey(t, "empty") tmc.GotN(t, 1) if got := tmc.Get(key); got != value { t.Errorf("TextMapCarrier.Get(%q) returned %q, want %q", key, got, value) } tmc.GotKey(t, key) tmc.GotN(t, 2) } func TestTextMapCarrierSet(t *testing.T) { tmc := NewTextMapCarrier(nil) tmc.SetN(t, 0) tmc.Set(key, value) if got, ok := tmc.data[key]; !ok { t.Errorf("TextMapCarrier.Set(%q,%q) failed to store pair", key, value) } else if got != value { t.Errorf("TextMapCarrier.Set(%q,%q) stored (%q,%q), not (%q,%q)", key, value, key, got, key, value) } tmc.SetKeyValue(t, key, value) tmc.SetN(t, 1) } func TestTextMapCarrierReset(t *testing.T) { tmc := NewTextMapCarrier(map[string]string{key: value}) tmc.GotN(t, 0) tmc.SetN(t, 0) tmc.Reset(nil) tmc.GotN(t, 0) tmc.SetN(t, 0) if got := tmc.Get(key); got != "" { t.Error("TextMapCarrier.Reset() failed to clear initial data") } tmc.GotN(t, 1) tmc.GotKey(t, key) tmc.Set(key, value) tmc.SetKeyValue(t, key, value) tmc.SetN(t, 1) tmc.Reset(nil) tmc.GotN(t, 0) tmc.SetN(t, 0) if got := tmc.Get(key); got != "" { t.Error("TextMapCarrier.Reset() failed to clear data") } } opentelemetry-go-1.21.0/sdk/internal/internaltest/text_map_propagator.go000066400000000000000000000067471452547353200266410ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/internaltest/text_map_propagator.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest // import "go.opentelemetry.io/otel/sdk/internal/internaltest" import ( "context" "fmt" "strconv" "strings" "testing" "go.opentelemetry.io/otel/propagation" ) type ctxKeyType string type state struct { Injections uint64 Extractions uint64 } func newState(encoded string) state { if encoded == "" { return state{} } s0, s1, _ := strings.Cut(encoded, ",") injects, _ := strconv.ParseUint(s0, 10, 64) extracts, _ := strconv.ParseUint(s1, 10, 64) return state{ Injections: injects, Extractions: extracts, } } func (s state) String() string { return fmt.Sprintf("%d,%d", s.Injections, s.Extractions) } // TextMapPropagator is a propagation.TextMapPropagator used for testing. type TextMapPropagator struct { name string ctxKey ctxKeyType } var _ propagation.TextMapPropagator = (*TextMapPropagator)(nil) // NewTextMapPropagator returns a new TextMapPropagator for testing. It will // use name as the key it injects into a TextMapCarrier when Inject is called. func NewTextMapPropagator(name string) *TextMapPropagator { return &TextMapPropagator{name: name, ctxKey: ctxKeyType(name)} } func (p *TextMapPropagator) stateFromContext(ctx context.Context) state { if v := ctx.Value(p.ctxKey); v != nil { if s, ok := v.(state); ok { return s } } return state{} } func (p *TextMapPropagator) stateFromCarrier(carrier propagation.TextMapCarrier) state { return newState(carrier.Get(p.name)) } // Inject sets cross-cutting concerns for p from ctx into carrier. func (p *TextMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) { s := p.stateFromContext(ctx) s.Injections++ carrier.Set(p.name, s.String()) } // InjectedN tests if p has made n injections to carrier. func (p *TextMapPropagator) InjectedN(t *testing.T, carrier *TextMapCarrier, n int) bool { if actual := p.stateFromCarrier(carrier).Injections; actual != uint64(n) { t.Errorf("TextMapPropagator{%q} injected %d times, not %d", p.name, actual, n) return false } return true } // Extract reads cross-cutting concerns for p from carrier into ctx. func (p *TextMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context { s := p.stateFromCarrier(carrier) s.Extractions++ return context.WithValue(ctx, p.ctxKey, s) } // ExtractedN tests if p has made n extractions from the lineage of ctx. // nolint (context is not first arg) func (p *TextMapPropagator) ExtractedN(t *testing.T, ctx context.Context, n int) bool { if actual := p.stateFromContext(ctx).Extractions; actual != uint64(n) { t.Errorf("TextMapPropagator{%q} extracted %d time, not %d", p.name, actual, n) return false } return true } // Fields returns the name of p as the key who's value is set with Inject. func (p *TextMapPropagator) Fields() []string { return []string{p.name} } opentelemetry-go-1.21.0/sdk/internal/internaltest/text_map_propagator_test.go000066400000000000000000000044141452547353200276650ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. 
// source: internal/shared/internaltest/text_map_propagator_test.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internaltest import ( "context" "testing" ) func TestTextMapPropagatorInjectExtract(t *testing.T) { name := "testing" ctx := context.Background() carrier := NewTextMapCarrier(map[string]string{name: value}) propagator := NewTextMapPropagator(name) propagator.Inject(ctx, carrier) // Carrier value overridden with state. if carrier.SetKeyValue(t, name, "1,0") { // Ensure nothing has been extracted yet. propagator.ExtractedN(t, ctx, 0) // Test the injection was counted. propagator.InjectedN(t, carrier, 1) } ctx = propagator.Extract(ctx, carrier) v := ctx.Value(ctxKeyType(name)) if v == nil { t.Error("TextMapPropagator.Extract failed to extract state") } if s, ok := v.(state); !ok { t.Error("TextMapPropagator.Extract did not extract proper state") } else if s.Extractions != 1 { t.Error("TextMapPropagator.Extract did not increment state.Extractions") } if carrier.GotKey(t, name) { // Test the extraction was counted. propagator.ExtractedN(t, ctx, 1) // Ensure no additional injection was recorded. propagator.InjectedN(t, carrier, 1) } } func TestTextMapPropagatorFields(t *testing.T) { name := "testing" propagator := NewTextMapPropagator(name) if got := propagator.Fields(); len(got) != 1 { t.Errorf("TextMapPropagator.Fields returned %d fields, want 1", len(got)) } else if got[0] != name { t.Errorf("TextMapPropagator.Fields returned %q, want %q", got[0], name) } } func TestNewStateEmpty(t *testing.T) { if want, got := (state{}), newState(""); got != want { t.Errorf("newState(\"\") returned %v, want %v", got, want) } } opentelemetry-go-1.21.0/sdk/internal/matchers/000077500000000000000000000000001452547353200213075ustar00rootroot00000000000000opentelemetry-go-1.21.0/sdk/internal/matchers/expectation.go000066400000000000000000000175561452547353200241770ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/matchers/expectation.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package matchers // import "go.opentelemetry.io/otel/sdk/internal/matchers" import ( "fmt" "reflect" "regexp" "runtime/debug" "strings" "testing" "time" ) var stackTracePruneRE = regexp.MustCompile(`runtime\/debug|testing|internal\/matchers`) type Expectation struct { t *testing.T actual interface{} } func (e *Expectation) ToEqual(expected interface{}) { e.verifyExpectedNotNil(expected) if !reflect.DeepEqual(e.actual, expected) { e.fail(fmt.Sprintf("Expected\n\t%v\nto equal\n\t%v", e.actual, expected)) } } func (e *Expectation) NotToEqual(expected interface{}) { e.verifyExpectedNotNil(expected) if reflect.DeepEqual(e.actual, expected) { e.fail(fmt.Sprintf("Expected\n\t%v\nnot to equal\n\t%v", e.actual, expected)) } } func (e *Expectation) ToBeNil() { if e.actual != nil { e.fail(fmt.Sprintf("Expected\n\t%v\nto be nil", e.actual)) } } func (e *Expectation) NotToBeNil() { if e.actual == nil { e.fail(fmt.Sprintf("Expected\n\t%v\nnot to be nil", e.actual)) } } func (e *Expectation) ToBeTrue() { switch a := e.actual.(type) { case bool: if !a { e.fail(fmt.Sprintf("Expected\n\t%v\nto be true", e.actual)) } default: e.fail(fmt.Sprintf("Cannot check if non-bool value\n\t%v\nis truthy", a)) } } func (e *Expectation) ToBeFalse() { switch a := e.actual.(type) { case bool: if a { e.fail(fmt.Sprintf("Expected\n\t%v\nto be false", e.actual)) } default: e.fail(fmt.Sprintf("Cannot check if non-bool value\n\t%v\nis truthy", a)) } } func (e *Expectation) NotToPanic() { switch a := e.actual.(type) { case func(): func() { defer func() { if recovered := recover(); recovered != nil { e.fail(fmt.Sprintf("Expected panic\n\t%v\nto have not been raised", recovered)) } }() a() }() default: e.fail(fmt.Sprintf("Cannot check if non-func value\n\t%v\nis truthy", a)) } } func (e *Expectation) ToSucceed() { switch actual := e.actual.(type) { case error: if actual != nil { e.fail(fmt.Sprintf("Expected error\n\t%v\nto have succeeded", actual)) } default: e.fail(fmt.Sprintf("Cannot check if non-error value\n\t%v\nsucceeded", actual)) } } func (e *Expectation) ToMatchError(expected interface{}) { e.verifyExpectedNotNil(expected) actual, ok := e.actual.(error) if !ok { e.fail(fmt.Sprintf("Cannot check if non-error value\n\t%v\nmatches error", e.actual)) } switch expected := expected.(type) { case error: if !reflect.DeepEqual(actual, expected) { e.fail(fmt.Sprintf("Expected\n\t%v\nto match error\n\t%v", actual, expected)) } case string: if actual.Error() != expected { e.fail(fmt.Sprintf("Expected\n\t%v\nto match error\n\t%v", actual, expected)) } default: e.fail(fmt.Sprintf("Cannot match\n\t%v\nagainst non-error\n\t%v", actual, expected)) } } func (e *Expectation) ToContain(expected interface{}) { actualValue := reflect.ValueOf(e.actual) actualKind := actualValue.Kind() switch actualKind { case reflect.Array, reflect.Slice: default: e.fail(fmt.Sprintf("Expected\n\t%v\nto be an array", e.actual)) return } expectedValue := reflect.ValueOf(expected) expectedKind := expectedValue.Kind() switch expectedKind { case reflect.Array, reflect.Slice: default: expectedValue = reflect.ValueOf([]interface{}{expected}) } for i := 0; i < expectedValue.Len(); i++ { var contained bool expectedElem := expectedValue.Index(i).Interface() for j := 0; j < actualValue.Len(); j++ { if reflect.DeepEqual(actualValue.Index(j).Interface(), expectedElem) { contained = true break } } if !contained { e.fail(fmt.Sprintf("Expected\n\t%v\nto contain\n\t%v", e.actual, expectedElem)) return } } } func (e *Expectation) NotToContain(expected interface{}) { actualValue 
:= reflect.ValueOf(e.actual) actualKind := actualValue.Kind() switch actualKind { case reflect.Array, reflect.Slice: default: e.fail(fmt.Sprintf("Expected\n\t%v\nto be an array", e.actual)) return } expectedValue := reflect.ValueOf(expected) expectedKind := expectedValue.Kind() switch expectedKind { case reflect.Array, reflect.Slice: default: expectedValue = reflect.ValueOf([]interface{}{expected}) } for i := 0; i < expectedValue.Len(); i++ { expectedElem := expectedValue.Index(i).Interface() for j := 0; j < actualValue.Len(); j++ { if reflect.DeepEqual(actualValue.Index(j).Interface(), expectedElem) { e.fail(fmt.Sprintf("Expected\n\t%v\nnot to contain\n\t%v", e.actual, expectedElem)) return } } } } func (e *Expectation) ToMatchInAnyOrder(expected interface{}) { expectedValue := reflect.ValueOf(expected) expectedKind := expectedValue.Kind() switch expectedKind { case reflect.Array, reflect.Slice: default: e.fail(fmt.Sprintf("Expected\n\t%v\nto be an array", expected)) return } actualValue := reflect.ValueOf(e.actual) actualKind := actualValue.Kind() if actualKind != expectedKind { e.fail(fmt.Sprintf("Expected\n\t%v\nto be the same type as\n\t%v", e.actual, expected)) return } if actualValue.Len() != expectedValue.Len() { e.fail(fmt.Sprintf("Expected\n\t%v\nto have the same length as\n\t%v", e.actual, expected)) return } var unmatched []interface{} for i := 0; i < expectedValue.Len(); i++ { unmatched = append(unmatched, expectedValue.Index(i).Interface()) } for i := 0; i < actualValue.Len(); i++ { var found bool for j, elem := range unmatched { if reflect.DeepEqual(actualValue.Index(i).Interface(), elem) { found = true unmatched = append(unmatched[:j], unmatched[j+1:]...) break } } if !found { e.fail(fmt.Sprintf("Expected\n\t%v\nto contain the same elements as\n\t%v", e.actual, expected)) } } } func (e *Expectation) ToBeTemporally(matcher TemporalMatcher, compareTo interface{}) { if actual, ok := e.actual.(time.Time); ok { ct, ok := compareTo.(time.Time) if !ok { e.fail(fmt.Sprintf("Cannot compare to non-temporal value\n\t%v", compareTo)) return } switch matcher { case Before: if !actual.Before(ct) { e.fail(fmt.Sprintf("Expected\n\t%v\nto be temporally before\n\t%v", e.actual, compareTo)) } case BeforeOrSameTime: if actual.After(ct) { e.fail(fmt.Sprintf("Expected\n\t%v\nto be temporally before or at the same time as\n\t%v", e.actual, compareTo)) } case After: if !actual.After(ct) { e.fail(fmt.Sprintf("Expected\n\t%v\nto be temporally after\n\t%v", e.actual, compareTo)) } case AfterOrSameTime: if actual.Before(ct) { e.fail(fmt.Sprintf("Expected\n\t%v\nto be temporally after or at the same time as\n\t%v", e.actual, compareTo)) } default: e.fail("Cannot compare times with unexpected temporal matcher") } return } e.fail(fmt.Sprintf("Cannot compare non-temporal value\n\t%v", e.actual)) } func (e *Expectation) verifyExpectedNotNil(expected interface{}) { if expected == nil { e.fail("Refusing to compare with <nil>. Use `ToBeNil` or `NotToBeNil` instead.") } } func (e *Expectation) fail(msg string) { // Prune the stack trace so that it's easier to see relevant lines stack := strings.Split(string(debug.Stack()), "\n") var prunedStack []string for _, line := range stack { if !stackTracePruneRE.MatchString(line) { prunedStack = append(prunedStack, line) } } e.t.Fatalf("\n%s\n%s\n", strings.Join(prunedStack, "\n"), msg) } opentelemetry-go-1.21.0/sdk/internal/matchers/expecter.go000066400000000000000000000020071452547353200234540ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY.
// source: internal/shared/matchers/expecter.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package matchers // import "go.opentelemetry.io/otel/sdk/internal/matchers" import ( "testing" ) type Expecter struct { t *testing.T } func NewExpecter(t *testing.T) *Expecter { return &Expecter{ t: t, } } func (a *Expecter) Expect(actual interface{}) *Expectation { return &Expectation{ t: a.t, actual: actual, } } opentelemetry-go-1.21.0/sdk/internal/matchers/temporal_matcher.go000066400000000000000000000017261452547353200251720ustar00rootroot00000000000000// Code created by gotmpl. DO NOT MODIFY. // source: internal/shared/matchers/temporal_matcher.go.tmpl // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package matchers // import "go.opentelemetry.io/otel/sdk/internal/matchers" type TemporalMatcher byte //nolint:revive // ignoring missing comments for unexported constants in an internal package const ( Before TemporalMatcher = iota BeforeOrSameTime After AfterOrSameTime ) opentelemetry-go-1.21.0/sdk/metric/000077500000000000000000000000001452547353200171505ustar00rootroot00000000000000opentelemetry-go-1.21.0/sdk/metric/aggregation.go000066400000000000000000000164271452547353200220000ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( "errors" "fmt" ) // errAgg is wrapped by misconfigured aggregations. var errAgg = errors.New("aggregation") // Aggregation is the aggregation used to summarize recorded measurements. type Aggregation interface { // copy returns a deep copy of the Aggregation. copy() Aggregation // err returns an error for any misconfigured Aggregation. err() error } // AggregationDrop is an Aggregation that drops all recorded data. type AggregationDrop struct{} // AggregationDrop has no parameters. var _ Aggregation = AggregationDrop{} // copy returns a deep copy of d. 
func (d AggregationDrop) copy() Aggregation { return d } // err returns an error for any misconfiguration. A drop aggregation has no // parameters and cannot be misconfigured, therefore this always returns nil. func (AggregationDrop) err() error { return nil } // AggregationDefault is an Aggregation that uses the default instrument kind selection // mapping to select another Aggregation. A metric reader can be configured to // make an aggregation selection based on instrument kind that differs from // the default. This Aggregation ensures the default is used. // // See the [DefaultAggregationSelector] for information about the default // instrument kind selection mapping. type AggregationDefault struct{} // AggregationDefault has no parameters. var _ Aggregation = AggregationDefault{} // copy returns a deep copy of d. func (d AggregationDefault) copy() Aggregation { return d } // err returns an error for any misconfiguration. A default aggregation has no // parameters and cannot be misconfigured, therefore this always returns nil. func (AggregationDefault) err() error { return nil } // AggregationSum is an Aggregation that summarizes a set of measurements as their // arithmetic sum. type AggregationSum struct{} // AggregationSum has no parameters. var _ Aggregation = AggregationSum{} // copy returns a deep copy of s. func (s AggregationSum) copy() Aggregation { return s } // err returns an error for any misconfiguration. A sum aggregation has no // parameters and cannot be misconfigured, therefore this always returns nil. func (AggregationSum) err() error { return nil } // AggregationLastValue is an Aggregation that summarizes a set of measurements as the // last one made. type AggregationLastValue struct{} // AggregationLastValue has no parameters. var _ Aggregation = AggregationLastValue{} // copy returns a deep copy of l. func (l AggregationLastValue) copy() Aggregation { return l } // err returns an error for any misconfiguration. A last-value aggregation has // no parameters and cannot be misconfigured, therefore this always returns // nil. func (AggregationLastValue) err() error { return nil } // AggregationExplicitBucketHistogram is an Aggregation that summarizes a set of // measurements as a histogram with explicitly defined buckets. type AggregationExplicitBucketHistogram struct { // Boundaries are the increasing bucket boundary values. Boundary values // define bucket upper bounds. Buckets are exclusive of their lower // boundary and inclusive of their upper bound (except at positive // infinity). A measurement falls into the bucket whose upper bound is the // smallest boundary that is greater than or equal to the measurement; // measurements greater than every boundary fall into the final (overflow) // bucket. As an example, boundaries defined as: // // []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 1000} // // Will define these buckets: // // (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, 25.0], (25.0, 50.0], // (50.0, 75.0], (75.0, 100.0], (100.0, 250.0], (250.0, 500.0], // (500.0, 1000.0], (1000.0, +∞) Boundaries []float64 // NoMinMax indicates whether to not record the min and max of the // distribution. By default, these extrema are recorded. // // Recording these extrema for cumulative data is expected to have little // value; they will represent the entire life of the instrument instead of // just the current collection cycle. It is recommended to set this to true // for that type of data to avoid computing the low-value extrema.
NoMinMax bool } var _ Aggregation = AggregationExplicitBucketHistogram{} // errHist is returned by misconfigured ExplicitBucketHistograms. var errHist = fmt.Errorf("%w: explicit bucket histogram", errAgg) // err returns an error for any misconfiguration. func (h AggregationExplicitBucketHistogram) err() error { if len(h.Boundaries) <= 1 { return nil } // Check boundaries are monotonic. i := h.Boundaries[0] for _, j := range h.Boundaries[1:] { if i >= j { return fmt.Errorf("%w: non-monotonic boundaries: %v", errHist, h.Boundaries) } i = j } return nil } // copy returns a deep copy of h. func (h AggregationExplicitBucketHistogram) copy() Aggregation { b := make([]float64, len(h.Boundaries)) copy(b, h.Boundaries) return AggregationExplicitBucketHistogram{ Boundaries: b, NoMinMax: h.NoMinMax, } } // AggregationBase2ExponentialHistogram is an Aggregation that summarizes a set of // measurements as a histogram with bucket widths that grow exponentially. type AggregationBase2ExponentialHistogram struct { // MaxSize is the maximum number of buckets to use for the histogram. MaxSize int32 // MaxScale is the maximum resolution scale to use for the histogram. // // MaxScale has a maximum value of 20. Using a value of 20 means the // maximum number of buckets that can fit within the range of a // signed 32-bit integer index could be used. // // MaxScale has a minimum value of -10. Using a value of -10 means only // two buckets will be used. MaxScale int32 // NoMinMax indicates whether to not record the min and max of the // distribution. By default, these extrema are recorded. // // Recording these extrema for cumulative data is expected to have little // value; they will represent the entire life of the instrument instead of // just the current collection cycle. It is recommended to set this to true // for that type of data to avoid computing the low-value extrema. NoMinMax bool } var _ Aggregation = AggregationBase2ExponentialHistogram{} // copy returns a deep copy of the Aggregation. func (e AggregationBase2ExponentialHistogram) copy() Aggregation { return e } const ( expoMaxScale = 20 expoMinScale = -10 ) // errExpoHist is returned by misconfigured Base2ExponentialBucketHistograms. var errExpoHist = fmt.Errorf("%w: exponential histogram", errAgg) // err returns an error for any misconfigured Aggregation. func (e AggregationBase2ExponentialHistogram) err() error { if e.MaxScale > expoMaxScale { return fmt.Errorf("%w: max scale %d is greater than the maximum scale %d", errExpoHist, e.MaxScale, expoMaxScale) } if e.MaxSize <= 0 { return fmt.Errorf("%w: max size %d is less than or equal to zero", errExpoHist, e.MaxSize) } return nil } opentelemetry-go-1.21.0/sdk/metric/aggregation_test.go000066400000000000000000000052511452547353200230300ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License.
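// Illustrative sketch (not part of the upstream 1.21.0 sources), written as if it lived
// in this metric package: how one of the aggregations above is typically selected. A
// View matches every histogram instrument and replaces its default aggregation with
// explicit, hand-picked bucket boundaries. The boundary values and the use of a manual
// reader are assumptions for illustration only.
func exampleExplicitBucketHistogramProvider() *MeterProvider {
	view := NewView(
		Instrument{Kind: InstrumentKindHistogram},
		Stream{Aggregation: AggregationExplicitBucketHistogram{
			// Upper bounds in the instrument's unit; measurements above 1000
			// land in the final (1000, +Inf) bucket.
			Boundaries: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 1000},
		}},
	)
	return NewMeterProvider(WithReader(NewManualReader()), WithView(view))
}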
package metric import ( "testing" "github.com/stretchr/testify/assert" ) func TestAggregationErr(t *testing.T) { t.Run("DropOperation", func(t *testing.T) { assert.NoError(t, AggregationDrop{}.err()) }) t.Run("SumOperation", func(t *testing.T) { assert.NoError(t, AggregationSum{}.err()) }) t.Run("LastValueOperation", func(t *testing.T) { assert.NoError(t, AggregationLastValue{}.err()) }) t.Run("ExplicitBucketHistogramOperation", func(t *testing.T) { assert.NoError(t, AggregationExplicitBucketHistogram{}.err()) assert.NoError(t, AggregationExplicitBucketHistogram{ Boundaries: []float64{0}, NoMinMax: true, }.err()) assert.NoError(t, AggregationExplicitBucketHistogram{ Boundaries: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 1000}, }.err()) }) t.Run("NonmonotonicHistogramBoundaries", func(t *testing.T) { assert.ErrorIs(t, AggregationExplicitBucketHistogram{ Boundaries: []float64{2, 1}, }.err(), errAgg) assert.ErrorIs(t, AggregationExplicitBucketHistogram{ Boundaries: []float64{0, 1, 2, 1, 3, 4}, }.err(), errAgg) }) t.Run("ExponentialHistogramOperation", func(t *testing.T) { assert.NoError(t, AggregationBase2ExponentialHistogram{ MaxSize: 160, MaxScale: 20, }.err()) assert.NoError(t, AggregationBase2ExponentialHistogram{ MaxSize: 1, NoMinMax: true, }.err()) assert.NoError(t, AggregationBase2ExponentialHistogram{ MaxSize: 1024, MaxScale: -3, }.err()) }) t.Run("InvalidExponentialHistogramOperation", func(t *testing.T) { // MaxSize must be greater than 0 assert.ErrorIs(t, AggregationBase2ExponentialHistogram{}.err(), errAgg) // MaxScale must be <= 20 assert.ErrorIs(t, AggregationBase2ExponentialHistogram{ MaxSize: 1, MaxScale: 30, }.err(), errAgg) }) } func TestExplicitBucketHistogramDeepCopy(t *testing.T) { const orig = 0.0 b := []float64{orig} h := AggregationExplicitBucketHistogram{Boundaries: b} cpH := h.copy().(AggregationExplicitBucketHistogram) b[0] = orig + 1 assert.Equal(t, orig, cpH.Boundaries[0], "changing the underlying slice data should not affect the copy") } opentelemetry-go-1.21.0/sdk/metric/benchmark_test.go000066400000000000000000000260341452547353200224750ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License.
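// Illustrative sketch (not part of the upstream 1.21.0 sources), written as if it lived
// in this metric package: selecting the base-2 exponential histogram for a single
// instrument by name. The instrument name is an assumption, and the 160/20 sizing
// mirrors the values exercised by the tests above.
func exampleExponentialHistogramView() View {
	return NewView(
		Instrument{Name: "request.duration"},
		Stream{Aggregation: AggregationBase2ExponentialHistogram{
			MaxSize:  160, // cap on the number of buckets per range
			MaxScale: 20,  // start at the finest resolution; the aggregation lowers the scale as needed
		}},
	)
}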
package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( "context" "strconv" "testing" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) var viewBenchmarks = []struct { Name string Views []View }{ {"NoView", []View{}}, { "DropView", []View{NewView( Instrument{Name: "*"}, Stream{Aggregation: AggregationDrop{}}, )}, }, { "AttrFilterView", []View{NewView( Instrument{Name: "*"}, Stream{AttributeFilter: attribute.NewAllowKeysFilter("K")}, )}, }, } func BenchmarkSyncMeasure(b *testing.B) { for _, bc := range viewBenchmarks { b.Run(bc.Name, benchSyncViews(bc.Views...)) } } func benchSyncViews(views ...View) func(*testing.B) { ctx := context.Background() rdr := NewManualReader() provider := NewMeterProvider(WithReader(rdr), WithView(views...)) meter := provider.Meter("benchSyncViews") return func(b *testing.B) { iCtr, err := meter.Int64Counter("int64-counter") assert.NoError(b, err) b.Run("Int64Counter", benchMeasAttrs(func() measF { return func(s attribute.Set) func() { o := []metric.AddOption{metric.WithAttributeSet(s)} return func() { iCtr.Add(ctx, 1, o...) } } }())) fCtr, err := meter.Float64Counter("float64-counter") assert.NoError(b, err) b.Run("Float64Counter", benchMeasAttrs(func() measF { return func(s attribute.Set) func() { o := []metric.AddOption{metric.WithAttributeSet(s)} return func() { fCtr.Add(ctx, 1, o...) } } }())) iUDCtr, err := meter.Int64UpDownCounter("int64-up-down-counter") assert.NoError(b, err) b.Run("Int64UpDownCounter", benchMeasAttrs(func() measF { return func(s attribute.Set) func() { o := []metric.AddOption{metric.WithAttributeSet(s)} return func() { iUDCtr.Add(ctx, 1, o...) } } }())) fUDCtr, err := meter.Float64UpDownCounter("float64-up-down-counter") assert.NoError(b, err) b.Run("Float64UpDownCounter", benchMeasAttrs(func() measF { return func(s attribute.Set) func() { o := []metric.AddOption{metric.WithAttributeSet(s)} return func() { fUDCtr.Add(ctx, 1, o...) } } }())) iHist, err := meter.Int64Histogram("int64-histogram") assert.NoError(b, err) b.Run("Int64Histogram", benchMeasAttrs(func() measF { return func(s attribute.Set) func() { o := []metric.RecordOption{metric.WithAttributeSet(s)} return func() { iHist.Record(ctx, 1, o...) } } }())) fHist, err := meter.Float64Histogram("float64-histogram") assert.NoError(b, err) b.Run("Float64Histogram", benchMeasAttrs(func() measF { return func(s attribute.Set) func() { o := []metric.RecordOption{metric.WithAttributeSet(s)} return func() { fHist.Record(ctx, 1, o...) 
} } }())) } } type measF func(s attribute.Set) func() func benchMeasAttrs(meas measF) func(*testing.B) { return func(b *testing.B) { b.Run("Attributes/0", func(b *testing.B) { f := meas(*attribute.EmptySet()) b.ReportAllocs() b.ResetTimer() for n := 0; n < b.N; n++ { f() } }) b.Run("Attributes/1", func(b *testing.B) { f := meas(attribute.NewSet(attribute.Bool("K", true))) b.ReportAllocs() b.ResetTimer() for n := 0; n < b.N; n++ { f() } }) b.Run("Attributes/10", func(b *testing.B) { n := 10 attrs := make([]attribute.KeyValue, 0) attrs = append(attrs, attribute.Bool("K", true)) for i := 2; i < n; i++ { attrs = append(attrs, attribute.Int(strconv.Itoa(i), i)) } f := meas(attribute.NewSet(attrs...)) b.ReportAllocs() b.ResetTimer() for n := 0; n < b.N; n++ { f() } }) } } func BenchmarkCollect(b *testing.B) { for _, bc := range viewBenchmarks { b.Run(bc.Name, benchCollectViews(bc.Views...)) } } func benchCollectViews(views ...View) func(*testing.B) { setup := func(name string) (metric.Meter, Reader) { r := NewManualReader() mp := NewMeterProvider(WithReader(r), WithView(views...)) return mp.Meter(name), r } ctx := context.Background() return func(b *testing.B) { b.Run("Int64Counter/1", benchCollectAttrs(func(s attribute.Set) Reader { m, r := setup("benchCollectViews/Int64Counter") i, err := m.Int64Counter("int64-counter") assert.NoError(b, err) i.Add(ctx, 1, metric.WithAttributeSet(s)) return r })) b.Run("Int64Counter/10", benchCollectAttrs(func(s attribute.Set) Reader { m, r := setup("benchCollectViews/Int64Counter") i, err := m.Int64Counter("int64-counter") assert.NoError(b, err) for n := 0; n < 10; n++ { i.Add(ctx, 1, metric.WithAttributeSet(s)) } return r })) b.Run("Float64Counter/1", benchCollectAttrs(func(s attribute.Set) Reader { m, r := setup("benchCollectViews/Float64Counter") i, err := m.Float64Counter("float64-counter") assert.NoError(b, err) i.Add(ctx, 1, metric.WithAttributeSet(s)) return r })) b.Run("Float64Counter/10", benchCollectAttrs(func(s attribute.Set) Reader { m, r := setup("benchCollectViews/Float64Counter") i, err := m.Float64Counter("float64-counter") assert.NoError(b, err) for n := 0; n < 10; n++ { i.Add(ctx, 1, metric.WithAttributeSet(s)) } return r })) b.Run("Int64UpDownCounter/1", benchCollectAttrs(func(s attribute.Set) Reader { m, r := setup("benchCollectViews/Int64UpDownCounter") i, err := m.Int64UpDownCounter("int64-up-down-counter") assert.NoError(b, err) i.Add(ctx, 1, metric.WithAttributeSet(s)) return r })) b.Run("Int64UpDownCounter/10", benchCollectAttrs(func(s attribute.Set) Reader { m, r := setup("benchCollectViews/Int64UpDownCounter") i, err := m.Int64UpDownCounter("int64-up-down-counter") assert.NoError(b, err) for n := 0; n < 10; n++ { i.Add(ctx, 1, metric.WithAttributeSet(s)) } return r })) b.Run("Float64UpDownCounter/1", benchCollectAttrs(func(s attribute.Set) Reader { m, r := setup("benchCollectViews/Float64UpDownCounter") i, err := m.Float64UpDownCounter("float64-up-down-counter") assert.NoError(b, err) i.Add(ctx, 1, metric.WithAttributeSet(s)) return r })) b.Run("Float64UpDownCounter/10", benchCollectAttrs(func(s attribute.Set) Reader { m, r := setup("benchCollectViews/Float64UpDownCounter") i, err := m.Float64UpDownCounter("float64-up-down-counter") assert.NoError(b, err) for n := 0; n < 10; n++ { i.Add(ctx, 1, metric.WithAttributeSet(s)) } return r })) b.Run("Int64Histogram/1", benchCollectAttrs(func(s attribute.Set) Reader { m, r := setup("benchCollectViews/Int64Histogram") i, err := m.Int64Histogram("int64-histogram") assert.NoError(b, err) 
i.Record(ctx, 1, metric.WithAttributeSet(s)) return r })) b.Run("Int64Histogram/10", benchCollectAttrs(func(s attribute.Set) Reader { m, r := setup("benchCollectViews/Int64Histogram") i, err := m.Int64Histogram("int64-histogram") assert.NoError(b, err) for n := 0; n < 10; n++ { i.Record(ctx, 1, metric.WithAttributeSet(s)) } return r })) b.Run("Float64Histogram/1", benchCollectAttrs(func(s attribute.Set) Reader { m, r := setup("benchCollectViews/Float64Histogram") i, err := m.Float64Histogram("float64-histogram") assert.NoError(b, err) i.Record(ctx, 1, metric.WithAttributeSet(s)) return r })) b.Run("Float64Histogram/10", benchCollectAttrs(func(s attribute.Set) Reader { m, r := setup("benchCollectViews/Float64Histogram") i, err := m.Float64Histogram("float64-histogram") assert.NoError(b, err) for n := 0; n < 10; n++ { i.Record(ctx, 1, metric.WithAttributeSet(s)) } return r })) b.Run("Int64ObservableCounter", benchCollectAttrs(func(s attribute.Set) Reader { m, r := setup("benchCollectViews/Int64ObservableCounter") _, err := m.Int64ObservableCounter( "int64-observable-counter", metric.WithInt64Callback(int64Cback(s)), ) assert.NoError(b, err) return r })) b.Run("Float64ObservableCounter", benchCollectAttrs(func(s attribute.Set) Reader { m, r := setup("benchCollectViews/Float64ObservableCounter") _, err := m.Float64ObservableCounter( "float64-observable-counter", metric.WithFloat64Callback(float64Cback(s)), ) assert.NoError(b, err) return r })) b.Run("Int64ObservableUpDownCounter", benchCollectAttrs(func(s attribute.Set) Reader { m, r := setup("benchCollectViews/Int64ObservableUpDownCounter") _, err := m.Int64ObservableUpDownCounter( "int64-observable-up-down-counter", metric.WithInt64Callback(int64Cback(s)), ) assert.NoError(b, err) return r })) b.Run("Float64ObservableUpDownCounter", benchCollectAttrs(func(s attribute.Set) Reader { m, r := setup("benchCollectViews/Float64ObservableUpDownCounter") _, err := m.Float64ObservableUpDownCounter( "float64-observable-up-down-counter", metric.WithFloat64Callback(float64Cback(s)), ) assert.NoError(b, err) return r })) b.Run("Int64ObservableGauge", benchCollectAttrs(func(s attribute.Set) Reader { m, r := setup("benchCollectViews/Int64ObservableGauge") _, err := m.Int64ObservableGauge( "int64-observable-gauge", metric.WithInt64Callback(int64Cback(s)), ) assert.NoError(b, err) return r })) b.Run("Float64ObservableGauge", benchCollectAttrs(func(s attribute.Set) Reader { m, r := setup("benchCollectViews/Float64ObservableGauge") _, err := m.Float64ObservableGauge( "float64-observable-gauge", metric.WithFloat64Callback(float64Cback(s)), ) assert.NoError(b, err) return r })) } } func int64Cback(s attribute.Set) metric.Int64Callback { opt := []metric.ObserveOption{metric.WithAttributeSet(s)} return func(_ context.Context, o metric.Int64Observer) error { o.Observe(1, opt...) return nil } } func float64Cback(s attribute.Set) metric.Float64Callback { opt := []metric.ObserveOption{metric.WithAttributeSet(s)} return func(_ context.Context, o metric.Float64Observer) error { o.Observe(1, opt...) 
return nil } } func benchCollectAttrs(setup func(attribute.Set) Reader) func(*testing.B) { ctx := context.Background() out := new(metricdata.ResourceMetrics) run := func(reader Reader) func(b *testing.B) { return func(b *testing.B) { b.ReportAllocs() for n := 0; n < b.N; n++ { _ = reader.Collect(ctx, out) } } } return func(b *testing.B) { b.Run("Attributes/0", run(setup(*attribute.EmptySet()))) attrs := []attribute.KeyValue{attribute.Bool("K", true)} b.Run("Attributes/1", run(setup(attribute.NewSet(attrs...)))) for i := 2; i < 10; i++ { attrs = append(attrs, attribute.Int(strconv.Itoa(i), i)) } b.Run("Attributes/10", run(setup(attribute.NewSet(attrs...)))) } } opentelemetry-go-1.21.0/sdk/metric/cache.go000066400000000000000000000030021452547353200205350ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( "sync" ) // cache is a locking storage used to quickly return already computed values. // // The zero value of a cache is empty and ready to use. // // A cache must not be copied after first use. // // All methods of a cache are safe to call concurrently. type cache[K comparable, V any] struct { sync.Mutex data map[K]V } // Lookup returns the value stored in the cache with the associated key if it // exists. Otherwise, f is called and its returned value is set in the cache // for key and returned. // // Lookup is safe to call concurrently. It will hold the cache lock, so f // should not block excessively. func (c *cache[K, V]) Lookup(key K, f func() V) V { c.Lock() defer c.Unlock() if c.data == nil { val := f() c.data = map[K]V{key: val} return val } if v, ok := c.data[key]; ok { return v } val := f() c.data[key] = val return val } opentelemetry-go-1.21.0/sdk/metric/cache_test.go000066400000000000000000000033061452547353200216030ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
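// Illustrative sketch, not part of the upstream source: typical use of the
// generic cache defined above from within this package. The key "stream-id"
// and the value type int are arbitrary placeholders.
func exampleCacheLookup() int {
	var c cache[string, int] // the zero value is empty and ready to use
	v := c.Lookup("stream-id", func() int {
		return 42 // computed and stored on the first Lookup of this key
	})
	// A second Lookup with the same key returns the cached value; the
	// fallback function below is not invoked.
	v = c.Lookup("stream-id", func() int { return -1 })
	return v // 42
}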
package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( "sync" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestCache(t *testing.T) { k0, k1 := "one", "two" v0, v1 := 1, 2 c := cache[string, int]{} var got int require.NotPanics(t, func() { got = c.Lookup(k0, func() int { return v0 }) }, "zero-value cache panics on Lookup") assert.Equal(t, v0, got, "zero-value cache did not return fallback") assert.Equal(t, v0, c.Lookup(k0, func() int { return v1 }), "existing key") assert.Equal(t, v1, c.Lookup(k1, func() int { return v1 }), "non-existing key") } func TestCacheConcurrentSafe(t *testing.T) { const ( key = "k" goroutines = 10 ) c := cache[string, int]{} var wg sync.WaitGroup for n := 0; n < goroutines; n++ { wg.Add(1) go func(i int) { defer wg.Done() assert.NotPanics(t, func() { c.Lookup(key, func() int { return i }) }) }(n) } done := make(chan struct{}) go func() { wg.Wait() close(done) }() select { case <-done: case <-time.After(5 * time.Second): assert.Fail(t, "timeout") } } opentelemetry-go-1.21.0/sdk/metric/config.go000066400000000000000000000102701452547353200207440ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( "context" "fmt" "sync" "go.opentelemetry.io/otel/sdk/resource" ) // config contains configuration options for a MeterProvider. type config struct { res *resource.Resource readers []Reader views []View } // readerSignals returns a force-flush and shutdown function for a // MeterProvider to call in its ForceFlush and Shutdown methods. All Readers c contains // will have their force-flush and shutdown methods unified into the returned // single functions. func (c config) readerSignals() (forceFlush, shutdown func(context.Context) error) { var fFuncs, sFuncs []func(context.Context) error for _, r := range c.readers { sFuncs = append(sFuncs, r.Shutdown) if f, ok := r.(interface{ ForceFlush(context.Context) error }); ok { fFuncs = append(fFuncs, f.ForceFlush) } } return unify(fFuncs), unifyShutdown(sFuncs) } // unify unifies calling all of funcs into a single function call. All errors // returned from calls to funcs will be unified into a single error return // value. func unify(funcs []func(context.Context) error) func(context.Context) error { return func(ctx context.Context) error { var errs []error for _, f := range funcs { if err := f(ctx); err != nil { errs = append(errs, err) } } return unifyErrors(errs) } } // unifyErrors combines multiple errors into a single error. func unifyErrors(errs []error) error { switch len(errs) { case 0: return nil case 1: return errs[0] default: return fmt.Errorf("%v", errs) } } // unifyShutdown unifies calling all of funcs once for a shutdown. If called // more than once, an ErrReaderShutdown error is returned. 
func unifyShutdown(funcs []func(context.Context) error) func(context.Context) error { f := unify(funcs) var once sync.Once return func(ctx context.Context) error { err := ErrReaderShutdown once.Do(func() { err = f(ctx) }) return err } } // newConfig returns a config configured with options. func newConfig(options []Option) config { conf := config{res: resource.Default()} for _, o := range options { conf = o.apply(conf) } return conf } // Option applies a configuration option value to a MeterProvider. type Option interface { apply(config) config } // optionFunc applies a set of options to a config. type optionFunc func(config) config // apply returns a config with option(s) applied. func (o optionFunc) apply(conf config) config { return o(conf) } // WithResource associates a Resource with a MeterProvider. This Resource // represents the entity producing telemetry and is associated with all Meters // the MeterProvider will create. // // By default, if this Option is not used, the default Resource from the // go.opentelemetry.io/otel/sdk/resource package will be used. func WithResource(res *resource.Resource) Option { return optionFunc(func(conf config) config { conf.res = res return conf }) } // WithReader associates Reader r with a MeterProvider. // // By default, if this option is not used, the MeterProvider will perform no // operations; no data will be exported without a Reader. func WithReader(r Reader) Option { return optionFunc(func(cfg config) config { if r == nil { return cfg } cfg.readers = append(cfg.readers, r) return cfg }) } // WithView associates views with a MeterProvider. // // Views are appended to existing ones in a MeterProvider if this option is // used multiple times. // // By default, if this option is not used, the MeterProvider will use the // default view. func WithView(views ...View) Option { return optionFunc(func(cfg config) config { cfg.views = append(cfg.views, views...) return cfg }) } opentelemetry-go-1.21.0/sdk/metric/config_test.go000066400000000000000000000102001452547353200217760 0ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric import ( "context" "fmt" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/resource" ) type reader struct { producer sdkProducer externalProducers []Producer temporalityFunc TemporalitySelector aggregationFunc AggregationSelector collectFunc func(context.Context, *metricdata.ResourceMetrics) error forceFlushFunc func(context.Context) error shutdownFunc func(context.Context) error } var _ Reader = (*reader)(nil) func (r *reader) aggregation(kind InstrumentKind) Aggregation { // nolint:revive // import-shadow for method scoped by type. 
return r.aggregationFunc(kind) } func (r *reader) register(p sdkProducer) { r.producer = p } func (r *reader) RegisterProducer(p Producer) { r.externalProducers = append(r.externalProducers, p) } func (r *reader) temporality(kind InstrumentKind) metricdata.Temporality { return r.temporalityFunc(kind) } func (r *reader) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error { return r.collectFunc(ctx, rm) } func (r *reader) ForceFlush(ctx context.Context) error { return r.forceFlushFunc(ctx) } func (r *reader) Shutdown(ctx context.Context) error { return r.shutdownFunc(ctx) } func TestConfigReaderSignalsEmpty(t *testing.T) { f, s := config{}.readerSignals() require.NotNil(t, f) require.NotNil(t, s) ctx := context.Background() assert.Nil(t, f(ctx)) assert.Nil(t, s(ctx)) assert.ErrorIs(t, s(ctx), ErrReaderShutdown) } func TestConfigReaderSignalsForwarded(t *testing.T) { var flush, sdown int r := &reader{ forceFlushFunc: func(ctx context.Context) error { flush++ return nil }, shutdownFunc: func(ctx context.Context) error { sdown++ return nil }, } c := newConfig([]Option{WithReader(r)}) f, s := c.readerSignals() require.NotNil(t, f) require.NotNil(t, s) ctx := context.Background() assert.NoError(t, f(ctx)) assert.NoError(t, f(ctx)) assert.NoError(t, s(ctx)) assert.ErrorIs(t, s(ctx), ErrReaderShutdown) assert.Equal(t, 2, flush, "flush not called 2 times") assert.Equal(t, 1, sdown, "shutdown not called 1 time") } func TestConfigReaderSignalsForwardedErrors(t *testing.T) { r := &reader{ forceFlushFunc: func(ctx context.Context) error { return assert.AnError }, shutdownFunc: func(ctx context.Context) error { return assert.AnError }, } c := newConfig([]Option{WithReader(r)}) f, s := c.readerSignals() require.NotNil(t, f) require.NotNil(t, s) ctx := context.Background() assert.ErrorIs(t, f(ctx), assert.AnError) assert.ErrorIs(t, s(ctx), assert.AnError) assert.ErrorIs(t, s(ctx), ErrReaderShutdown) } func TestUnifyMultiError(t *testing.T) { f := func(context.Context) error { return assert.AnError } funcs := []func(context.Context) error{f, f, f} errs := []error{assert.AnError, assert.AnError, assert.AnError} target := fmt.Errorf("%v", errs) assert.Equal(t, unify(funcs)(context.Background()), target) } func TestWithResource(t *testing.T) { res := resource.NewSchemaless() c := newConfig([]Option{WithResource(res)}) assert.Same(t, res, c.res) } func TestWithReader(t *testing.T) { r := &reader{} c := newConfig([]Option{WithReader(r)}) require.Len(t, c.readers, 1) assert.Same(t, r, c.readers[0]) } func TestWithView(t *testing.T) { c := newConfig([]Option{WithView( NewView( Instrument{Kind: InstrumentKindObservableCounter}, Stream{Name: "a"}, ), NewView( Instrument{Kind: InstrumentKindCounter}, Stream{Name: "b"}, ), )}) assert.Len(t, c.views, 2) } opentelemetry-go-1.21.0/sdk/metric/doc.go000066400000000000000000000045731452547353200202550ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
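// Illustrative sketch, not part of the upstream source: composing the options
// defined and tested above into a MeterProvider. The service name is
// hypothetical; the packages are assumed imported as
// sdkmetric "go.opentelemetry.io/otel/sdk/metric",
// "go.opentelemetry.io/otel/sdk/resource", and
// semconv "go.opentelemetry.io/otel/semconv/v1.21.0".
func newConfiguredProvider() *sdkmetric.MeterProvider {
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName("example-service"),
	)
	return sdkmetric.NewMeterProvider(
		sdkmetric.WithResource(res),                       // identifies the telemetry origin
		sdkmetric.WithReader(sdkmetric.NewManualReader()), // without a Reader, nothing is exported
		sdkmetric.WithView(sdkmetric.NewView(              // optional stream customization
			sdkmetric.Instrument{Name: "*.ms"},
			sdkmetric.Stream{Unit: "ms"},
		)),
	)
}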
// Package metric provides an implementation of the OpenTelemetry metrics SDK. // // See https://opentelemetry.io/docs/concepts/signals/metrics/ for information // about the concept of OpenTelemetry metrics and // https://opentelemetry.io/docs/concepts/components/ for more information // about OpenTelemetry SDKs. // // The entry point for the metric package is the MeterProvider. It is the // object that all API calls use to create Meters, instruments, and ultimately // make metric measurements. Also, it is an object that should be used to // control the life-cycle (start, flush, and shutdown) of the SDK. // // A MeterProvider needs to be configured to export the measured data; this is // done by configuring it with a Reader implementation (using the WithReader // MeterProviderOption). Readers take two forms: ones that push to an endpoint // (NewPeriodicReader), and ones that an endpoint pulls from. See // [go.opentelemetry.io/otel/exporters] for exporters that can be used as // or with these Readers. // // Each Reader, when registered with the MeterProvider, can be augmented with a // View. Views allow users that run OpenTelemetry instrumented code to modify // the generated data of that instrumentation. // // The data generated by a MeterProvider needs to include information about its // origin. A MeterProvider needs to be configured with a Resource, using the // WithResource MeterProviderOption, to include this information. This Resource // should be used to describe the unique runtime environment instrumented code // is being run on. That way when multiple instances of the code are collected // at a single endpoint their origin is decipherable. // // See [go.opentelemetry.io/otel/metric] for more information about // the metric API. package metric // import "go.opentelemetry.io/otel/sdk/metric" opentelemetry-go-1.21.0/sdk/metric/env.go000066400000000000000000000032161452547353200202710ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( "os" "strconv" "time" "go.opentelemetry.io/otel/internal/global" ) // Environment variable names. const ( // The time interval (in milliseconds) between the start of two export attempts. envInterval = "OTEL_METRIC_EXPORT_INTERVAL" // Maximum allowed time (in milliseconds) to export data. envTimeout = "OTEL_METRIC_EXPORT_TIMEOUT" ) // envDuration returns an environment variable's value as duration in milliseconds if it exists, // or the defaultValue if the environment variable is not defined or the value is not valid. 
func envDuration(key string, defaultValue time.Duration) time.Duration { v := os.Getenv(key) if v == "" { return defaultValue } d, err := strconv.Atoi(v) if err != nil { global.Error(err, "parse duration", "environment variable", key, "value", v) return defaultValue } if d <= 0 { global.Error(errNonPositiveDuration, "non-positive duration", "environment variable", key, "value", v) return defaultValue } return time.Duration(d) * time.Millisecond } opentelemetry-go-1.21.0/sdk/metric/example_test.go000066400000000000000000000165241452547353200222010ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric_test import ( "context" "fmt" "log" "regexp" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/resource" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" ) // To enable metrics in your application using the SDK, // you'll need to have an initialized [MeterProvider] // that will let you create a [go.opentelemetry.io/otel/metric.Meter]. // // Here's how you might initialize a metrics provider. func Example() { // Create resource. res, err := resource.Merge(resource.Default(), resource.NewWithAttributes(semconv.SchemaURL, semconv.ServiceName("my-service"), semconv.ServiceVersion("0.1.0"), )) if err != nil { log.Fatalln(err) } // This reader is used as a stand-in for a reader that will actually export // data. See https://pkg.go.dev/go.opentelemetry.io/otel/exporters for // exporters that can be used as or with readers. reader := metric.NewManualReader() // Create a meter provider. // You can pass this instance directly to your instrumented code if it // accepts a MeterProvider instance. meterProvider := metric.NewMeterProvider( metric.WithResource(res), metric.WithReader(reader), ) // Handle shutdown properly so that nothing leaks. defer func() { err := meterProvider.Shutdown(context.Background()) if err != nil { log.Fatalln(err) } }() // Register as global meter provider so that it can be used via otel.Meter // and accessed using otel.GetMeterProvider. // Most instrumentation libraries use the global meter provider as default. // If the global meter provider is not set then a no-op implementation // is used, which fails to generate data. otel.SetMeterProvider(meterProvider) } func ExampleView() { // The NewView function provides convenient creation of common Views // construction. However, it is limited in what it can create. // // When NewView is not able to provide the functionally needed, a custom // View can be constructed directly. Here a custom View is constructed that // uses Go's regular expression matching to ensure all data stream names // have a suffix of the units it uses. re := regexp.MustCompile(`[._](ms|byte)$`) var view metric.View = func(i metric.Instrument) (metric.Stream, bool) { // In a custom View function, you need to explicitly copy // the name, description, and unit. 
s := metric.Stream{Name: i.Name, Description: i.Description, Unit: i.Unit} // Any instrument that does not have a unit suffix defined, but has a // dimensional unit defined, update the name with a unit suffix. if re.MatchString(i.Name) { return s, false } switch i.Unit { case "ms": s.Name += ".ms" case "By": s.Name += ".byte" default: return s, false } return s, true } // The created view can then be registered with the OpenTelemetry metric // SDK using the WithView option. _ = metric.NewMeterProvider( metric.WithView(view), ) // Below is an example of how the view will // function in the SDK for certain instruments. stream, _ := view(metric.Instrument{ Name: "computation.time.ms", Unit: "ms", }) fmt.Println("name:", stream.Name) stream, _ = view(metric.Instrument{ Name: "heap.size", Unit: "By", }) fmt.Println("name:", stream.Name) // Output: // name: computation.time.ms // name: heap.size.byte } func ExampleNewView() { // Create a view that renames the "latency" instrument from the v0.34.0 // version of the "http" instrumentation library as "request.latency". view := metric.NewView(metric.Instrument{ Name: "latency", Scope: instrumentation.Scope{ Name: "http", Version: "0.34.0", }, }, metric.Stream{Name: "request.latency"}) // The created view can then be registered with the OpenTelemetry metric // SDK using the WithView option. _ = metric.NewMeterProvider( metric.WithView(view), ) // Below is an example of how the view will // function in the SDK for certain instruments. stream, _ := view(metric.Instrument{ Name: "latency", Description: "request latency", Unit: "ms", Kind: metric.InstrumentKindCounter, Scope: instrumentation.Scope{ Name: "http", Version: "0.34.0", SchemaURL: "https://opentelemetry.io/schemas/1.0.0", }, }) fmt.Println("name:", stream.Name) fmt.Println("description:", stream.Description) fmt.Println("unit:", stream.Unit) // Output: // name: request.latency // description: request latency // unit: ms } func ExampleNewView_wildcard() { // Create a view that sets unit to milliseconds for any instrument with a // name suffix of ".ms". view := metric.NewView( metric.Instrument{Name: "*.ms"}, metric.Stream{Unit: "ms"}, ) // The created view can then be registered with the OpenTelemetry metric // SDK using the WithView option. _ = metric.NewMeterProvider( metric.WithView(view), ) // Below is an example of how the view will // function in the SDK for certain instruments. stream, _ := view(metric.Instrument{ Name: "computation.time.ms", Unit: "1", }) fmt.Println("name:", stream.Name) fmt.Println("unit:", stream.Unit) // Output: // name: computation.time.ms // unit: ms } func ExampleNewView_drop() { // Create a view that drops the "latency" instrument from the "http" // instrumentation library. view := metric.NewView( metric.Instrument{ Name: "latency", Scope: instrumentation.Scope{Name: "http"}, }, metric.Stream{Aggregation: metric.AggregationDrop{}}, ) // The created view can then be registered with the OpenTelemetry metric // SDK using the WithView option. _ = metric.NewMeterProvider( metric.WithView(view), ) } func ExampleNewView_attributeFilter() { // Create a view that removes the "http.request.method" attribute recorded // by the "latency" instrument from the "http" instrumentation library. 
view := metric.NewView( metric.Instrument{ Name: "latency", Scope: instrumentation.Scope{Name: "http"}, }, metric.Stream{AttributeFilter: attribute.NewDenyKeysFilter("http.request.method")}, ) // The created view can then be registered with the OpenTelemetry metric // SDK using the WithView option. _ = metric.NewMeterProvider( metric.WithView(view), ) } func ExampleNewView_exponentialHistogram() { // Create a view that makes the "latency" instrument from the "http" // instrumentation library to be reported as an exponential histogram. view := metric.NewView( metric.Instrument{ Name: "latency", Scope: instrumentation.Scope{Name: "http"}, }, metric.Stream{ Aggregation: metric.AggregationBase2ExponentialHistogram{ MaxSize: 160, MaxScale: 20, }, }, ) // The created view can then be registered with the OpenTelemetry metric // SDK using the WithView option. _ = metric.NewMeterProvider( metric.WithView(view), ) } opentelemetry-go-1.21.0/sdk/metric/exporter.go000066400000000000000000000070501452547353200213510ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( "context" "fmt" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) // ErrExporterShutdown is returned if Export or Shutdown are called after an // Exporter has been Shutdown. var ErrExporterShutdown = fmt.Errorf("exporter is shutdown") // Exporter handles the delivery of metric data to external receivers. This is // the final component in the metric push pipeline. type Exporter interface { // Temporality returns the Temporality to use for an instrument kind. // // This method needs to be concurrent safe with itself and all the other // Exporter methods. Temporality(InstrumentKind) metricdata.Temporality // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. // Aggregation returns the Aggregation to use for an instrument kind. // // This method needs to be concurrent safe with itself and all the other // Exporter methods. Aggregation(InstrumentKind) Aggregation // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. // Export serializes and transmits metric data to a receiver. // // This is called synchronously, there is no concurrency safety // requirement. Because of this, it is critical that all timeouts and // cancellations of the passed context be honored. // // All retry logic must be contained in this function. The SDK does not // implement any retry logic. All errors returned by this function are // considered unrecoverable and will be reported to a configured error // Handler. // // The passed ResourceMetrics may be reused when the call completes. If an // exporter needs to hold this data after it returns, it needs to make a // copy. 
Export(context.Context, *metricdata.ResourceMetrics) error // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. // ForceFlush flushes any metric data held by an exporter. // // The deadline or cancellation of the passed context must be honored. An // appropriate error should be returned in these situations. // // This method needs to be concurrent safe. ForceFlush(context.Context) error // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. // Shutdown flushes all metric data held by an exporter and releases any // held computational resources. // // The deadline or cancellation of the passed context must be honored. An // appropriate error should be returned in these situations. // // After Shutdown is called, calls to Export will perform no operation and // instead will return an error indicating the shutdown state. // // This method needs to be concurrent safe. Shutdown(context.Context) error // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. } opentelemetry-go-1.21.0/sdk/metric/go.mod000066400000000000000000000013131452547353200202540ustar00rootroot00000000000000module go.opentelemetry.io/otel/sdk/metric go 1.20 require ( github.com/go-logr/logr v1.3.0 github.com/go-logr/stdr v1.2.2 github.com/stretchr/testify v1.8.4 go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel/metric v1.21.0 go.opentelemetry.io/otel/sdk v1.21.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/otel/trace v1.21.0 // indirect golang.org/x/sys v0.14.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) replace go.opentelemetry.io/otel => ../.. 
replace go.opentelemetry.io/otel/metric => ../../metric replace go.opentelemetry.io/otel/trace => ../../trace replace go.opentelemetry.io/otel/sdk => ../ opentelemetry-go-1.21.0/sdk/metric/go.sum000066400000000000000000000027721452547353200203130ustar00rootroot00000000000000github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= opentelemetry-go-1.21.0/sdk/metric/instrument.go000066400000000000000000000255061452547353200217170ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:generate stringer -type=InstrumentKind -trimprefix=InstrumentKind package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( "context" "errors" "fmt" "strings" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/embedded" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" ) var ( zeroInstrumentKind InstrumentKind zeroScope instrumentation.Scope ) // InstrumentKind is the identifier of a group of instruments that all // performing the same function. type InstrumentKind uint8 const ( // instrumentKindUndefined is an undefined instrument kind, it should not // be used by any initialized type. instrumentKindUndefined InstrumentKind = iota // nolint:deadcode,varcheck,unused // InstrumentKindCounter identifies a group of instruments that record // increasing values synchronously with the code path they are measuring. 
InstrumentKindCounter // InstrumentKindUpDownCounter identifies a group of instruments that // record increasing and decreasing values synchronously with the code path // they are measuring. InstrumentKindUpDownCounter // InstrumentKindHistogram identifies a group of instruments that record a // distribution of values synchronously with the code path they are // measuring. InstrumentKindHistogram // InstrumentKindObservableCounter identifies a group of instruments that // record increasing values in an asynchronous callback. InstrumentKindObservableCounter // InstrumentKindObservableUpDownCounter identifies a group of instruments // that record increasing and decreasing values in an asynchronous // callback. InstrumentKindObservableUpDownCounter // InstrumentKindObservableGauge identifies a group of instruments that // record current values in an asynchronous callback. InstrumentKindObservableGauge ) type nonComparable [0]func() // nolint: unused // This is indeed used. // Instrument describes properties an instrument is created with. type Instrument struct { // Name is the human-readable identifier of the instrument. Name string // Description describes the purpose of the instrument. Description string // Kind defines the functional group of the instrument. Kind InstrumentKind // Unit is the unit of measurement recorded by the instrument. Unit string // Scope identifies the instrumentation that created the instrument. Scope instrumentation.Scope // Ensure forward compatibility if non-comparable fields need to be added. nonComparable // nolint: unused } // empty returns if all fields of i are their zero-value. func (i Instrument) empty() bool { return i.Name == "" && i.Description == "" && i.Kind == zeroInstrumentKind && i.Unit == "" && i.Scope == zeroScope } // matches returns whether all the non-zero-value fields of i match the // corresponding fields of other. If i is empty it will match all other, and // true will always be returned. func (i Instrument) matches(other Instrument) bool { return i.matchesName(other) && i.matchesDescription(other) && i.matchesKind(other) && i.matchesUnit(other) && i.matchesScope(other) } // matchesName returns true if the Name of i is "" or it equals the Name of // other, otherwise false. func (i Instrument) matchesName(other Instrument) bool { return i.Name == "" || i.Name == other.Name } // matchesDescription returns true if the Description of i is "" or it equals // the Description of other, otherwise false. func (i Instrument) matchesDescription(other Instrument) bool { return i.Description == "" || i.Description == other.Description } // matchesKind returns true if the Kind of i is its zero-value or it equals the // Kind of other, otherwise false. func (i Instrument) matchesKind(other Instrument) bool { return i.Kind == zeroInstrumentKind || i.Kind == other.Kind } // matchesUnit returns true if the Unit of i is its zero-value or it equals the // Unit of other, otherwise false. func (i Instrument) matchesUnit(other Instrument) bool { return i.Unit == "" || i.Unit == other.Unit } // matchesScope returns true if the Scope of i is its zero-value or it equals // the Scope of other, otherwise false. func (i Instrument) matchesScope(other Instrument) bool { return (i.Scope.Name == "" || i.Scope.Name == other.Scope.Name) && (i.Scope.Version == "" || i.Scope.Version == other.Scope.Version) && (i.Scope.SchemaURL == "" || i.Scope.SchemaURL == other.Scope.SchemaURL) } // Stream describes the stream of data an instrument produces. 
type Stream struct { // Name is the human-readable identifier of the stream. Name string // Description describes the purpose of the data. Description string // Unit is the unit of measurement recorded. Unit string // Aggregation the stream uses for an instrument. Aggregation Aggregation // AttributeFilter is an attribute Filter applied to the attributes // recorded for an instrument's measurement. If the filter returns false // the attribute will not be recorded, otherwise, if it returns true, it // will record the attribute. // // Use NewAllowKeysFilter from "go.opentelemetry.io/otel/attribute" to // provide an allow-list of attribute keys here. AttributeFilter attribute.Filter } // instID are the identifying properties of an instrument. type instID struct { // Name is the name of the stream. Name string // Description is the description of the stream. Description string // Kind defines the functional group of the instrument. Kind InstrumentKind // Unit is the unit of the stream. Unit string // Number is the number type of the stream. Number string } // Returns a normalized copy of the instID i. // // Instrument names are considered case-insensitive. Standardize the instrument // name to always be lowercase for the returned instID so it can be compared // without the name casing affecting the comparison. func (i instID) normalize() instID { i.Name = strings.ToLower(i.Name) return i } type int64Inst struct { measures []aggregate.Measure[int64] embedded.Int64Counter embedded.Int64UpDownCounter embedded.Int64Histogram } var ( _ metric.Int64Counter = (*int64Inst)(nil) _ metric.Int64UpDownCounter = (*int64Inst)(nil) _ metric.Int64Histogram = (*int64Inst)(nil) ) func (i *int64Inst) Add(ctx context.Context, val int64, opts ...metric.AddOption) { c := metric.NewAddConfig(opts) i.aggregate(ctx, val, c.Attributes()) } func (i *int64Inst) Record(ctx context.Context, val int64, opts ...metric.RecordOption) { c := metric.NewRecordConfig(opts) i.aggregate(ctx, val, c.Attributes()) } func (i *int64Inst) aggregate(ctx context.Context, val int64, s attribute.Set) { // nolint:revive // okay to shadow pkg with method. if err := ctx.Err(); err != nil { return } for _, in := range i.measures { in(ctx, val, s) } } type float64Inst struct { measures []aggregate.Measure[float64] embedded.Float64Counter embedded.Float64UpDownCounter embedded.Float64Histogram } var ( _ metric.Float64Counter = (*float64Inst)(nil) _ metric.Float64UpDownCounter = (*float64Inst)(nil) _ metric.Float64Histogram = (*float64Inst)(nil) ) func (i *float64Inst) Add(ctx context.Context, val float64, opts ...metric.AddOption) { c := metric.NewAddConfig(opts) i.aggregate(ctx, val, c.Attributes()) } func (i *float64Inst) Record(ctx context.Context, val float64, opts ...metric.RecordOption) { c := metric.NewRecordConfig(opts) i.aggregate(ctx, val, c.Attributes()) } func (i *float64Inst) aggregate(ctx context.Context, val float64, s attribute.Set) { if err := ctx.Err(); err != nil { return } for _, in := range i.measures { in(ctx, val, s) } } // observablID is a comparable unique identifier of an observable. 
type observablID[N int64 | float64] struct { name string description string kind InstrumentKind unit string scope instrumentation.Scope } type float64Observable struct { metric.Float64Observable *observable[float64] embedded.Float64ObservableCounter embedded.Float64ObservableUpDownCounter embedded.Float64ObservableGauge } var ( _ metric.Float64ObservableCounter = float64Observable{} _ metric.Float64ObservableUpDownCounter = float64Observable{} _ metric.Float64ObservableGauge = float64Observable{} ) func newFloat64Observable(m *meter, kind InstrumentKind, name, desc, u string, meas []aggregate.Measure[float64]) float64Observable { return float64Observable{ observable: newObservable(m, kind, name, desc, u, meas), } } type int64Observable struct { metric.Int64Observable *observable[int64] embedded.Int64ObservableCounter embedded.Int64ObservableUpDownCounter embedded.Int64ObservableGauge } var ( _ metric.Int64ObservableCounter = int64Observable{} _ metric.Int64ObservableUpDownCounter = int64Observable{} _ metric.Int64ObservableGauge = int64Observable{} ) func newInt64Observable(m *meter, kind InstrumentKind, name, desc, u string, meas []aggregate.Measure[int64]) int64Observable { return int64Observable{ observable: newObservable(m, kind, name, desc, u, meas), } } type observable[N int64 | float64] struct { metric.Observable observablID[N] meter *meter measures []aggregate.Measure[N] } func newObservable[N int64 | float64](m *meter, kind InstrumentKind, name, desc, u string, meas []aggregate.Measure[N]) *observable[N] { return &observable[N]{ observablID: observablID[N]{ name: name, description: desc, kind: kind, unit: u, scope: m.scope, }, meter: m, measures: meas, } } // observe records the val for the set of attrs. func (o *observable[N]) observe(val N, s attribute.Set) { for _, in := range o.measures { in(context.Background(), val, s) } } var errEmptyAgg = errors.New("no aggregators for observable instrument") // registerable returns an error if the observable o should not be registered, // and nil if it should. An errEmptyAgg error is returned if o is effectively a // no-op because it does not have any aggregators. Also, an error is returned // if scope defines a Meter other than the one o was created by. func (o *observable[N]) registerable(m *meter) error { if len(o.measures) == 0 { return errEmptyAgg } if m != o.meter { return fmt.Errorf( "invalid registration: observable %q from Meter %q, registered with Meter %q", o.name, o.scope.Name, m.scope.Name, ) } return nil } opentelemetry-go-1.21.0/sdk/metric/instrument_test.go000066400000000000000000000041271452547353200227520ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
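// Illustrative sketch, not part of the upstream source: recording a
// measurement with the synchronous int64 instrument implemented above. The
// scope and instrument names are hypothetical; assumes "context",
// sdkmetric "go.opentelemetry.io/otel/sdk/metric",
// api "go.opentelemetry.io/otel/metric", and
// "go.opentelemetry.io/otel/attribute" are imported.
func exampleRecordCounter(ctx context.Context, mp *sdkmetric.MeterProvider) error {
	meter := mp.Meter("example-scope")
	ctr, err := meter.Int64Counter("requests.total")
	if err != nil {
		return err
	}
	// The attributes are converted to an attribute.Set and passed to each
	// aggregate.Measure registered for the instrument.
	ctr.Add(ctx, 1, api.WithAttributes(attribute.String("user", "Alice")))
	return nil
}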
package metric import ( "context" "testing" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) func BenchmarkInstrument(b *testing.B) { attr := func(id int) attribute.Set { return attribute.NewSet( attribute.String("user", "Alice"), attribute.Bool("admin", true), attribute.Int("id", id), ) } b.Run("instrumentImpl/aggregate", func(b *testing.B) { build := aggregate.Builder[int64]{} var meas []aggregate.Measure[int64] in, _ := build.LastValue() meas = append(meas, in) build.Temporality = metricdata.CumulativeTemporality in, _ = build.Sum(true) meas = append(meas, in) build.Temporality = metricdata.DeltaTemporality in, _ = build.Sum(true) meas = append(meas, in) inst := int64Inst{measures: meas} ctx := context.Background() b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { inst.aggregate(ctx, int64(i), attr(i)) } }) b.Run("observable/observe", func(b *testing.B) { build := aggregate.Builder[int64]{} var meas []aggregate.Measure[int64] in, _ := build.LastValue() meas = append(meas, in) build.Temporality = metricdata.CumulativeTemporality in, _ = build.Sum(true) meas = append(meas, in) build.Temporality = metricdata.DeltaTemporality in, _ = build.Sum(true) meas = append(meas, in) o := observable[int64]{measures: meas} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { o.observe(int64(i), attr(i)) } }) } opentelemetry-go-1.21.0/sdk/metric/instrumentkind_string.go000066400000000000000000000020231452547353200241400ustar00rootroot00000000000000// Code generated by "stringer -type=InstrumentKind -trimprefix=InstrumentKind"; DO NOT EDIT. package metric import "strconv" func _() { // An "invalid array index" compiler error signifies that the constant values have changed. // Re-run the stringer command to generate them again. var x [1]struct{} _ = x[instrumentKindUndefined-0] _ = x[InstrumentKindCounter-1] _ = x[InstrumentKindUpDownCounter-2] _ = x[InstrumentKindHistogram-3] _ = x[InstrumentKindObservableCounter-4] _ = x[InstrumentKindObservableUpDownCounter-5] _ = x[InstrumentKindObservableGauge-6] } const _InstrumentKind_name = "instrumentKindUndefinedCounterUpDownCounterHistogramObservableCounterObservableUpDownCounterObservableGauge" var _InstrumentKind_index = [...]uint8{0, 23, 30, 43, 52, 69, 92, 107} func (i InstrumentKind) String() string { if i >= InstrumentKind(len(_InstrumentKind_index)-1) { return "InstrumentKind(" + strconv.FormatInt(int64(i), 10) + ")" } return _InstrumentKind_name[_InstrumentKind_index[i]:_InstrumentKind_index[i+1]] } opentelemetry-go-1.21.0/sdk/metric/internal/000077500000000000000000000000001452547353200207645ustar00rootroot00000000000000opentelemetry-go-1.21.0/sdk/metric/internal/aggregate/000077500000000000000000000000001452547353200227125ustar00rootroot00000000000000opentelemetry-go-1.21.0/sdk/metric/internal/aggregate/aggregate.go000066400000000000000000000107711452547353200251750ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" import ( "context" "time" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) // now is used to return the current local time while allowing tests to // override the default time.Now function. var now = time.Now // Measure receives measurements to be aggregated. type Measure[N int64 | float64] func(context.Context, N, attribute.Set) // ComputeAggregation stores the aggregate of measurements into dest and // returns the number of aggregate data-points output. type ComputeAggregation func(dest *metricdata.Aggregation) int // Builder builds an aggregate function. type Builder[N int64 | float64] struct { // Temporality is the temporality used for the returned aggregate function. // // If this is not provided, a default of cumulative will be used (except for // the last-value aggregate function where delta is the only appropriate // temporality). Temporality metricdata.Temporality // Filter is the attribute filter the aggregate function will use on the // input of measurements. Filter attribute.Filter } func (b Builder[N]) filter(f Measure[N]) Measure[N] { if b.Filter != nil { fltr := b.Filter // Copy to make it immutable after assignment. return func(ctx context.Context, n N, a attribute.Set) { fAttr, _ := a.Filter(fltr) f(ctx, n, fAttr) } } return f } // LastValue returns a last-value aggregate function input and output. // // The Builder.Temporality is ignored and delta is always used. func (b Builder[N]) LastValue() (Measure[N], ComputeAggregation) { // Delta temporality is the only temporality that makes semantic sense for // a last-value aggregate. lv := newLastValue[N]() return b.filter(lv.measure), func(dest *metricdata.Aggregation) int { // Ignore if dest is not a metricdata.Gauge. The chance for memory // reuse of the DataPoints is missed (better luck next time). gData, _ := (*dest).(metricdata.Gauge[N]) lv.computeAggregation(&gData.DataPoints) *dest = gData return len(gData.DataPoints) } } // PrecomputedSum returns a sum aggregate function input and output. The // arguments passed to the input are expected to be the precomputed sum values. func (b Builder[N]) PrecomputedSum(monotonic bool) (Measure[N], ComputeAggregation) { s := newPrecomputedSum[N](monotonic) switch b.Temporality { case metricdata.DeltaTemporality: return b.filter(s.measure), s.delta default: return b.filter(s.measure), s.cumulative } } // Sum returns a sum aggregate function input and output. func (b Builder[N]) Sum(monotonic bool) (Measure[N], ComputeAggregation) { s := newSum[N](monotonic) switch b.Temporality { case metricdata.DeltaTemporality: return b.filter(s.measure), s.delta default: return b.filter(s.measure), s.cumulative } } // ExplicitBucketHistogram returns a histogram aggregate function input and // output. func (b Builder[N]) ExplicitBucketHistogram(boundaries []float64, noMinMax, noSum bool) (Measure[N], ComputeAggregation) { h := newHistogram[N](boundaries, noMinMax, noSum) switch b.Temporality { case metricdata.DeltaTemporality: return b.filter(h.measure), h.delta default: return b.filter(h.measure), h.cumulative } } // ExponentialBucketHistogram returns a histogram aggregate function input and // output. 
func (b Builder[N]) ExponentialBucketHistogram(maxSize, maxScale int32, noMinMax, noSum bool) (Measure[N], ComputeAggregation) { h := newExponentialHistogram[N](maxSize, maxScale, noMinMax, noSum) switch b.Temporality { case metricdata.DeltaTemporality: return b.filter(h.measure), h.delta default: return b.filter(h.measure), h.cumulative } } // reset ensures s has capacity and sets it length. If the capacity of s too // small, a new slice is returned with the specified capacity and length. func reset[T any](s []T, length, capacity int) []T { if cap(s) < capacity { return make([]T, length, capacity) } return s[:length] } opentelemetry-go-1.21.0/sdk/metric/internal/aggregate/aggregate_test.go000066400000000000000000000105621452547353200262320ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" import ( "context" "strconv" "testing" "time" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" ) var ( keyUser = "user" userAlice = attribute.String(keyUser, "Alice") userBob = attribute.String(keyUser, "Bob") adminTrue = attribute.Bool("admin", true) adminFalse = attribute.Bool("admin", false) alice = attribute.NewSet(userAlice, adminTrue) bob = attribute.NewSet(userBob, adminFalse) // Filtered. attrFltr = func(kv attribute.KeyValue) bool { return kv.Key == attribute.Key(keyUser) } fltrAlice = attribute.NewSet(userAlice) fltrBob = attribute.NewSet(userBob) // Sat Jan 01 2000 00:00:00 GMT+0000. staticTime = time.Unix(946684800, 0) staticNowFunc = func() time.Time { return staticTime } // Pass to t.Cleanup to override the now function with staticNowFunc and // revert once the test completes. E.g. t.Cleanup(mockTime(now)). 
mockTime = func(orig func() time.Time) (cleanup func()) { now = staticNowFunc return func() { now = orig } } ) func TestBuilderFilter(t *testing.T) { t.Run("Int64", testBuilderFilter[int64]()) t.Run("Float64", testBuilderFilter[float64]()) } func testBuilderFilter[N int64 | float64]() func(t *testing.T) { return func(t *testing.T) { t.Helper() value, attr := N(1), alice run := func(b Builder[N], wantA attribute.Set) func(*testing.T) { return func(t *testing.T) { t.Helper() meas := b.filter(func(_ context.Context, v N, a attribute.Set) { assert.Equal(t, value, v, "measured incorrect value") assert.Equal(t, wantA, a, "measured incorrect attributes") }) meas(context.Background(), value, attr) } } t.Run("NoFilter", run(Builder[N]{}, attr)) t.Run("Filter", run(Builder[N]{Filter: attrFltr}, fltrAlice)) } } type arg[N int64 | float64] struct { ctx context.Context value N attr attribute.Set } type output struct { n int agg metricdata.Aggregation } type teststep[N int64 | float64] struct { input []arg[N] expect output } func test[N int64 | float64](meas Measure[N], comp ComputeAggregation, steps []teststep[N]) func(*testing.T) { return func(t *testing.T) { t.Helper() got := new(metricdata.Aggregation) for i, step := range steps { for _, args := range step.input { meas(args.ctx, args.value, args.attr) } t.Logf("step: %d", i) assert.Equal(t, step.expect.n, comp(got), "incorrect data size") metricdatatest.AssertAggregationsEqual(t, step.expect.agg, *got) } } } func benchmarkAggregate[N int64 | float64](factory func() (Measure[N], ComputeAggregation)) func(*testing.B) { counts := []int{1, 10, 100} return func(b *testing.B) { for _, n := range counts { b.Run(strconv.Itoa(n), func(b *testing.B) { benchmarkAggregateN(b, factory, n) }) } } } var bmarkRes metricdata.Aggregation func benchmarkAggregateN[N int64 | float64](b *testing.B, factory func() (Measure[N], ComputeAggregation), count int) { ctx := context.Background() attrs := make([]attribute.Set, count) for i := range attrs { attrs[i] = attribute.NewSet(attribute.Int("value", i)) } b.Run("Measure", func(b *testing.B) { got := &bmarkRes meas, comp := factory() b.ReportAllocs() b.ResetTimer() for n := 0; n < b.N; n++ { for _, attr := range attrs { meas(ctx, 1, attr) } } comp(got) }) b.Run("ComputeAggregation", func(b *testing.B) { comps := make([]ComputeAggregation, b.N) for n := range comps { meas, comp := factory() for _, attr := range attrs { meas(ctx, 1, attr) } comps[n] = comp } got := &bmarkRes b.ReportAllocs() b.ResetTimer() for n := 0; n < b.N; n++ { comps[n](got) } }) } opentelemetry-go-1.21.0/sdk/metric/internal/aggregate/doc.go000066400000000000000000000015701452547353200240110ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package aggregate provides aggregate types used compute aggregations and // cycle the state of metric measurements made by the SDK. These types and // functionality are meant only for internal SDK use. 
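//
// A rough sketch of the intended flow (editorial addition, not part of the
// upstream source; Builder, Measure, and ComputeAggregation are the types
// defined in aggregate.go above):
//
//	in, out := Builder[int64]{Temporality: metricdata.CumulativeTemporality}.Sum(true)
//	in(context.Background(), 5, attribute.NewSet(attribute.String("user", "alice")))
//	var dest metricdata.Aggregation
//	_ = out(&dest) // dest is now a metricdata.Sum[int64] with one data point of value 5.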
package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" opentelemetry-go-1.21.0/sdk/metric/internal/aggregate/exponential_histogram.go000066400000000000000000000262461452547353200276560ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" import ( "context" "errors" "math" "sync" "time" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) const ( expoMaxScale = 20 expoMinScale = -10 smallestNonZeroNormalFloat64 = 0x1p-1022 // These redefine the Math constants with a type, so the compiler won't coerce // them into an int on 32 bit platforms. maxInt64 int64 = math.MaxInt64 minInt64 int64 = math.MinInt64 ) // expoHistogramDataPoint is a single data point in an exponential histogram. type expoHistogramDataPoint[N int64 | float64] struct { count uint64 min N max N sum N maxSize int noMinMax bool noSum bool scale int posBuckets expoBuckets negBuckets expoBuckets zeroCount uint64 } func newExpoHistogramDataPoint[N int64 | float64](maxSize, maxScale int, noMinMax, noSum bool) *expoHistogramDataPoint[N] { f := math.MaxFloat64 max := N(f) // if N is int64, max will overflow to -9223372036854775808 min := N(-f) if N(maxInt64) > N(f) { max = N(maxInt64) min = N(minInt64) } return &expoHistogramDataPoint[N]{ min: max, max: min, maxSize: maxSize, noMinMax: noMinMax, noSum: noSum, scale: maxScale, } } // record adds a new measurement to the histogram. It will rescale the buckets if needed. func (p *expoHistogramDataPoint[N]) record(v N) { p.count++ if !p.noMinMax { if v < p.min { p.min = v } if v > p.max { p.max = v } } if !p.noSum { p.sum += v } absV := math.Abs(float64(v)) if float64(absV) == 0.0 { p.zeroCount++ return } bin := p.getBin(absV) bucket := &p.posBuckets if v < 0 { bucket = &p.negBuckets } // If the new bin would make the counts larger than maxScale, we need to // downscale current measurements. if scaleDelta := p.scaleChange(bin, bucket.startBin, len(bucket.counts)); scaleDelta > 0 { if p.scale-scaleDelta < expoMinScale { // With a scale of -10 there is only two buckets for the whole range of float64 values. // This can only happen if there is a max size of 1. otel.Handle(errors.New("exponential histogram scale underflow")) return } // Downscale p.scale -= scaleDelta p.posBuckets.downscale(scaleDelta) p.negBuckets.downscale(scaleDelta) bin = p.getBin(absV) } bucket.record(bin) } // getBin returns the bin v should be recorded into. func (p *expoHistogramDataPoint[N]) getBin(v float64) int { frac, exp := math.Frexp(v) if p.scale <= 0 { // Because of the choice of fraction is always 1 power of two higher than we want. correction := 1 if frac == .5 { // If v is an exact power of two the frac will be .5 and the exp // will be one higher than we want. 
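// For example (editorial note, a worked case of the formula below): at
// scale -1, recording v = 4 gives math.Frexp(4) = (0.5, 3), so the
// correction is 2 and the bin is (3 - 2) >> 1 = 0, i.e. 4 lands in the
// bucket covering (1, 4].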
			correction = 2
		}
		return (exp - correction) >> (-p.scale)
	}
	return exp<<p.scale + int(math.Log(frac)*scaleFactors[p.scale]) - 1
}

// scaleFactors are constants used in calculating the logarithm index. They are
// equivalent to 2^index/log(2).
var scaleFactors = [21]float64{
	math.Ldexp(math.Log2E, 0),
	math.Ldexp(math.Log2E, 1),
	math.Ldexp(math.Log2E, 2),
	math.Ldexp(math.Log2E, 3),
	math.Ldexp(math.Log2E, 4),
	math.Ldexp(math.Log2E, 5),
	math.Ldexp(math.Log2E, 6),
	math.Ldexp(math.Log2E, 7),
	math.Ldexp(math.Log2E, 8),
	math.Ldexp(math.Log2E, 9),
	math.Ldexp(math.Log2E, 10),
	math.Ldexp(math.Log2E, 11),
	math.Ldexp(math.Log2E, 12),
	math.Ldexp(math.Log2E, 13),
	math.Ldexp(math.Log2E, 14),
	math.Ldexp(math.Log2E, 15),
	math.Ldexp(math.Log2E, 16),
	math.Ldexp(math.Log2E, 17),
	math.Ldexp(math.Log2E, 18),
	math.Ldexp(math.Log2E, 19),
	math.Ldexp(math.Log2E, 20),
}

// scaleChange returns the magnitude of the scale change needed to fit bin in
// the bucket. If no scale change is needed 0 is returned.
func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin, length int) int {
	if length == 0 {
		// No need to rescale if there are no buckets.
		return 0
	}

	low := startBin
	high := bin
	if startBin >= bin {
		low = bin
		high = startBin + length - 1
	}

	count := 0
	for high-low >= p.maxSize {
		low = low >> 1
		high = high >> 1
		count++
		if count > expoMaxScale-expoMinScale {
			return count
		}
	}
	return count
}

// expoBuckets is a set of buckets in an exponential histogram.
type expoBuckets struct {
	startBin int
	counts   []uint64
}

// record increments the count for the given bin, and expands the buckets if needed.
// Size changes must be done before calling this function.
func (b *expoBuckets) record(bin int) {
	if len(b.counts) == 0 {
		b.counts = []uint64{1}
		b.startBin = bin
		return
	}

	endBin := b.startBin + len(b.counts) - 1

	// if the new bin is inside the current range
	if bin >= b.startBin && bin <= endBin {
		b.counts[bin-b.startBin]++
		return
	}
	// if the new bin is before the current start, add spaces to the counts
	if bin < b.startBin {
		origLen := len(b.counts)
		newLength := endBin - bin + 1
		shift := b.startBin - bin

		if newLength > cap(b.counts) {
			b.counts = append(b.counts, make([]uint64, newLength-len(b.counts))...)
		}

		copy(b.counts[shift:origLen+shift], b.counts[:])
		b.counts = b.counts[:newLength]
		for i := 1; i < shift; i++ {
			b.counts[i] = 0
		}
		b.startBin = bin
		b.counts[0] = 1
		return
	}
	// if the new bin is after the end, add spaces to the end
	if bin > endBin {
		if bin-b.startBin < cap(b.counts) {
			b.counts = b.counts[:bin-b.startBin+1]
			for i := endBin + 1 - b.startBin; i < len(b.counts); i++ {
				b.counts[i] = 0
			}
			b.counts[bin-b.startBin] = 1
			return
		}

		end := make([]uint64, bin-b.startBin-len(b.counts)+1)
		b.counts = append(b.counts, end...)
		b.counts[bin-b.startBin] = 1
	}
}

// downscale shrinks a bucket by a factor of 2^delta. It will sum counts into
// the correct lower resolution bucket.
func (b *expoBuckets) downscale(delta int) {
	// Example
	// delta = 2
	// Original offset: -6
	// Counts: [ 3,  1,  2,  3,  4,  5, 6, 7, 8, 9, 10]
	// bins:    -6  -5, -4, -3, -2, -1, 0, 1, 2, 3, 4
	// new bins:-2, -2, -1, -1, -1, -1, 0, 0, 0, 0, 1
	// new Offset: -2
	// new Counts: [4, 14, 30, 10]

	if len(b.counts) <= 1 || delta < 1 {
		b.startBin = b.startBin >> delta
		return
	}

	steps := 1 << delta
	offset := b.startBin % steps
	offset = (offset + steps) % steps // to make offset positive
	for i := 1; i < len(b.counts); i++ {
		idx := i + offset
		if idx%steps == 0 {
			b.counts[idx/steps] = b.counts[i]
			continue
		}
		b.counts[idx/steps] += b.counts[i]
	}

	lastIdx := (len(b.counts) - 1 + offset) / steps
	b.counts = b.counts[:lastIdx+1]
	b.startBin = b.startBin >> delta
}

// newExponentialHistogram returns an Aggregator that summarizes a set of
// measurements as an exponential histogram. Each histogram is scoped by attributes
// and the aggregation cycle the measurements were made in.
func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMax, noSum bool) *expoHistogram[N] {
	return &expoHistogram[N]{
		noSum:    noSum,
		noMinMax: noMinMax,
		maxSize:  int(maxSize),
		maxScale: int(maxScale),

		values: make(map[attribute.Set]*expoHistogramDataPoint[N]),

		start: now(),
	}
}

// expoHistogram summarizes a set of measurements as a histogram with exponentially
// defined buckets.
type expoHistogram[N int64 | float64] struct {
	noSum    bool
	noMinMax bool
	maxSize  int
	maxScale int

	values   map[attribute.Set]*expoHistogramDataPoint[N]
	valuesMu sync.Mutex

	start time.Time
}

func (e *expoHistogram[N]) measure(_ context.Context, value N, attr attribute.Set) {
	// Ignore NaN and infinity.
if math.IsInf(float64(value), 0) || math.IsNaN(float64(value)) { return } e.valuesMu.Lock() defer e.valuesMu.Unlock() v, ok := e.values[attr] if !ok { v = newExpoHistogramDataPoint[N](e.maxSize, e.maxScale, e.noMinMax, e.noSum) e.values[attr] = v } v.record(value) } func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int { t := now() // If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed. // In that case, use the zero-value h and hope for better alignment next cycle. h, _ := (*dest).(metricdata.ExponentialHistogram[N]) h.Temporality = metricdata.DeltaTemporality e.valuesMu.Lock() defer e.valuesMu.Unlock() n := len(e.values) hDPts := reset(h.DataPoints, n, n) var i int for a, b := range e.values { hDPts[i].Attributes = a hDPts[i].StartTime = e.start hDPts[i].Time = t hDPts[i].Count = b.count hDPts[i].Scale = int32(b.scale) hDPts[i].ZeroCount = b.zeroCount hDPts[i].ZeroThreshold = 0.0 hDPts[i].PositiveBucket.Offset = int32(b.posBuckets.startBin) hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(b.posBuckets.counts), len(b.posBuckets.counts)) copy(hDPts[i].PositiveBucket.Counts, b.posBuckets.counts) hDPts[i].NegativeBucket.Offset = int32(b.negBuckets.startBin) hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(b.negBuckets.counts), len(b.negBuckets.counts)) if !e.noSum { hDPts[i].Sum = b.sum } if !e.noMinMax { hDPts[i].Min = metricdata.NewExtrema(b.min) hDPts[i].Max = metricdata.NewExtrema(b.max) } delete(e.values, a) i++ } e.start = t h.DataPoints = hDPts *dest = h return n } func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int { t := now() // If *dest is not a metricdata.ExponentialHistogram, memory reuse is missed. // In that case, use the zero-value h and hope for better alignment next cycle. h, _ := (*dest).(metricdata.ExponentialHistogram[N]) h.Temporality = metricdata.CumulativeTemporality e.valuesMu.Lock() defer e.valuesMu.Unlock() n := len(e.values) hDPts := reset(h.DataPoints, n, n) var i int for a, b := range e.values { hDPts[i].Attributes = a hDPts[i].StartTime = e.start hDPts[i].Time = t hDPts[i].Count = b.count hDPts[i].Scale = int32(b.scale) hDPts[i].ZeroCount = b.zeroCount hDPts[i].ZeroThreshold = 0.0 hDPts[i].PositiveBucket.Offset = int32(b.posBuckets.startBin) hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(b.posBuckets.counts), len(b.posBuckets.counts)) copy(hDPts[i].PositiveBucket.Counts, b.posBuckets.counts) hDPts[i].NegativeBucket.Offset = int32(b.negBuckets.startBin) hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(b.negBuckets.counts), len(b.negBuckets.counts)) if !e.noSum { hDPts[i].Sum = b.sum } if !e.noMinMax { hDPts[i].Min = metricdata.NewExtrema(b.min) hDPts[i].Max = metricdata.NewExtrema(b.max) } i++ // TODO (#3006): This will use an unbounded amount of memory if there // are unbounded number of attribute sets being aggregated. Attribute // sets that become "stale" need to be forgotten so this will not // overload the system. } h.DataPoints = hDPts *dest = h return n } opentelemetry-go-1.21.0/sdk/metric/internal/aggregate/exponential_histogram_test.go000066400000000000000000000526701452547353200307150ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package aggregate import ( "context" "fmt" "math" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" ) type noErrorHandler struct{ t *testing.T } func (h *noErrorHandler) Handle(e error) { require.NoError(h.t, e) } func withHandler(t *testing.T) func() { t.Helper() h := &noErrorHandler{t: t} original := global.GetErrorHandler() global.SetErrorHandler(h) return func() { global.SetErrorHandler(original) } } func TestExpoHistogramDataPointRecord(t *testing.T) { t.Run("float64", testExpoHistogramDataPointRecord[float64]) t.Run("float64 MinMaxSum", testExpoHistogramMinMaxSumFloat64) t.Run("float64-2", testExpoHistogramDataPointRecordFloat64) t.Run("int64", testExpoHistogramDataPointRecord[int64]) t.Run("int64 MinMaxSum", testExpoHistogramMinMaxSumInt64) } func testExpoHistogramDataPointRecord[N int64 | float64](t *testing.T) { testCases := []struct { maxSize int values []N expectedBuckets expoBuckets expectedScale int }{ { maxSize: 4, values: []N{2, 4, 1}, expectedBuckets: expoBuckets{ startBin: -1, counts: []uint64{1, 1, 1}, }, expectedScale: 0, }, { maxSize: 4, values: []N{4, 4, 4, 2, 16, 1}, expectedBuckets: expoBuckets{ startBin: -1, counts: []uint64{1, 4, 1}, }, expectedScale: -1, }, { maxSize: 2, values: []N{1, 2, 4}, expectedBuckets: expoBuckets{ startBin: -1, counts: []uint64{1, 2}, }, expectedScale: -1, }, { maxSize: 2, values: []N{1, 4, 2}, expectedBuckets: expoBuckets{ startBin: -1, counts: []uint64{1, 2}, }, expectedScale: -1, }, { maxSize: 2, values: []N{2, 4, 1}, expectedBuckets: expoBuckets{ startBin: -1, counts: []uint64{1, 2}, }, expectedScale: -1, }, { maxSize: 2, values: []N{2, 1, 4}, expectedBuckets: expoBuckets{ startBin: -1, counts: []uint64{1, 2}, }, expectedScale: -1, }, { maxSize: 2, values: []N{4, 1, 2}, expectedBuckets: expoBuckets{ startBin: -1, counts: []uint64{1, 2}, }, expectedScale: -1, }, { maxSize: 2, values: []N{4, 2, 1}, expectedBuckets: expoBuckets{ startBin: -1, counts: []uint64{1, 2}, }, expectedScale: -1, }, } for _, tt := range testCases { t.Run(fmt.Sprint(tt.values), func(t *testing.T) { restore := withHandler(t) defer restore() dp := newExpoHistogramDataPoint[N](tt.maxSize, 20, false, false) for _, v := range tt.values { dp.record(v) dp.record(-v) } assert.Equal(t, tt.expectedBuckets, dp.posBuckets, "positive buckets") assert.Equal(t, tt.expectedBuckets, dp.negBuckets, "negative buckets") assert.Equal(t, tt.expectedScale, dp.scale, "scale") }) } } // TODO: This can be defined in the test after we drop support for go1.19. 
type expectedMinMaxSum[N int64 | float64] struct { min N max N sum N count uint } type expoHistogramDataPointRecordMinMaxSumTestCase[N int64 | float64] struct { values []N expected expectedMinMaxSum[N] } func testExpoHistogramMinMaxSumInt64(t *testing.T) { testCases := []expoHistogramDataPointRecordMinMaxSumTestCase[int64]{ { values: []int64{2, 4, 1}, expected: expectedMinMaxSum[int64]{1, 4, 7, 3}, }, { values: []int64{4, 4, 4, 2, 16, 1}, expected: expectedMinMaxSum[int64]{1, 16, 31, 6}, }, } for _, tt := range testCases { t.Run(fmt.Sprint(tt.values), func(t *testing.T) { restore := withHandler(t) defer restore() h := newExponentialHistogram[int64](4, 20, false, false) for _, v := range tt.values { h.measure(context.Background(), v, alice) } dp := h.values[alice] assert.Equal(t, tt.expected.max, dp.max) assert.Equal(t, tt.expected.min, dp.min) assert.Equal(t, tt.expected.sum, dp.sum) }) } } func testExpoHistogramMinMaxSumFloat64(t *testing.T) { testCases := []expoHistogramDataPointRecordMinMaxSumTestCase[float64]{ { values: []float64{2, 4, 1}, expected: expectedMinMaxSum[float64]{1, 4, 7, 3}, }, { values: []float64{2, 4, 1, math.Inf(1)}, expected: expectedMinMaxSum[float64]{1, 4, 7, 4}, }, { values: []float64{2, 4, 1, math.Inf(-1)}, expected: expectedMinMaxSum[float64]{1, 4, 7, 4}, }, { values: []float64{2, 4, 1, math.NaN()}, expected: expectedMinMaxSum[float64]{1, 4, 7, 4}, }, { values: []float64{4, 4, 4, 2, 16, 1}, expected: expectedMinMaxSum[float64]{1, 16, 31, 6}, }, } for _, tt := range testCases { t.Run(fmt.Sprint(tt.values), func(t *testing.T) { restore := withHandler(t) defer restore() h := newExponentialHistogram[float64](4, 20, false, false) for _, v := range tt.values { h.measure(context.Background(), v, alice) } dp := h.values[alice] assert.Equal(t, tt.expected.max, dp.max) assert.Equal(t, tt.expected.min, dp.min) assert.Equal(t, tt.expected.sum, dp.sum) }) } } func testExpoHistogramDataPointRecordFloat64(t *testing.T) { type TestCase struct { maxSize int values []float64 expectedBuckets expoBuckets expectedScale int } testCases := []TestCase{ { maxSize: 4, values: []float64{2, 2, 2, 1, 8, 0.5}, expectedBuckets: expoBuckets{ startBin: -1, counts: []uint64{2, 3, 1}, }, expectedScale: -1, }, { maxSize: 2, values: []float64{1, 0.5, 2}, expectedBuckets: expoBuckets{ startBin: -1, counts: []uint64{2, 1}, }, expectedScale: -1, }, { maxSize: 2, values: []float64{1, 2, 0.5}, expectedBuckets: expoBuckets{ startBin: -1, counts: []uint64{2, 1}, }, expectedScale: -1, }, { maxSize: 2, values: []float64{2, 0.5, 1}, expectedBuckets: expoBuckets{ startBin: -1, counts: []uint64{2, 1}, }, expectedScale: -1, }, { maxSize: 2, values: []float64{2, 1, 0.5}, expectedBuckets: expoBuckets{ startBin: -1, counts: []uint64{2, 1}, }, expectedScale: -1, }, { maxSize: 2, values: []float64{0.5, 1, 2}, expectedBuckets: expoBuckets{ startBin: -1, counts: []uint64{2, 1}, }, expectedScale: -1, }, { maxSize: 2, values: []float64{0.5, 2, 1}, expectedBuckets: expoBuckets{ startBin: -1, counts: []uint64{2, 1}, }, expectedScale: -1, }, } for _, tt := range testCases { t.Run(fmt.Sprint(tt.values), func(t *testing.T) { restore := withHandler(t) defer restore() dp := newExpoHistogramDataPoint[float64](tt.maxSize, 20, false, false) for _, v := range tt.values { dp.record(v) dp.record(-v) } assert.Equal(t, tt.expectedBuckets, dp.posBuckets) assert.Equal(t, tt.expectedBuckets, dp.negBuckets) assert.Equal(t, tt.expectedScale, dp.scale) }) } } func TestExponentialHistogramDataPointRecordLimits(t *testing.T) { // These bins 
are calculated from the following formula: // floor( log2( value) * 2^20 ) using an arbitrary precision calculator. fdp := newExpoHistogramDataPoint[float64](4, 20, false, false) fdp.record(math.MaxFloat64) if fdp.posBuckets.startBin != 1073741823 { t.Errorf("Expected startBin to be 1073741823, got %d", fdp.posBuckets.startBin) } fdp = newExpoHistogramDataPoint[float64](4, 20, false, false) fdp.record(math.SmallestNonzeroFloat64) if fdp.posBuckets.startBin != -1126170625 { t.Errorf("Expected startBin to be -1126170625, got %d", fdp.posBuckets.startBin) } idp := newExpoHistogramDataPoint[int64](4, 20, false, false) idp.record(math.MaxInt64) if idp.posBuckets.startBin != 66060287 { t.Errorf("Expected startBin to be 66060287, got %d", idp.posBuckets.startBin) } } func TestExpoBucketDownscale(t *testing.T) { tests := []struct { name string bucket *expoBuckets scale int want *expoBuckets }{ { name: "Empty bucket", bucket: &expoBuckets{}, scale: 3, want: &expoBuckets{}, }, { name: "1 size bucket", bucket: &expoBuckets{ startBin: 50, counts: []uint64{7}, }, scale: 4, want: &expoBuckets{ startBin: 3, counts: []uint64{7}, }, }, { name: "zero scale", bucket: &expoBuckets{ startBin: 50, counts: []uint64{7, 5}, }, scale: 0, want: &expoBuckets{ startBin: 50, counts: []uint64{7, 5}, }, }, { name: "aligned bucket scale 1", bucket: &expoBuckets{ startBin: 0, counts: []uint64{1, 2, 3, 4, 5, 6}, }, scale: 1, want: &expoBuckets{ startBin: 0, counts: []uint64{3, 7, 11}, }, }, { name: "aligned bucket scale 2", bucket: &expoBuckets{ startBin: 0, counts: []uint64{1, 2, 3, 4, 5, 6}, }, scale: 2, want: &expoBuckets{ startBin: 0, counts: []uint64{10, 11}, }, }, { name: "aligned bucket scale 3", bucket: &expoBuckets{ startBin: 0, counts: []uint64{1, 2, 3, 4, 5, 6}, }, scale: 3, want: &expoBuckets{ startBin: 0, counts: []uint64{21}, }, }, { name: "unaligned bucket scale 1", bucket: &expoBuckets{ startBin: 5, counts: []uint64{1, 2, 3, 4, 5, 6}, }, // This is equivalent to [0,0,0,0,0,1,2,3,4,5,6] scale: 1, want: &expoBuckets{ startBin: 2, counts: []uint64{1, 5, 9, 6}, }, // This is equivalent to [0,0,1,5,9,6] }, { name: "unaligned bucket scale 2", bucket: &expoBuckets{ startBin: 7, counts: []uint64{1, 2, 3, 4, 5, 6}, }, // This is equivalent to [0,0,0,0,0,0,0,1,2,3,4,5,6] scale: 2, want: &expoBuckets{ startBin: 1, counts: []uint64{1, 14, 6}, }, // This is equivalent to [0,1,14,6] }, { name: "unaligned bucket scale 3", bucket: &expoBuckets{ startBin: 3, counts: []uint64{1, 2, 3, 4, 5, 6}, }, // This is equivalent to [0,0,0,1,2,3,4,5,6] scale: 3, want: &expoBuckets{ startBin: 0, counts: []uint64{15, 6}, }, // This is equivalent to [0,15,6] }, { name: "unaligned bucket scale 1", bucket: &expoBuckets{ startBin: 1, counts: []uint64{1, 0, 1}, }, scale: 1, want: &expoBuckets{ startBin: 0, counts: []uint64{1, 1}, }, }, { name: "negative startBin", bucket: &expoBuckets{ startBin: -1, counts: []uint64{1, 0, 3}, }, scale: 1, want: &expoBuckets{ startBin: -1, counts: []uint64{1, 3}, }, }, { name: "negative startBin 2", bucket: &expoBuckets{ startBin: -4, counts: []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, }, scale: 1, want: &expoBuckets{ startBin: -2, counts: []uint64{3, 7, 11, 15, 19}, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { tt.bucket.downscale(tt.scale) assert.Equal(t, tt.want, tt.bucket) }) } } func TestExpoBucketRecord(t *testing.T) { tests := []struct { name string bucket *expoBuckets bin int want *expoBuckets }{ { name: "Empty Bucket creates first count", bucket: &expoBuckets{}, bin: -5, want: 
&expoBuckets{ startBin: -5, counts: []uint64{1}, }, }, { name: "Bin is in the bucket", bucket: &expoBuckets{ startBin: 3, counts: []uint64{1, 2, 3, 4, 5, 6}, }, bin: 5, want: &expoBuckets{ startBin: 3, counts: []uint64{1, 2, 4, 4, 5, 6}, }, }, { name: "Bin is before the start of the bucket", bucket: &expoBuckets{ startBin: 1, counts: []uint64{1, 2, 3, 4, 5, 6}, }, bin: -2, want: &expoBuckets{ startBin: -2, counts: []uint64{1, 0, 0, 1, 2, 3, 4, 5, 6}, }, }, { name: "Bin is after the end of the bucket", bucket: &expoBuckets{ startBin: -2, counts: []uint64{1, 2, 3, 4, 5, 6}, }, bin: 4, want: &expoBuckets{ startBin: -2, counts: []uint64{1, 2, 3, 4, 5, 6, 1}, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { tt.bucket.record(tt.bin) assert.Equal(t, tt.want, tt.bucket) }) } } func TestScaleChange(t *testing.T) { type args struct { bin int startBin int length int maxSize int } tests := []struct { name string args args want int }{ { name: "if length is 0, no rescale is needed", // [] -> [5] Length 1 args: args{ bin: 5, startBin: 0, length: 0, maxSize: 4, }, want: 0, }, { name: "if bin is between start, and the end, no rescale needed", // [-1, ..., 8] Length 10 -> [-1, ..., 5, ..., 8] Length 10 args: args{ bin: 5, startBin: -1, length: 10, maxSize: 20, }, want: 0, }, { name: "if len([bin,... end]) > maxSize, rescale needed", // [8,9,10] Length 3 -> [5, ..., 10] Length 6 args: args{ bin: 5, startBin: 8, length: 3, maxSize: 5, }, want: 1, }, { name: "if len([start, ..., bin]) > maxSize, rescale needed", // [2,3,4] Length 3 -> [2, ..., 7] Length 6 args: args{ bin: 7, startBin: 2, length: 3, maxSize: 5, }, want: 1, }, { name: "if len([start, ..., bin]) > maxSize, rescale needed", // [2,3,4] Length 3 -> [2, ..., 7] Length 12 args: args{ bin: 13, startBin: 2, length: 3, maxSize: 5, }, want: 2, }, { name: "It should not hang if it will never be able to rescale", args: args{ bin: 1, startBin: -1, length: 1, maxSize: 1, }, want: 31, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { p := newExpoHistogramDataPoint[float64](tt.args.maxSize, 20, false, false) got := p.scaleChange(tt.args.bin, tt.args.startBin, tt.args.length) if got != tt.want { t.Errorf("scaleChange() = %v, want %v", got, tt.want) } }) } } func BenchmarkPrepend(b *testing.B) { for i := 0; i < b.N; i++ { agg := newExpoHistogramDataPoint[float64](1024, 20, false, false) n := math.MaxFloat64 for j := 0; j < 1024; j++ { agg.record(n) n = n / 2 } } } func BenchmarkAppend(b *testing.B) { for i := 0; i < b.N; i++ { agg := newExpoHistogramDataPoint[float64](1024, 20, false, false) n := smallestNonZeroNormalFloat64 for j := 0; j < 1024; j++ { agg.record(n) n = n * 2 } } } func BenchmarkExponentialHistogram(b *testing.B) { const ( maxSize = 160 maxScale = 20 noMinMax = false noSum = false ) b.Run("Int64/Cumulative", benchmarkAggregate(func() (Measure[int64], ComputeAggregation) { return Builder[int64]{ Temporality: metricdata.CumulativeTemporality, }.ExponentialBucketHistogram(maxSize, maxScale, noMinMax, noSum) })) b.Run("Int64/Delta", benchmarkAggregate(func() (Measure[int64], ComputeAggregation) { return Builder[int64]{ Temporality: metricdata.DeltaTemporality, }.ExponentialBucketHistogram(maxSize, maxScale, noMinMax, noSum) })) b.Run("Float64/Cumulative", benchmarkAggregate(func() (Measure[float64], ComputeAggregation) { return Builder[float64]{ Temporality: metricdata.CumulativeTemporality, }.ExponentialBucketHistogram(maxSize, maxScale, noMinMax, noSum) })) b.Run("Float64/Delta", benchmarkAggregate(func() 
(Measure[float64], ComputeAggregation) { return Builder[float64]{ Temporality: metricdata.DeltaTemporality, }.ExponentialBucketHistogram(maxSize, maxScale, noMinMax, noSum) })) } func TestSubNormal(t *testing.T) { want := &expoHistogramDataPoint[float64]{ maxSize: 4, count: 3, min: math.SmallestNonzeroFloat64, max: math.SmallestNonzeroFloat64, sum: 3 * math.SmallestNonzeroFloat64, scale: 20, posBuckets: expoBuckets{ startBin: -1126170625, counts: []uint64{3}, }, } ehdp := newExpoHistogramDataPoint[float64](4, 20, false, false) ehdp.record(math.SmallestNonzeroFloat64) ehdp.record(math.SmallestNonzeroFloat64) ehdp.record(math.SmallestNonzeroFloat64) assert.Equal(t, want, ehdp) } func TestExponentialHistogramAggregation(t *testing.T) { t.Run("Int64", testExponentialHistogramAggregation[int64]) t.Run("Float64", testExponentialHistogramAggregation[float64]) } func testExponentialHistogramAggregation[N int64 | float64](t *testing.T) { const ( maxSize = 4 maxScale = 20 noMinMax = false noSum = false ) tests := []struct { name string build func() (Measure[N], ComputeAggregation) input [][]N want metricdata.ExponentialHistogram[N] wantCount int }{ { name: "Delta Single", build: func() (Measure[N], ComputeAggregation) { return Builder[N]{ Temporality: metricdata.DeltaTemporality, }.ExponentialBucketHistogram(maxSize, maxScale, noMinMax, noSum) }, input: [][]N{ {4, 4, 4, 2, 16, 1}, }, want: metricdata.ExponentialHistogram[N]{ Temporality: metricdata.DeltaTemporality, DataPoints: []metricdata.ExponentialHistogramDataPoint[N]{ { Count: 6, Min: metricdata.NewExtrema[N](1), Max: metricdata.NewExtrema[N](16), Sum: 31, Scale: -1, PositiveBucket: metricdata.ExponentialBucket{ Offset: -1, Counts: []uint64{1, 4, 1}, }, }, }, }, wantCount: 1, }, { name: "Cumulative Single", build: func() (Measure[N], ComputeAggregation) { return Builder[N]{ Temporality: metricdata.CumulativeTemporality, }.ExponentialBucketHistogram(maxSize, maxScale, noMinMax, noSum) }, input: [][]N{ {4, 4, 4, 2, 16, 1}, }, want: metricdata.ExponentialHistogram[N]{ Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.ExponentialHistogramDataPoint[N]{ { Count: 6, Min: metricdata.NewExtrema[N](1), Max: metricdata.NewExtrema[N](16), Sum: 31, Scale: -1, PositiveBucket: metricdata.ExponentialBucket{ Offset: -1, Counts: []uint64{1, 4, 1}, }, }, }, }, wantCount: 1, }, { name: "Delta Multiple", build: func() (Measure[N], ComputeAggregation) { return Builder[N]{ Temporality: metricdata.DeltaTemporality, }.ExponentialBucketHistogram(maxSize, maxScale, noMinMax, noSum) }, input: [][]N{ {2, 3, 8}, {4, 4, 4, 2, 16, 1}, }, want: metricdata.ExponentialHistogram[N]{ Temporality: metricdata.DeltaTemporality, DataPoints: []metricdata.ExponentialHistogramDataPoint[N]{ { Count: 6, Min: metricdata.NewExtrema[N](1), Max: metricdata.NewExtrema[N](16), Sum: 31, Scale: -1, PositiveBucket: metricdata.ExponentialBucket{ Offset: -1, Counts: []uint64{1, 4, 1}, }, }, }, }, wantCount: 1, }, { name: "Cumulative Multiple ", build: func() (Measure[N], ComputeAggregation) { return Builder[N]{ Temporality: metricdata.CumulativeTemporality, }.ExponentialBucketHistogram(maxSize, maxScale, noMinMax, noSum) }, input: [][]N{ {2, 3, 8}, {4, 4, 4, 2, 16, 1}, }, want: metricdata.ExponentialHistogram[N]{ Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.ExponentialHistogramDataPoint[N]{ { Count: 9, Min: metricdata.NewExtrema[N](1), Max: metricdata.NewExtrema[N](16), Sum: 44, Scale: -1, PositiveBucket: metricdata.ExponentialBucket{ Offset: -1, 
Counts: []uint64{1, 6, 2}, }, }, }, }, wantCount: 1, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { restore := withHandler(t) defer restore() in, out := tt.build() ctx := context.Background() var got metricdata.Aggregation var count int for _, n := range tt.input { for _, v := range n { in(ctx, v, *attribute.EmptySet()) } count = out(&got) } metricdatatest.AssertAggregationsEqual(t, tt.want, got, metricdatatest.IgnoreTimestamp()) assert.Equal(t, tt.wantCount, count) }) } } func FuzzGetBin(f *testing.F) { values := []float64{ 2.0, 0x1p35, 0x1.0000000000001p35, 0x1.fffffffffffffp34, 0x1p300, 0x1.0000000000001p300, 0x1.fffffffffffffp299, } scales := []int{0, 15, -5} for _, s := range scales { for _, v := range values { f.Add(v, s) } } f.Fuzz(func(t *testing.T, v float64, scale int) { // GetBin only works on positive values. if math.Signbit(v) { v = v * -1 } // GetBin Doesn't work on zero. if v == 0.0 { t.Skip("skipping test for zero") } p := newExpoHistogramDataPoint[float64](4, 20, false, false) // scale range is -10 to 20. p.scale = (scale%31+31)%31 - 10 got := p.getBin(v) if v <= lowerBound(got, p.scale) { t.Errorf("v=%x scale =%d had bin %d, but was below lower bound %x", v, p.scale, got, lowerBound(got, p.scale)) } if v > lowerBound(got+1, p.scale) { t.Errorf("v=%x scale =%d had bin %d, but was above upper bound %x", v, p.scale, got, lowerBound(got+1, p.scale)) } }) } func lowerBound(index int, scale int) float64 { // The lowerBound of the index of Math.SmallestNonzeroFloat64 at any scale // is always rounded down to 0.0. // For example lowerBound(getBin(Math.SmallestNonzeroFloat64, 7), 7) == 0.0 // 2 ^ (index * 2 ^ (-scale)) return math.Exp2(math.Ldexp(float64(index), -scale)) } opentelemetry-go-1.21.0/sdk/metric/internal/aggregate/histogram.go000066400000000000000000000141041452547353200252360ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" import ( "context" "sort" "sync" "time" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) type buckets[N int64 | float64] struct { counts []uint64 count uint64 total N min, max N } // newBuckets returns buckets with n bins. func newBuckets[N int64 | float64](n int) *buckets[N] { return &buckets[N]{counts: make([]uint64, n)} } func (b *buckets[N]) sum(value N) { b.total += value } func (b *buckets[N]) bin(idx int, value N) { b.counts[idx]++ b.count++ if value < b.min { b.min = value } else if value > b.max { b.max = value } } // histValues summarizes a set of measurements as an histValues with // explicitly defined buckets. 
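//
// For orientation (editorial addition, using the bounds the tests below use):
// bounds of [1, 5] produce len(bounds)+1 = 3 counters covering (-inf, 1],
// (1, 5], and (5, +inf); a measurement of 2 increments the middle counter
// because sort.SearchFloat64s([]float64{1, 5}, 2) == 1.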
type histValues[N int64 | float64] struct { noSum bool bounds []float64 values map[attribute.Set]*buckets[N] valuesMu sync.Mutex } func newHistValues[N int64 | float64](bounds []float64, noSum bool) *histValues[N] { // The responsibility of keeping all buckets correctly associated with the // passed boundaries is ultimately this type's responsibility. Make a copy // here so we can always guarantee this. Or, in the case of failure, have // complete control over the fix. b := make([]float64, len(bounds)) copy(b, bounds) sort.Float64s(b) return &histValues[N]{ noSum: noSum, bounds: b, values: make(map[attribute.Set]*buckets[N]), } } // Aggregate records the measurement value, scoped by attr, and aggregates it // into a histogram. func (s *histValues[N]) measure(_ context.Context, value N, attr attribute.Set) { // This search will return an index in the range [0, len(s.bounds)], where // it will return len(s.bounds) if value is greater than the last element // of s.bounds. This aligns with the buckets in that the length of buckets // is len(s.bounds)+1, with the last bucket representing: // (s.bounds[len(s.bounds)-1], +∞). idx := sort.SearchFloat64s(s.bounds, float64(value)) s.valuesMu.Lock() defer s.valuesMu.Unlock() b, ok := s.values[attr] if !ok { // N+1 buckets. For example: // // bounds = [0, 5, 10] // // Then, // // buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞) b = newBuckets[N](len(s.bounds) + 1) // Ensure min and max are recorded values (not zero), for new buckets. b.min, b.max = value, value s.values[attr] = b } b.bin(idx, value) if !s.noSum { b.sum(value) } } // newHistogram returns an Aggregator that summarizes a set of measurements as // an histogram. func newHistogram[N int64 | float64](boundaries []float64, noMinMax, noSum bool) *histogram[N] { return &histogram[N]{ histValues: newHistValues[N](boundaries, noSum), noMinMax: noMinMax, start: now(), } } // histogram summarizes a set of measurements as an histogram with explicitly // defined buckets. type histogram[N int64 | float64] struct { *histValues[N] noMinMax bool start time.Time } func (s *histogram[N]) delta(dest *metricdata.Aggregation) int { t := now() // If *dest is not a metricdata.Histogram, memory reuse is missed. In that // case, use the zero-value h and hope for better alignment next cycle. h, _ := (*dest).(metricdata.Histogram[N]) h.Temporality = metricdata.DeltaTemporality s.valuesMu.Lock() defer s.valuesMu.Unlock() // Do not allow modification of our copy of bounds. bounds := make([]float64, len(s.bounds)) copy(bounds, s.bounds) n := len(s.values) hDPts := reset(h.DataPoints, n, n) var i int for a, b := range s.values { hDPts[i].Attributes = a hDPts[i].StartTime = s.start hDPts[i].Time = t hDPts[i].Count = b.count hDPts[i].Bounds = bounds hDPts[i].BucketCounts = b.counts if !s.noSum { hDPts[i].Sum = b.total } if !s.noMinMax { hDPts[i].Min = metricdata.NewExtrema(b.min) hDPts[i].Max = metricdata.NewExtrema(b.max) } // Unused attribute sets do not report. delete(s.values, a) i++ } // The delta collection cycle resets. s.start = t h.DataPoints = hDPts *dest = h return n } func (s *histogram[N]) cumulative(dest *metricdata.Aggregation) int { t := now() // If *dest is not a metricdata.Histogram, memory reuse is missed. In that // case, use the zero-value h and hope for better alignment next cycle. h, _ := (*dest).(metricdata.Histogram[N]) h.Temporality = metricdata.CumulativeTemporality s.valuesMu.Lock() defer s.valuesMu.Unlock() // Do not allow modification of our copy of bounds. 
bounds := make([]float64, len(s.bounds)) copy(bounds, s.bounds) n := len(s.values) hDPts := reset(h.DataPoints, n, n) var i int for a, b := range s.values { // The HistogramDataPoint field values returned need to be copies of // the buckets value as we will keep updating them. // // TODO (#3047): Making copies for bounds and counts incurs a large // memory allocation footprint. Alternatives should be explored. counts := make([]uint64, len(b.counts)) copy(counts, b.counts) hDPts[i].Attributes = a hDPts[i].StartTime = s.start hDPts[i].Time = t hDPts[i].Count = b.count hDPts[i].Bounds = bounds hDPts[i].BucketCounts = counts if !s.noSum { hDPts[i].Sum = b.total } if !s.noMinMax { hDPts[i].Min = metricdata.NewExtrema(b.min) hDPts[i].Max = metricdata.NewExtrema(b.max) } i++ // TODO (#3006): This will use an unbounded amount of memory if there // are unbounded number of attribute sets being aggregated. Attribute // sets that become "stale" need to be forgotten so this will not // overload the system. } h.DataPoints = hDPts *dest = h return n } opentelemetry-go-1.21.0/sdk/metric/internal/aggregate/histogram_test.go000066400000000000000000000243761452547353200263110ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" import ( "context" "sort" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" ) var ( bounds = []float64{1, 5} noMinMax = false ) func TestHistogram(t *testing.T) { t.Cleanup(mockTime(now)) t.Run("Int64/Delta/Sum", testDeltaHist[int64](conf[int64]{hPt: hPointSummed[int64]})) t.Run("Int64/Delta/NoSum", testDeltaHist[int64](conf[int64]{noSum: true, hPt: hPoint[int64]})) t.Run("Float64/Delta/Sum", testDeltaHist[float64](conf[float64]{hPt: hPointSummed[float64]})) t.Run("Float64/Delta/NoSum", testDeltaHist[float64](conf[float64]{noSum: true, hPt: hPoint[float64]})) t.Run("Int64/Cumulative/Sum", testCumulativeHist[int64](conf[int64]{hPt: hPointSummed[int64]})) t.Run("Int64/Cumulative/NoSum", testCumulativeHist[int64](conf[int64]{noSum: true, hPt: hPoint[int64]})) t.Run("Float64/Cumulative/Sum", testCumulativeHist[float64](conf[float64]{hPt: hPointSummed[float64]})) t.Run("Float64/Cumulative/NoSum", testCumulativeHist[float64](conf[float64]{noSum: true, hPt: hPoint[float64]})) } type conf[N int64 | float64] struct { noSum bool hPt func(attribute.Set, N, uint64) metricdata.HistogramDataPoint[N] } func testDeltaHist[N int64 | float64](c conf[N]) func(t *testing.T) { in, out := Builder[N]{ Temporality: metricdata.DeltaTemporality, Filter: attrFltr, }.ExplicitBucketHistogram(bounds, noMinMax, c.noSum) ctx := context.Background() return test[N](in, out, []teststep[N]{ { input: []arg[N]{}, expect: output{ n: 0, agg: metricdata.Histogram[N]{ Temporality: metricdata.DeltaTemporality, DataPoints: []metricdata.HistogramDataPoint[N]{}, }, }, }, { input: []arg[N]{ {ctx, 2, alice}, {ctx, 10, bob}, {ctx, 2, alice}, {ctx, 2, alice}, {ctx, 10, bob}, }, expect: output{ n: 2, agg: metricdata.Histogram[N]{ Temporality: metricdata.DeltaTemporality, DataPoints: []metricdata.HistogramDataPoint[N]{ c.hPt(fltrAlice, 2, 3), c.hPt(fltrBob, 10, 2), }, }, }, }, { input: []arg[N]{ {ctx, 10, alice}, {ctx, 3, bob}, }, expect: output{ n: 2, agg: metricdata.Histogram[N]{ Temporality: metricdata.DeltaTemporality, DataPoints: []metricdata.HistogramDataPoint[N]{ c.hPt(fltrAlice, 10, 1), c.hPt(fltrBob, 3, 1), }, }, }, }, { input: []arg[N]{}, // Delta histograms are expected to reset. 
expect: output{ n: 0, agg: metricdata.Histogram[N]{ Temporality: metricdata.DeltaTemporality, DataPoints: []metricdata.HistogramDataPoint[N]{}, }, }, }, }) } func testCumulativeHist[N int64 | float64](c conf[N]) func(t *testing.T) { in, out := Builder[N]{ Temporality: metricdata.CumulativeTemporality, Filter: attrFltr, }.ExplicitBucketHistogram(bounds, noMinMax, c.noSum) ctx := context.Background() return test[N](in, out, []teststep[N]{ { input: []arg[N]{}, expect: output{ n: 0, agg: metricdata.Histogram[N]{ Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.HistogramDataPoint[N]{}, }, }, }, { input: []arg[N]{ {ctx, 2, alice}, {ctx, 10, bob}, {ctx, 2, alice}, {ctx, 2, alice}, {ctx, 10, bob}, }, expect: output{ n: 2, agg: metricdata.Histogram[N]{ Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.HistogramDataPoint[N]{ c.hPt(fltrAlice, 2, 3), c.hPt(fltrBob, 10, 2), }, }, }, }, { input: []arg[N]{ {ctx, 2, alice}, {ctx, 10, bob}, }, expect: output{ n: 2, agg: metricdata.Histogram[N]{ Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.HistogramDataPoint[N]{ c.hPt(fltrAlice, 2, 4), c.hPt(fltrBob, 10, 3), }, }, }, }, { input: []arg[N]{}, expect: output{ n: 2, agg: metricdata.Histogram[N]{ Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.HistogramDataPoint[N]{ c.hPt(fltrAlice, 2, 4), c.hPt(fltrBob, 10, 3), }, }, }, }, }) } // hPointSummed returns an HistogramDataPoint that started and ended now with // multi number of measurements values v. It includes a min and max (set to v). func hPointSummed[N int64 | float64](a attribute.Set, v N, multi uint64) metricdata.HistogramDataPoint[N] { idx := sort.SearchFloat64s(bounds, float64(v)) counts := make([]uint64, len(bounds)+1) counts[idx] += multi return metricdata.HistogramDataPoint[N]{ Attributes: a, StartTime: now(), Time: now(), Count: multi, Bounds: bounds, BucketCounts: counts, Min: metricdata.NewExtrema(v), Max: metricdata.NewExtrema(v), Sum: v * N(multi), } } // hPoint returns an HistogramDataPoint that started and ended now with multi // number of measurements values v. It includes a min and max (set to v). 
func hPoint[N int64 | float64](a attribute.Set, v N, multi uint64) metricdata.HistogramDataPoint[N] { idx := sort.SearchFloat64s(bounds, float64(v)) counts := make([]uint64, len(bounds)+1) counts[idx] += multi return metricdata.HistogramDataPoint[N]{ Attributes: a, StartTime: now(), Time: now(), Count: multi, Bounds: bounds, BucketCounts: counts, Min: metricdata.NewExtrema(v), Max: metricdata.NewExtrema(v), } } func TestBucketsBin(t *testing.T) { t.Run("Int64", testBucketsBin[int64]()) t.Run("Float64", testBucketsBin[float64]()) } func testBucketsBin[N int64 | float64]() func(t *testing.T) { return func(t *testing.T) { b := newBuckets[N](3) assertB := func(counts []uint64, count uint64, min, max N) { t.Helper() assert.Equal(t, counts, b.counts) assert.Equal(t, count, b.count) assert.Equal(t, min, b.min) assert.Equal(t, max, b.max) } assertB([]uint64{0, 0, 0}, 0, 0, 0) b.bin(1, 2) assertB([]uint64{0, 1, 0}, 1, 0, 2) b.bin(0, -1) assertB([]uint64{1, 1, 0}, 2, -1, 2) } } func TestBucketsSum(t *testing.T) { t.Run("Int64", testBucketsSum[int64]()) t.Run("Float64", testBucketsSum[float64]()) } func testBucketsSum[N int64 | float64]() func(t *testing.T) { return func(t *testing.T) { b := newBuckets[N](3) var want N assert.Equal(t, want, b.total) b.sum(2) want = 2 assert.Equal(t, want, b.total) b.sum(-1) want = 1 assert.Equal(t, want, b.total) } } func TestHistogramImmutableBounds(t *testing.T) { b := []float64{0, 1, 2} cpB := make([]float64, len(b)) copy(cpB, b) h := newHistogram[int64](b, false, false) require.Equal(t, cpB, h.bounds) b[0] = 10 assert.Equal(t, cpB, h.bounds, "modifying the bounds argument should not change the bounds") h.measure(context.Background(), 5, alice) var data metricdata.Aggregation = metricdata.Histogram[int64]{} h.cumulative(&data) hdp := data.(metricdata.Histogram[int64]).DataPoints[0] hdp.Bounds[1] = 10 assert.Equal(t, cpB, h.bounds, "modifying the Aggregation bounds should not change the bounds") } func TestCumulativeHistogramImutableCounts(t *testing.T) { h := newHistogram[int64](bounds, noMinMax, false) h.measure(context.Background(), 5, alice) var data metricdata.Aggregation = metricdata.Histogram[int64]{} h.cumulative(&data) hdp := data.(metricdata.Histogram[int64]).DataPoints[0] require.Equal(t, hdp.BucketCounts, h.values[alice].counts) cpCounts := make([]uint64, len(hdp.BucketCounts)) copy(cpCounts, hdp.BucketCounts) hdp.BucketCounts[0] = 10 assert.Equal(t, cpCounts, h.values[alice].counts, "modifying the Aggregator bucket counts should not change the Aggregator") } func TestDeltaHistogramReset(t *testing.T) { t.Cleanup(mockTime(now)) h := newHistogram[int64](bounds, noMinMax, false) var data metricdata.Aggregation = metricdata.Histogram[int64]{} require.Equal(t, 0, h.delta(&data)) require.Len(t, data.(metricdata.Histogram[int64]).DataPoints, 0) h.measure(context.Background(), 1, alice) expect := metricdata.Histogram[int64]{Temporality: metricdata.DeltaTemporality} expect.DataPoints = []metricdata.HistogramDataPoint[int64]{hPointSummed[int64](alice, 1, 1)} h.delta(&data) metricdatatest.AssertAggregationsEqual(t, expect, data) // The attr set should be forgotten once Aggregations is called. expect.DataPoints = nil assert.Equal(t, 0, h.delta(&data)) assert.Len(t, data.(metricdata.Histogram[int64]).DataPoints, 0) // Aggregating another set should not affect the original (alice). 
h.measure(context.Background(), 1, bob) expect.DataPoints = []metricdata.HistogramDataPoint[int64]{hPointSummed[int64](bob, 1, 1)} h.delta(&data) metricdatatest.AssertAggregationsEqual(t, expect, data) } func BenchmarkHistogram(b *testing.B) { b.Run("Int64/Cumulative", benchmarkAggregate(func() (Measure[int64], ComputeAggregation) { return Builder[int64]{ Temporality: metricdata.CumulativeTemporality, }.ExplicitBucketHistogram(bounds, noMinMax, false) })) b.Run("Int64/Delta", benchmarkAggregate(func() (Measure[int64], ComputeAggregation) { return Builder[int64]{ Temporality: metricdata.DeltaTemporality, }.ExplicitBucketHistogram(bounds, noMinMax, false) })) b.Run("Float64/Cumulative", benchmarkAggregate(func() (Measure[float64], ComputeAggregation) { return Builder[float64]{ Temporality: metricdata.CumulativeTemporality, }.ExplicitBucketHistogram(bounds, noMinMax, false) })) b.Run("Float64/Delta", benchmarkAggregate(func() (Measure[float64], ComputeAggregation) { return Builder[float64]{ Temporality: metricdata.DeltaTemporality, }.ExplicitBucketHistogram(bounds, noMinMax, false) })) } opentelemetry-go-1.21.0/sdk/metric/internal/aggregate/lastvalue.go000066400000000000000000000034611452547353200252450ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" import ( "context" "sync" "time" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) // datapoint is timestamped measurement data. type datapoint[N int64 | float64] struct { timestamp time.Time value N } func newLastValue[N int64 | float64]() *lastValue[N] { return &lastValue[N]{values: make(map[attribute.Set]datapoint[N])} } // lastValue summarizes a set of measurements as the last one made. type lastValue[N int64 | float64] struct { sync.Mutex values map[attribute.Set]datapoint[N] } func (s *lastValue[N]) measure(ctx context.Context, value N, attr attribute.Set) { d := datapoint[N]{timestamp: now(), value: value} s.Lock() s.values[attr] = d s.Unlock() } func (s *lastValue[N]) computeAggregation(dest *[]metricdata.DataPoint[N]) { s.Lock() defer s.Unlock() n := len(s.values) *dest = reset(*dest, n, n) var i int for a, v := range s.values { (*dest)[i].Attributes = a // The event time is the only meaningful timestamp, StartTime is // ignored. (*dest)[i].Time = v.timestamp (*dest)[i].Value = v.value // Do not report stale values. delete(s.values, a) i++ } } opentelemetry-go-1.21.0/sdk/metric/internal/aggregate/lastvalue_test.go000066400000000000000000000045521452547353200263060ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" import ( "context" "testing" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) func TestLastValue(t *testing.T) { t.Cleanup(mockTime(now)) t.Run("Int64", testLastValue[int64]()) t.Run("Float64", testLastValue[float64]()) } func testLastValue[N int64 | float64]() func(*testing.T) { in, out := Builder[N]{Filter: attrFltr}.LastValue() ctx := context.Background() return test[N](in, out, []teststep[N]{ { // Empty output if nothing is measured. input: []arg[N]{}, expect: output{n: 0, agg: metricdata.Gauge[N]{}}, }, { input: []arg[N]{ {ctx, 1, alice}, {ctx, -1, bob}, {ctx, 1, fltrAlice}, {ctx, 2, alice}, {ctx, -10, bob}, }, expect: output{ n: 2, agg: metricdata.Gauge[N]{ DataPoints: []metricdata.DataPoint[N]{ { Attributes: fltrAlice, Time: staticTime, Value: 2, }, { Attributes: fltrBob, Time: staticTime, Value: -10, }, }, }, }, }, { // Everything resets, do not report old measurements. input: []arg[N]{}, expect: output{n: 0, agg: metricdata.Gauge[N]{}}, }, { input: []arg[N]{ {ctx, 10, alice}, {ctx, 3, bob}, }, expect: output{ n: 2, agg: metricdata.Gauge[N]{ DataPoints: []metricdata.DataPoint[N]{ { Attributes: fltrAlice, Time: staticTime, Value: 10, }, { Attributes: fltrBob, Time: staticTime, Value: 3, }, }, }, }, }, }) } func BenchmarkLastValue(b *testing.B) { b.Run("Int64", benchmarkAggregate(Builder[int64]{}.LastValue)) b.Run("Float64", benchmarkAggregate(Builder[float64]{}.LastValue)) } opentelemetry-go-1.21.0/sdk/metric/internal/aggregate/sum.go000066400000000000000000000127611452547353200240540ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" import ( "context" "sync" "time" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) // valueMap is the storage for sums. type valueMap[N int64 | float64] struct { sync.Mutex values map[attribute.Set]N } func newValueMap[N int64 | float64]() *valueMap[N] { return &valueMap[N]{values: make(map[attribute.Set]N)} } func (s *valueMap[N]) measure(_ context.Context, value N, attr attribute.Set) { s.Lock() s.values[attr] += value s.Unlock() } // newSum returns an aggregator that summarizes a set of measurements as their // arithmetic sum. Each sum is scoped by attributes and the aggregation cycle // the measurements were made in. 
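//
// Temporality sketch (editorial addition, not part of the upstream source;
// ctx, attrs, and dest stand for any context.Context, attribute.Set, and
// metricdata.Aggregation):
//
//	in, out := Builder[int64]{Temporality: metricdata.DeltaTemporality}.Sum(true)
//	in(ctx, 2, attrs); out(&dest) // one data point with value 2
//	in(ctx, 2, attrs); out(&dest) // value 2 again: the delta state was reset
//	// With CumulativeTemporality the same calls would report 2 and then 4.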
func newSum[N int64 | float64](monotonic bool) *sum[N] { return &sum[N]{ valueMap: newValueMap[N](), monotonic: monotonic, start: now(), } } // sum summarizes a set of measurements made as their arithmetic sum. type sum[N int64 | float64] struct { *valueMap[N] monotonic bool start time.Time } func (s *sum[N]) delta(dest *metricdata.Aggregation) int { t := now() // If *dest is not a metricdata.Sum, memory reuse is missed. In that case, // use the zero-value sData and hope for better alignment next cycle. sData, _ := (*dest).(metricdata.Sum[N]) sData.Temporality = metricdata.DeltaTemporality sData.IsMonotonic = s.monotonic s.Lock() defer s.Unlock() n := len(s.values) dPts := reset(sData.DataPoints, n, n) var i int for attr, value := range s.values { dPts[i].Attributes = attr dPts[i].StartTime = s.start dPts[i].Time = t dPts[i].Value = value // Do not report stale values. delete(s.values, attr) i++ } // The delta collection cycle resets. s.start = t sData.DataPoints = dPts *dest = sData return n } func (s *sum[N]) cumulative(dest *metricdata.Aggregation) int { t := now() // If *dest is not a metricdata.Sum, memory reuse is missed. In that case, // use the zero-value sData and hope for better alignment next cycle. sData, _ := (*dest).(metricdata.Sum[N]) sData.Temporality = metricdata.CumulativeTemporality sData.IsMonotonic = s.monotonic s.Lock() defer s.Unlock() n := len(s.values) dPts := reset(sData.DataPoints, n, n) var i int for attr, value := range s.values { dPts[i].Attributes = attr dPts[i].StartTime = s.start dPts[i].Time = t dPts[i].Value = value // TODO (#3006): This will use an unbounded amount of memory if there // are an unbounded number of attribute sets being aggregated. Attribute // sets that become "stale" need to be forgotten so this will not // overload the system. i++ } sData.DataPoints = dPts *dest = sData return n } // newPrecomputedSum returns an aggregator that summarizes a set of // observations as their arithmetic sum. Each sum is scoped by attributes and // the aggregation cycle the measurements were made in. func newPrecomputedSum[N int64 | float64](monotonic bool) *precomputedSum[N] { return &precomputedSum[N]{ valueMap: newValueMap[N](), monotonic: monotonic, start: now(), } } // precomputedSum summarizes a set of observations as their arithmetic sum. type precomputedSum[N int64 | float64] struct { *valueMap[N] monotonic bool start time.Time reported map[attribute.Set]N } func (s *precomputedSum[N]) delta(dest *metricdata.Aggregation) int { t := now() newReported := make(map[attribute.Set]N) // If *dest is not a metricdata.Sum, memory reuse is missed. In that case, // use the zero-value sData and hope for better alignment next cycle. sData, _ := (*dest).(metricdata.Sum[N]) sData.Temporality = metricdata.DeltaTemporality sData.IsMonotonic = s.monotonic s.Lock() defer s.Unlock() n := len(s.values) dPts := reset(sData.DataPoints, n, n) var i int for attr, value := range s.values { delta := value - s.reported[attr] dPts[i].Attributes = attr dPts[i].StartTime = s.start dPts[i].Time = t dPts[i].Value = delta newReported[attr] = value // Unused attribute sets do not report. delete(s.values, attr) i++ } // Unused attribute sets are forgotten. s.reported = newReported // The delta collection cycle resets. s.start = t sData.DataPoints = dPts *dest = sData return n } func (s *precomputedSum[N]) cumulative(dest *metricdata.Aggregation) int { t := now() // If *dest is not a metricdata.Sum, memory reuse is missed.
In that case, // use the zero-value sData and hope for better alignment next cycle. sData, _ := (*dest).(metricdata.Sum[N]) sData.Temporality = metricdata.CumulativeTemporality sData.IsMonotonic = s.monotonic s.Lock() defer s.Unlock() n := len(s.values) dPts := reset(sData.DataPoints, n, n) var i int for attr, value := range s.values { dPts[i].Attributes = attr dPts[i].StartTime = s.start dPts[i].Time = t dPts[i].Value = value // Unused attribute sets do not report. delete(s.values, attr) i++ } sData.DataPoints = dPts *dest = sData return n } opentelemetry-go-1.21.0/sdk/metric/internal/aggregate/sum_test.go000066400000000000000000000244431452547353200251130ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" import ( "context" "testing" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) func TestSum(t *testing.T) { t.Cleanup(mockTime(now)) t.Run("Int64/DeltaSum", testDeltaSum[int64]()) t.Run("Float64/DeltaSum", testDeltaSum[float64]()) t.Run("Int64/CumulativeSum", testCumulativeSum[int64]()) t.Run("Float64/CumulativeSum", testCumulativeSum[float64]()) t.Run("Int64/DeltaPrecomputedSum", testDeltaPrecomputedSum[int64]()) t.Run("Float64/DeltaPrecomputedSum", testDeltaPrecomputedSum[float64]()) t.Run("Int64/CumulativePrecomputedSum", testCumulativePrecomputedSum[int64]()) t.Run("Float64/CumulativePrecomputedSum", testCumulativePrecomputedSum[float64]()) } func testDeltaSum[N int64 | float64]() func(t *testing.T) { mono := false in, out := Builder[N]{ Temporality: metricdata.DeltaTemporality, Filter: attrFltr, }.Sum(mono) ctx := context.Background() return test[N](in, out, []teststep[N]{ { input: []arg[N]{}, expect: output{ n: 0, agg: metricdata.Sum[N]{ IsMonotonic: mono, Temporality: metricdata.DeltaTemporality, DataPoints: []metricdata.DataPoint[N]{}, }, }, }, { input: []arg[N]{ {ctx, 1, alice}, {ctx, -1, bob}, {ctx, 1, alice}, {ctx, 2, alice}, {ctx, -10, bob}, }, expect: output{ n: 2, agg: metricdata.Sum[N]{ IsMonotonic: mono, Temporality: metricdata.DeltaTemporality, DataPoints: []metricdata.DataPoint[N]{ { Attributes: fltrAlice, StartTime: staticTime, Time: staticTime, Value: 4, }, { Attributes: fltrBob, StartTime: staticTime, Time: staticTime, Value: -11, }, }, }, }, }, { input: []arg[N]{ {ctx, 10, alice}, {ctx, 3, bob}, }, expect: output{ n: 2, agg: metricdata.Sum[N]{ IsMonotonic: mono, Temporality: metricdata.DeltaTemporality, DataPoints: []metricdata.DataPoint[N]{ { Attributes: fltrAlice, StartTime: staticTime, Time: staticTime, Value: 10, }, { Attributes: fltrBob, StartTime: staticTime, Time: staticTime, Value: 3, }, }, }, }, }, { input: []arg[N]{}, // Delta sums are expected to reset. 
expect: output{ n: 0, agg: metricdata.Sum[N]{ IsMonotonic: mono, Temporality: metricdata.DeltaTemporality, DataPoints: []metricdata.DataPoint[N]{}, }, }, }, }) } func testCumulativeSum[N int64 | float64]() func(t *testing.T) { mono := false in, out := Builder[N]{ Temporality: metricdata.CumulativeTemporality, Filter: attrFltr, }.Sum(mono) ctx := context.Background() return test[N](in, out, []teststep[N]{ { input: []arg[N]{}, expect: output{ n: 0, agg: metricdata.Sum[N]{ IsMonotonic: mono, Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.DataPoint[N]{}, }, }, }, { input: []arg[N]{ {ctx, 1, alice}, {ctx, -1, bob}, {ctx, 1, alice}, {ctx, 2, alice}, {ctx, -10, bob}, }, expect: output{ n: 2, agg: metricdata.Sum[N]{ IsMonotonic: mono, Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.DataPoint[N]{ { Attributes: fltrAlice, StartTime: staticTime, Time: staticTime, Value: 4, }, { Attributes: fltrBob, StartTime: staticTime, Time: staticTime, Value: -11, }, }, }, }, }, { input: []arg[N]{ {ctx, 10, alice}, {ctx, 3, bob}, }, expect: output{ n: 2, agg: metricdata.Sum[N]{ IsMonotonic: mono, Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.DataPoint[N]{ { Attributes: fltrAlice, StartTime: staticTime, Time: staticTime, Value: 14, }, { Attributes: fltrBob, StartTime: staticTime, Time: staticTime, Value: -8, }, }, }, }, }, }) } func testDeltaPrecomputedSum[N int64 | float64]() func(t *testing.T) { mono := false in, out := Builder[N]{ Temporality: metricdata.DeltaTemporality, Filter: attrFltr, }.PrecomputedSum(mono) ctx := context.Background() return test[N](in, out, []teststep[N]{ { input: []arg[N]{}, expect: output{ n: 0, agg: metricdata.Sum[N]{ IsMonotonic: mono, Temporality: metricdata.DeltaTemporality, DataPoints: []metricdata.DataPoint[N]{}, }, }, }, { input: []arg[N]{ {ctx, 1, alice}, {ctx, -1, bob}, {ctx, 1, fltrAlice}, {ctx, 2, alice}, {ctx, -10, bob}, }, expect: output{ n: 2, agg: metricdata.Sum[N]{ IsMonotonic: mono, Temporality: metricdata.DeltaTemporality, DataPoints: []metricdata.DataPoint[N]{ { Attributes: fltrAlice, StartTime: staticTime, Time: staticTime, Value: 4, }, { Attributes: fltrBob, StartTime: staticTime, Time: staticTime, Value: -11, }, }, }, }, }, { input: []arg[N]{ {ctx, 1, fltrAlice}, {ctx, 10, alice}, {ctx, 3, bob}, }, expect: output{ n: 2, agg: metricdata.Sum[N]{ IsMonotonic: mono, Temporality: metricdata.DeltaTemporality, DataPoints: []metricdata.DataPoint[N]{ { Attributes: fltrAlice, StartTime: staticTime, Time: staticTime, Value: 7, }, { Attributes: fltrBob, StartTime: staticTime, Time: staticTime, Value: 14, }, }, }, }, }, { input: []arg[N]{}, // Precomputed sums are expected to reset. 
expect: output{ n: 0, agg: metricdata.Sum[N]{ IsMonotonic: mono, Temporality: metricdata.DeltaTemporality, DataPoints: []metricdata.DataPoint[N]{}, }, }, }, }) } func testCumulativePrecomputedSum[N int64 | float64]() func(t *testing.T) { mono := false in, out := Builder[N]{ Temporality: metricdata.CumulativeTemporality, Filter: attrFltr, }.PrecomputedSum(mono) ctx := context.Background() return test[N](in, out, []teststep[N]{ { input: []arg[N]{}, expect: output{ n: 0, agg: metricdata.Sum[N]{ IsMonotonic: mono, Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.DataPoint[N]{}, }, }, }, { input: []arg[N]{ {ctx, 1, alice}, {ctx, -1, bob}, {ctx, 1, fltrAlice}, {ctx, 2, alice}, {ctx, -10, bob}, }, expect: output{ n: 2, agg: metricdata.Sum[N]{ IsMonotonic: mono, Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.DataPoint[N]{ { Attributes: fltrAlice, StartTime: staticTime, Time: staticTime, Value: 4, }, { Attributes: fltrBob, StartTime: staticTime, Time: staticTime, Value: -11, }, }, }, }, }, { input: []arg[N]{ {ctx, 1, fltrAlice}, {ctx, 10, alice}, {ctx, 3, bob}, }, expect: output{ n: 2, agg: metricdata.Sum[N]{ IsMonotonic: mono, Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.DataPoint[N]{ { Attributes: fltrAlice, StartTime: staticTime, Time: staticTime, Value: 11, }, { Attributes: fltrBob, StartTime: staticTime, Time: staticTime, Value: 3, }, }, }, }, }, { input: []arg[N]{}, // Precomputed sums are expected to reset. expect: output{ n: 0, agg: metricdata.Sum[N]{ IsMonotonic: mono, Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.DataPoint[N]{}, }, }, }, }) } func BenchmarkSum(b *testing.B) { // The monotonic argument is only used to annotate the Sum returned from // the Aggregation method. It should not have an effect on operational // performance, therefore, only monotonic=false is benchmarked here. 
b.Run("Int64/Cumulative", benchmarkAggregate(func() (Measure[int64], ComputeAggregation) { return Builder[int64]{ Temporality: metricdata.CumulativeTemporality, }.Sum(false) })) b.Run("Int64/Delta", benchmarkAggregate(func() (Measure[int64], ComputeAggregation) { return Builder[int64]{ Temporality: metricdata.DeltaTemporality, }.Sum(false) })) b.Run("Float64/Cumulative", benchmarkAggregate(func() (Measure[float64], ComputeAggregation) { return Builder[float64]{ Temporality: metricdata.CumulativeTemporality, }.Sum(false) })) b.Run("Float64/Delta", benchmarkAggregate(func() (Measure[float64], ComputeAggregation) { return Builder[float64]{ Temporality: metricdata.DeltaTemporality, }.Sum(false) })) b.Run("Precomputed/Int64/Cumulative", benchmarkAggregate(func() (Measure[int64], ComputeAggregation) { return Builder[int64]{ Temporality: metricdata.CumulativeTemporality, }.PrecomputedSum(false) })) b.Run("Precomputed/Int64/Delta", benchmarkAggregate(func() (Measure[int64], ComputeAggregation) { return Builder[int64]{ Temporality: metricdata.DeltaTemporality, }.PrecomputedSum(false) })) b.Run("Precomputed/Float64/Cumulative", benchmarkAggregate(func() (Measure[float64], ComputeAggregation) { return Builder[float64]{ Temporality: metricdata.CumulativeTemporality, }.PrecomputedSum(false) })) b.Run("Precomputed/Float64/Delta", benchmarkAggregate(func() (Measure[float64], ComputeAggregation) { return Builder[float64]{ Temporality: metricdata.DeltaTemporality, }.PrecomputedSum(false) })) } opentelemetry-go-1.21.0/sdk/metric/internal/reuse_slice.go000066400000000000000000000016541452547353200236230ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal // import "go.opentelemetry.io/otel/sdk/metric/internal" // ReuseSlice returns a zeroed view of slice if its capacity is greater than or // equal to n. Otherwise, it returns a new []T with capacity equal to n. func ReuseSlice[T any](slice []T, n int) []T { if cap(slice) >= n { return slice[:n] } return make([]T, n) } opentelemetry-go-1.21.0/sdk/metric/manual_reader.go000066400000000000000000000153601452547353200223030ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( "context" "errors" "fmt" "sync" "sync/atomic" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) // ManualReader is a simple Reader that allows an application to // read metrics on demand. type ManualReader struct { sdkProducer atomic.Value shutdownOnce sync.Once mu sync.Mutex isShutdown bool externalProducers atomic.Value temporalitySelector TemporalitySelector aggregationSelector AggregationSelector } // Compile time check the manualReader implements Reader and is comparable. var _ = map[Reader]struct{}{&ManualReader{}: {}} // NewManualReader returns a Reader which is directly called to collect metrics. func NewManualReader(opts ...ManualReaderOption) *ManualReader { cfg := newManualReaderConfig(opts) r := &ManualReader{ temporalitySelector: cfg.temporalitySelector, aggregationSelector: cfg.aggregationSelector, } r.externalProducers.Store(cfg.producers) return r } // register stores the sdkProducer which enables the caller // to read metrics from the SDK on demand. func (mr *ManualReader) register(p sdkProducer) { // Only register once. If producer is already set, do nothing. if !mr.sdkProducer.CompareAndSwap(nil, produceHolder{produce: p.produce}) { msg := "did not register manual reader" global.Error(errDuplicateRegister, msg) } } // temporality reports the Temporality for the instrument kind provided. func (mr *ManualReader) temporality(kind InstrumentKind) metricdata.Temporality { return mr.temporalitySelector(kind) } // aggregation returns what Aggregation to use for kind. func (mr *ManualReader) aggregation(kind InstrumentKind) Aggregation { // nolint:revive // import-shadow for method scoped by type. return mr.aggregationSelector(kind) } // Shutdown closes any connections and frees any resources used by the reader. // // This method is safe to call concurrently. func (mr *ManualReader) Shutdown(context.Context) error { err := ErrReaderShutdown mr.shutdownOnce.Do(func() { // Any future call to Collect will now return ErrReaderShutdown. mr.sdkProducer.Store(produceHolder{ produce: shutdownProducer{}.produce, }) mr.mu.Lock() defer mr.mu.Unlock() mr.isShutdown = true // release references to Producer(s) mr.externalProducers.Store([]Producer{}) err = nil }) return err } // Collect gathers all metric data related to the Reader from // the SDK and other Producers and stores the result in rm. // // Collect will return an error if called after shutdown. // Collect will return an error if rm is a nil ResourceMetrics. // Collect will return an error if the context's Done channel is closed. // // This method is safe to call concurrently. func (mr *ManualReader) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error { if rm == nil { return errors.New("manual reader: *metricdata.ResourceMetrics is nil") } p := mr.sdkProducer.Load() if p == nil { return ErrReaderNotRegistered } ph, ok := p.(produceHolder) if !ok { // The atomic.Value is entirely in the periodicReader's control so // this should never happen. In the unforeseen case that this does // happen, return an error instead of panicking so a users code does // not halt in the processes. 
err := fmt.Errorf("manual reader: invalid producer: %T", p) return err } err := ph.produce(ctx, rm) if err != nil { return err } var errs []error for _, producer := range mr.externalProducers.Load().([]Producer) { externalMetrics, err := producer.Produce(ctx) if err != nil { errs = append(errs, err) } rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...) } global.Debug("ManualReader collection", "Data", rm) return unifyErrors(errs) } // MarshalLog returns logging data about the ManualReader. func (r *ManualReader) MarshalLog() interface{} { r.mu.Lock() down := r.isShutdown r.mu.Unlock() return struct { Type string Registered bool Shutdown bool }{ Type: "ManualReader", Registered: r.sdkProducer.Load() != nil, Shutdown: down, } } // manualReaderConfig contains configuration options for a ManualReader. type manualReaderConfig struct { temporalitySelector TemporalitySelector aggregationSelector AggregationSelector producers []Producer } // newManualReaderConfig returns a manualReaderConfig configured with options. func newManualReaderConfig(opts []ManualReaderOption) manualReaderConfig { cfg := manualReaderConfig{ temporalitySelector: DefaultTemporalitySelector, aggregationSelector: DefaultAggregationSelector, } for _, opt := range opts { cfg = opt.applyManual(cfg) } return cfg } // ManualReaderOption applies a configuration option value to a ManualReader. type ManualReaderOption interface { applyManual(manualReaderConfig) manualReaderConfig } // WithTemporalitySelector sets the TemporalitySelector a reader will use to // determine the Temporality of an instrument based on its kind. If this // option is not used, the reader will use the DefaultTemporalitySelector. func WithTemporalitySelector(selector TemporalitySelector) ManualReaderOption { return temporalitySelectorOption{selector: selector} } type temporalitySelectorOption struct { selector func(instrument InstrumentKind) metricdata.Temporality } // applyManual returns a manualReaderConfig with option applied. func (t temporalitySelectorOption) applyManual(mrc manualReaderConfig) manualReaderConfig { mrc.temporalitySelector = t.selector return mrc } // WithAggregationSelector sets the AggregationSelector a reader will use to // determine the aggregation to use for an instrument based on its kind. If // this option is not used, the reader will use the DefaultAggregationSelector // or the aggregation explicitly passed for a view matching an instrument. func WithAggregationSelector(selector AggregationSelector) ManualReaderOption { return aggregationSelectorOption{selector: selector} } type aggregationSelectorOption struct { selector AggregationSelector } // applyManual returns a manualReaderConfig with option applied. func (t aggregationSelectorOption) applyManual(c manualReaderConfig) manualReaderConfig { c.aggregationSelector = t.selector return c } opentelemetry-go-1.21.0/sdk/metric/manual_reader_test.go000066400000000000000000000067131452547353200233440ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( "context" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) func TestManualReader(t *testing.T) { suite.Run(t, &readerTestSuite{Factory: func(opts ...ReaderOption) Reader { var mopts []ManualReaderOption for _, o := range opts { mopts = append(mopts, o) } return NewManualReader(mopts...) }}) } func BenchmarkManualReader(b *testing.B) { b.Run("Collect", benchReaderCollectFunc(NewManualReader())) } var ( deltaTemporalitySelector = func(InstrumentKind) metricdata.Temporality { return metricdata.DeltaTemporality } cumulativeTemporalitySelector = func(InstrumentKind) metricdata.Temporality { return metricdata.CumulativeTemporality } ) func TestManualReaderTemporality(t *testing.T) { tests := []struct { name string options []ManualReaderOption // Currently only testing constant temporality. This should be expanded // if we put more advanced selection in the SDK wantTemporality metricdata.Temporality }{ { name: "default", wantTemporality: metricdata.CumulativeTemporality, }, { name: "delta", options: []ManualReaderOption{ WithTemporalitySelector(deltaTemporalitySelector), }, wantTemporality: metricdata.DeltaTemporality, }, { name: "repeats overwrite", options: []ManualReaderOption{ WithTemporalitySelector(deltaTemporalitySelector), WithTemporalitySelector(cumulativeTemporalitySelector), }, wantTemporality: metricdata.CumulativeTemporality, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var undefinedInstrument InstrumentKind rdr := NewManualReader(tt.options...) assert.Equal(t, tt.wantTemporality, rdr.temporality(undefinedInstrument)) }) } } func TestManualReaderCollect(t *testing.T) { expiredCtx, cancel := context.WithDeadline(context.Background(), time.Now().Add(-1)) defer cancel() tests := []struct { name string ctx context.Context expectedErr error }{ { name: "with a valid context", ctx: context.Background(), expectedErr: nil, }, { name: "with an expired context", ctx: expiredCtx, expectedErr: context.DeadlineExceeded, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { rdr := NewManualReader() mp := NewMeterProvider(WithReader(rdr)) meter := mp.Meter("test") // Ensure the pipeline has a callback setup testM, err := meter.Int64ObservableCounter("test") assert.NoError(t, err) _, err = meter.RegisterCallback(func(_ context.Context, o metric.Observer) error { return nil }, testM) assert.NoError(t, err) rm := &metricdata.ResourceMetrics{} assert.Equal(t, tt.expectedErr, rdr.Collect(tt.ctx, rm)) }) } } opentelemetry-go-1.21.0/sdk/metric/meter.go000066400000000000000000000500301452547353200206110ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( "context" "errors" "fmt" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/embedded" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" ) // ErrInstrumentName indicates the created instrument has an invalid name. // Valid names must consist of 255 or fewer characters including alphanumeric, _, ., -, / and start with a letter. var ErrInstrumentName = errors.New("invalid instrument name") // meter handles the creation and coordination of all metric instruments. A // meter represents a single instrumentation scope; all metric telemetry // produced by an instrumentation scope will use metric instruments from a // single meter. type meter struct { embedded.Meter scope instrumentation.Scope pipes pipelines int64Resolver resolver[int64] float64Resolver resolver[float64] } func newMeter(s instrumentation.Scope, p pipelines) *meter { // viewCache ensures instrument conflicts, including number conflicts, this // meter is asked to create are logged to the user. var viewCache cache[string, instID] return &meter{ scope: s, pipes: p, int64Resolver: newResolver[int64](p, &viewCache), float64Resolver: newResolver[float64](p, &viewCache), } } // Compile-time check meter implements metric.Meter. var _ metric.Meter = (*meter)(nil) // Int64Counter returns a new instrument identified by name and configured with // options. The instrument is used to synchronously record increasing int64 // measurements during a computational operation. func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { cfg := metric.NewInt64CounterConfig(options...) const kind = InstrumentKindCounter p := int64InstProvider{m} i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit()) if err != nil { return i, err } return i, validateInstrumentName(name) } // Int64UpDownCounter returns a new instrument identified by name and // configured with options. The instrument is used to synchronously record // int64 measurements during a computational operation. func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { cfg := metric.NewInt64UpDownCounterConfig(options...) const kind = InstrumentKindUpDownCounter p := int64InstProvider{m} i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit()) if err != nil { return i, err } return i, validateInstrumentName(name) } // Int64Histogram returns a new instrument identified by name and configured // with options. The instrument is used to synchronously record the // distribution of int64 measurements during a computational operation. func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { cfg := metric.NewInt64HistogramConfig(options...) p := int64InstProvider{m} i, err := p.lookupHistogram(name, cfg) if err != nil { return i, err } return i, validateInstrumentName(name) } // Int64ObservableCounter returns a new instrument identified by name and // configured with options. The instrument is used to asynchronously record // increasing int64 measurements once per a measurement collection cycle. // Only the measurements recorded during the collection cycle are exported. 
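// A usage sketch for the asynchronous instruments created below (instrument
// names are hypothetical; it mirrors the patterns exercised in meter_test.go):
// observable instruments report values only from callbacks, which run once per
// collection cycle.
package main

import (
	"context"

	"go.opentelemetry.io/otel/metric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	reader := sdkmetric.NewManualReader()
	meter := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader)).Meter("example")

	// Callback attached at creation time via an option.
	_, _ = meter.Int64ObservableCounter("queue.processed",
		metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error {
			o.Observe(42)
			return nil
		}))

	// RegisterCallback can observe one or more instruments from a single callback.
	gauge, _ := meter.Int64ObservableGauge("queue.length")
	_, _ = meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
		o.ObserveInt64(gauge, 7)
		return nil
	}, gauge)
}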
func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { cfg := metric.NewInt64ObservableCounterConfig(options...) const kind = InstrumentKindObservableCounter p := int64ObservProvider{m} inst, err := p.lookup(kind, name, cfg.Description(), cfg.Unit()) if err != nil { return nil, err } p.registerCallbacks(inst, cfg.Callbacks()) return inst, validateInstrumentName(name) } // Int64ObservableUpDownCounter returns a new instrument identified by name and // configured with options. The instrument is used to asynchronously record // int64 measurements once per a measurement collection cycle. Only the // measurements recorded during the collection cycle are exported. func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { cfg := metric.NewInt64ObservableUpDownCounterConfig(options...) const kind = InstrumentKindObservableUpDownCounter p := int64ObservProvider{m} inst, err := p.lookup(kind, name, cfg.Description(), cfg.Unit()) if err != nil { return nil, err } p.registerCallbacks(inst, cfg.Callbacks()) return inst, validateInstrumentName(name) } // Int64ObservableGauge returns a new instrument identified by name and // configured with options. The instrument is used to asynchronously record // instantaneous int64 measurements once per a measurement collection cycle. // Only the measurements recorded during the collection cycle are exported. func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { cfg := metric.NewInt64ObservableGaugeConfig(options...) const kind = InstrumentKindObservableGauge p := int64ObservProvider{m} inst, err := p.lookup(kind, name, cfg.Description(), cfg.Unit()) if err != nil { return nil, err } p.registerCallbacks(inst, cfg.Callbacks()) return inst, validateInstrumentName(name) } // Float64Counter returns a new instrument identified by name and configured // with options. The instrument is used to synchronously record increasing // float64 measurements during a computational operation. func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { cfg := metric.NewFloat64CounterConfig(options...) const kind = InstrumentKindCounter p := float64InstProvider{m} i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit()) if err != nil { return i, err } return i, validateInstrumentName(name) } // Float64UpDownCounter returns a new instrument identified by name and // configured with options. The instrument is used to synchronously record // float64 measurements during a computational operation. func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { cfg := metric.NewFloat64UpDownCounterConfig(options...) const kind = InstrumentKindUpDownCounter p := float64InstProvider{m} i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit()) if err != nil { return i, err } return i, validateInstrumentName(name) } // Float64Histogram returns a new instrument identified by name and configured // with options. The instrument is used to synchronously record the // distribution of float64 measurements during a computational operation. 
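// A usage sketch for the histogram instruments below (the instrument name and
// boundaries are hypothetical): custom bucket boundaries can be supplied with
// WithExplicitBucketBoundaries. As the lookupHistogram helpers later in this
// file note, invalid (non-monotonic) boundaries are ignored and reported as an
// error.
package main

import (
	"context"

	"go.opentelemetry.io/otel/metric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	ctx := context.Background()
	meter := sdkmetric.NewMeterProvider(
		sdkmetric.WithReader(sdkmetric.NewManualReader()),
	).Meter("example")

	hist, err := meter.Float64Histogram("request.duration",
		metric.WithUnit("s"),
		metric.WithExplicitBucketBoundaries(0.01, 0.1, 1, 10),
	)
	if err != nil {
		panic(err)
	}
	hist.Record(ctx, 0.25)
}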
func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { cfg := metric.NewFloat64HistogramConfig(options...) p := float64InstProvider{m} i, err := p.lookupHistogram(name, cfg) if err != nil { return i, err } return i, validateInstrumentName(name) } // Float64ObservableCounter returns a new instrument identified by name and // configured with options. The instrument is used to asynchronously record // increasing float64 measurements once per a measurement collection cycle. // Only the measurements recorded during the collection cycle are exported. func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { cfg := metric.NewFloat64ObservableCounterConfig(options...) const kind = InstrumentKindObservableCounter p := float64ObservProvider{m} inst, err := p.lookup(kind, name, cfg.Description(), cfg.Unit()) if err != nil { return nil, err } p.registerCallbacks(inst, cfg.Callbacks()) return inst, validateInstrumentName(name) } // Float64ObservableUpDownCounter returns a new instrument identified by name // and configured with options. The instrument is used to asynchronously record // float64 measurements once per a measurement collection cycle. Only the // measurements recorded during the collection cycle are exported. func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...) const kind = InstrumentKindObservableUpDownCounter p := float64ObservProvider{m} inst, err := p.lookup(kind, name, cfg.Description(), cfg.Unit()) if err != nil { return nil, err } p.registerCallbacks(inst, cfg.Callbacks()) return inst, validateInstrumentName(name) } // Float64ObservableGauge returns a new instrument identified by name and // configured with options. The instrument is used to asynchronously record // instantaneous float64 measurements once per a measurement collection cycle. // Only the measurements recorded during the collection cycle are exported. func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { cfg := metric.NewFloat64ObservableGaugeConfig(options...) const kind = InstrumentKindObservableGauge p := float64ObservProvider{m} inst, err := p.lookup(kind, name, cfg.Description(), cfg.Unit()) if err != nil { return nil, err } p.registerCallbacks(inst, cfg.Callbacks()) return inst, validateInstrumentName(name) } func validateInstrumentName(name string) error { if len(name) == 0 { return fmt.Errorf("%w: %s: is empty", ErrInstrumentName, name) } if len(name) > 255 { return fmt.Errorf("%w: %s: longer than 255 characters", ErrInstrumentName, name) } if !isAlpha([]rune(name)[0]) { return fmt.Errorf("%w: %s: must start with a letter", ErrInstrumentName, name) } if len(name) == 1 { return nil } for _, c := range name[1:] { if !isAlphanumeric(c) && c != '_' && c != '.' && c != '-' && c != '/' { return fmt.Errorf("%w: %s: must only contain [A-Za-z0-9_.-/]", ErrInstrumentName, name) } } return nil } func isAlpha(c rune) bool { return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') } func isAlphanumeric(c rune) bool { return isAlpha(c) || ('0' <= c && c <= '9') } // RegisterCallback registers f to be called each collection cycle so it will // make observations for insts during those cycles. 
// // The only instruments f can make observations for are insts. All other // observations will be dropped and an error will be logged. // // Only instruments from this meter can be registered with f, an error is // returned if other instrument are provided. // // Only observations made in the callback will be exported. Unlike synchronous // instruments, asynchronous callbacks can "forget" attribute sets that are no // longer relevant by omitting the observation during the callback. // // The returned Registration can be used to unregister f. func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) { if len(insts) == 0 { // Don't allocate a observer if not needed. return noopRegister{}, nil } reg := newObserver() var errs multierror for _, inst := range insts { // Unwrap any global. if u, ok := inst.(interface { Unwrap() metric.Observable }); ok { inst = u.Unwrap() } switch o := inst.(type) { case int64Observable: if err := o.registerable(m); err != nil { if !errors.Is(err, errEmptyAgg) { errs.append(err) } continue } reg.registerInt64(o.observablID) case float64Observable: if err := o.registerable(m); err != nil { if !errors.Is(err, errEmptyAgg) { errs.append(err) } continue } reg.registerFloat64(o.observablID) default: // Instrument external to the SDK. return nil, fmt.Errorf("invalid observable: from different implementation") } } err := errs.errorOrNil() if reg.len() == 0 { // All insts use drop aggregation or are invalid. return noopRegister{}, err } // Some or all instruments were valid. cback := func(ctx context.Context) error { return f(ctx, reg) } return m.pipes.registerMultiCallback(cback), err } type observer struct { embedded.Observer float64 map[observablID[float64]]struct{} int64 map[observablID[int64]]struct{} } func newObserver() observer { return observer{ float64: make(map[observablID[float64]]struct{}), int64: make(map[observablID[int64]]struct{}), } } func (r observer) len() int { return len(r.float64) + len(r.int64) } func (r observer) registerFloat64(id observablID[float64]) { r.float64[id] = struct{}{} } func (r observer) registerInt64(id observablID[int64]) { r.int64[id] = struct{}{} } var ( errUnknownObserver = errors.New("unknown observable instrument") errUnregObserver = errors.New("observable instrument not registered for callback") ) func (r observer) ObserveFloat64(o metric.Float64Observable, v float64, opts ...metric.ObserveOption) { var oImpl float64Observable switch conv := o.(type) { case float64Observable: oImpl = conv case interface { Unwrap() metric.Observable }: // Unwrap any global. async := conv.Unwrap() var ok bool if oImpl, ok = async.(float64Observable); !ok { global.Error(errUnknownObserver, "failed to record asynchronous") return } default: global.Error(errUnknownObserver, "failed to record") return } if _, registered := r.float64[oImpl.observablID]; !registered { global.Error(errUnregObserver, "failed to record", "name", oImpl.name, "description", oImpl.description, "unit", oImpl.unit, "number", fmt.Sprintf("%T", float64(0)), ) return } c := metric.NewObserveConfig(opts) oImpl.observe(v, c.Attributes()) } func (r observer) ObserveInt64(o metric.Int64Observable, v int64, opts ...metric.ObserveOption) { var oImpl int64Observable switch conv := o.(type) { case int64Observable: oImpl = conv case interface { Unwrap() metric.Observable }: // Unwrap any global. 
async := conv.Unwrap() var ok bool if oImpl, ok = async.(int64Observable); !ok { global.Error(errUnknownObserver, "failed to record asynchronous") return } default: global.Error(errUnknownObserver, "failed to record") return } if _, registered := r.int64[oImpl.observablID]; !registered { global.Error(errUnregObserver, "failed to record", "name", oImpl.name, "description", oImpl.description, "unit", oImpl.unit, "number", fmt.Sprintf("%T", int64(0)), ) return } c := metric.NewObserveConfig(opts) oImpl.observe(v, c.Attributes()) } type noopRegister struct{ embedded.Registration } func (noopRegister) Unregister() error { return nil } // int64InstProvider provides int64 OpenTelemetry instruments. type int64InstProvider struct{ *meter } func (p int64InstProvider) aggs(kind InstrumentKind, name, desc, u string) ([]aggregate.Measure[int64], error) { inst := Instrument{ Name: name, Description: desc, Unit: u, Kind: kind, Scope: p.scope, } return p.int64Resolver.Aggregators(inst) } func (p int64InstProvider) histogramAggs(name string, cfg metric.Int64HistogramConfig) ([]aggregate.Measure[int64], error) { boundaries := cfg.ExplicitBucketBoundaries() aggError := AggregationExplicitBucketHistogram{Boundaries: boundaries}.err() if aggError != nil { // If boundaries are invalid, ignore them. boundaries = nil } inst := Instrument{ Name: name, Description: cfg.Description(), Unit: cfg.Unit(), Kind: InstrumentKindHistogram, Scope: p.scope, } measures, err := p.int64Resolver.HistogramAggregators(inst, boundaries) return measures, errors.Join(aggError, err) } // lookup returns the resolved instrumentImpl. func (p int64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*int64Inst, error) { aggs, err := p.aggs(kind, name, desc, u) return &int64Inst{measures: aggs}, err } // lookupHistogram returns the resolved instrumentImpl. func (p int64InstProvider) lookupHistogram(name string, cfg metric.Int64HistogramConfig) (*int64Inst, error) { aggs, err := p.histogramAggs(name, cfg) return &int64Inst{measures: aggs}, err } // float64InstProvider provides float64 OpenTelemetry instruments. type float64InstProvider struct{ *meter } func (p float64InstProvider) aggs(kind InstrumentKind, name, desc, u string) ([]aggregate.Measure[float64], error) { inst := Instrument{ Name: name, Description: desc, Unit: u, Kind: kind, Scope: p.scope, } return p.float64Resolver.Aggregators(inst) } func (p float64InstProvider) histogramAggs(name string, cfg metric.Float64HistogramConfig) ([]aggregate.Measure[float64], error) { boundaries := cfg.ExplicitBucketBoundaries() aggError := AggregationExplicitBucketHistogram{Boundaries: boundaries}.err() if aggError != nil { // If boundaries are invalid, ignore them. boundaries = nil } inst := Instrument{ Name: name, Description: cfg.Description(), Unit: cfg.Unit(), Kind: InstrumentKindHistogram, Scope: p.scope, } measures, err := p.float64Resolver.HistogramAggregators(inst, boundaries) return measures, errors.Join(aggError, err) } // lookup returns the resolved instrumentImpl. func (p float64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*float64Inst, error) { aggs, err := p.aggs(kind, name, desc, u) return &float64Inst{measures: aggs}, err } // lookupHistogram returns the resolved instrumentImpl. 
func (p float64InstProvider) lookupHistogram(name string, cfg metric.Float64HistogramConfig) (*float64Inst, error) { aggs, err := p.histogramAggs(name, cfg) return &float64Inst{measures: aggs}, err } type int64ObservProvider struct{ *meter } func (p int64ObservProvider) lookup(kind InstrumentKind, name, desc, u string) (int64Observable, error) { aggs, err := (int64InstProvider)(p).aggs(kind, name, desc, u) return newInt64Observable(p.meter, kind, name, desc, u, aggs), err } func (p int64ObservProvider) registerCallbacks(inst int64Observable, cBacks []metric.Int64Callback) { if inst.observable == nil || len(inst.measures) == 0 { // Drop aggregator. return } for _, cBack := range cBacks { p.pipes.registerCallback(p.callback(inst, cBack)) } } func (p int64ObservProvider) callback(i int64Observable, f metric.Int64Callback) func(context.Context) error { inst := int64Observer{int64Observable: i} return func(ctx context.Context) error { return f(ctx, inst) } } type int64Observer struct { embedded.Int64Observer int64Observable } func (o int64Observer) Observe(val int64, opts ...metric.ObserveOption) { c := metric.NewObserveConfig(opts) o.observe(val, c.Attributes()) } type float64ObservProvider struct{ *meter } func (p float64ObservProvider) lookup(kind InstrumentKind, name, desc, u string) (float64Observable, error) { aggs, err := (float64InstProvider)(p).aggs(kind, name, desc, u) return newFloat64Observable(p.meter, kind, name, desc, u, aggs), err } func (p float64ObservProvider) registerCallbacks(inst float64Observable, cBacks []metric.Float64Callback) { if inst.observable == nil || len(inst.measures) == 0 { // Drop aggregator. return } for _, cBack := range cBacks { p.pipes.registerCallback(p.callback(inst, cBack)) } } func (p float64ObservProvider) callback(i float64Observable, f metric.Float64Callback) func(context.Context) error { inst := float64Observer{float64Observable: i} return func(ctx context.Context) error { return f(ctx, inst) } } type float64Observer struct { embedded.Float64Observer float64Observable } func (o float64Observer) Observe(val float64, opts ...metric.ObserveOption) { c := metric.NewObserveConfig(opts) o.observe(val, c.Attributes()) } opentelemetry-go-1.21.0/sdk/metric/meter_test.go000066400000000000000000001566171452547353200216720ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric import ( "context" "errors" "fmt" "strings" "sync" "testing" "github.com/go-logr/logr" "github.com/go-logr/logr/testr" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" "go.opentelemetry.io/otel/sdk/resource" ) // A meter should be able to make instruments concurrently. 
func TestMeterInstrumentConcurrentSafe(t *testing.T) { wg := &sync.WaitGroup{} wg.Add(12) m := NewMeterProvider().Meter("inst-concurrency") go func() { _, _ = m.Float64ObservableCounter("AFCounter") wg.Done() }() go func() { _, _ = m.Float64ObservableUpDownCounter("AFUpDownCounter") wg.Done() }() go func() { _, _ = m.Float64ObservableGauge("AFGauge") wg.Done() }() go func() { _, _ = m.Int64ObservableCounter("AICounter") wg.Done() }() go func() { _, _ = m.Int64ObservableUpDownCounter("AIUpDownCounter") wg.Done() }() go func() { _, _ = m.Int64ObservableGauge("AIGauge") wg.Done() }() go func() { _, _ = m.Float64Counter("SFCounter") wg.Done() }() go func() { _, _ = m.Float64UpDownCounter("SFUpDownCounter") wg.Done() }() go func() { _, _ = m.Float64Histogram("SFHistogram") wg.Done() }() go func() { _, _ = m.Int64Counter("SICounter") wg.Done() }() go func() { _, _ = m.Int64UpDownCounter("SIUpDownCounter") wg.Done() }() go func() { _, _ = m.Int64Histogram("SIHistogram") wg.Done() }() wg.Wait() } var emptyCallback metric.Callback = func(context.Context, metric.Observer) error { return nil } // A Meter should be able to register Callbacks concurrently. func TestMeterCallbackCreationConcurrency(t *testing.T) { wg := &sync.WaitGroup{} wg.Add(2) m := NewMeterProvider().Meter("callback-concurrency") go func() { _, _ = m.RegisterCallback(emptyCallback) wg.Done() }() go func() { _, _ = m.RegisterCallback(emptyCallback) wg.Done() }() wg.Wait() } func TestNoopCallbackUnregisterConcurrency(t *testing.T) { m := NewMeterProvider().Meter("noop-unregister-concurrency") reg, err := m.RegisterCallback(emptyCallback) require.NoError(t, err) wg := &sync.WaitGroup{} wg.Add(2) go func() { _ = reg.Unregister() wg.Done() }() go func() { _ = reg.Unregister() wg.Done() }() wg.Wait() } func TestCallbackUnregisterConcurrency(t *testing.T) { reader := NewManualReader() provider := NewMeterProvider(WithReader(reader)) meter := provider.Meter("unregister-concurrency") actr, err := meter.Float64ObservableCounter("counter") require.NoError(t, err) ag, err := meter.Int64ObservableGauge("gauge") require.NoError(t, err) regCtr, err := meter.RegisterCallback(emptyCallback, actr) require.NoError(t, err) regG, err := meter.RegisterCallback(emptyCallback, ag) require.NoError(t, err) wg := &sync.WaitGroup{} wg.Add(2) go func() { _ = regCtr.Unregister() _ = regG.Unregister() wg.Done() }() go func() { _ = regCtr.Unregister() _ = regG.Unregister() wg.Done() }() wg.Wait() } // Instruments should produce correct ResourceMetrics. 
func TestMeterCreatesInstruments(t *testing.T) { attrs := attribute.NewSet(attribute.String("name", "alice")) opt := metric.WithAttributeSet(attrs) testCases := []struct { name string fn func(*testing.T, metric.Meter) want metricdata.Metrics }{ { name: "ObservableInt64Count", fn: func(t *testing.T, m metric.Meter) { cback := func(_ context.Context, o metric.Int64Observer) error { o.Observe(4, opt) return nil } ctr, err := m.Int64ObservableCounter("aint", metric.WithInt64Callback(cback)) assert.NoError(t, err) _, err = m.RegisterCallback(func(_ context.Context, o metric.Observer) error { o.ObserveInt64(ctr, 3) return nil }, ctr) assert.NoError(t, err) }, want: metricdata.Metrics{ Name: "aint", Data: metricdata.Sum[int64]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: true, DataPoints: []metricdata.DataPoint[int64]{ {Attributes: attrs, Value: 4}, {Value: 3}, }, }, }, }, { name: "ObservableInt64UpDownCount", fn: func(t *testing.T, m metric.Meter) { cback := func(_ context.Context, o metric.Int64Observer) error { o.Observe(4, opt) return nil } ctr, err := m.Int64ObservableUpDownCounter("aint", metric.WithInt64Callback(cback)) assert.NoError(t, err) _, err = m.RegisterCallback(func(_ context.Context, o metric.Observer) error { o.ObserveInt64(ctr, 11) return nil }, ctr) assert.NoError(t, err) }, want: metricdata.Metrics{ Name: "aint", Data: metricdata.Sum[int64]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: false, DataPoints: []metricdata.DataPoint[int64]{ {Attributes: attrs, Value: 4}, {Value: 11}, }, }, }, }, { name: "ObservableInt64Gauge", fn: func(t *testing.T, m metric.Meter) { cback := func(_ context.Context, o metric.Int64Observer) error { o.Observe(4, opt) return nil } gauge, err := m.Int64ObservableGauge("agauge", metric.WithInt64Callback(cback)) assert.NoError(t, err) _, err = m.RegisterCallback(func(_ context.Context, o metric.Observer) error { o.ObserveInt64(gauge, 11) return nil }, gauge) assert.NoError(t, err) }, want: metricdata.Metrics{ Name: "agauge", Data: metricdata.Gauge[int64]{ DataPoints: []metricdata.DataPoint[int64]{ {Attributes: attrs, Value: 4}, {Value: 11}, }, }, }, }, { name: "ObservableFloat64Count", fn: func(t *testing.T, m metric.Meter) { cback := func(_ context.Context, o metric.Float64Observer) error { o.Observe(4, opt) return nil } ctr, err := m.Float64ObservableCounter("afloat", metric.WithFloat64Callback(cback)) assert.NoError(t, err) _, err = m.RegisterCallback(func(_ context.Context, o metric.Observer) error { o.ObserveFloat64(ctr, 3) return nil }, ctr) assert.NoError(t, err) }, want: metricdata.Metrics{ Name: "afloat", Data: metricdata.Sum[float64]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: true, DataPoints: []metricdata.DataPoint[float64]{ {Attributes: attrs, Value: 4}, {Value: 3}, }, }, }, }, { name: "ObservableFloat64UpDownCount", fn: func(t *testing.T, m metric.Meter) { cback := func(_ context.Context, o metric.Float64Observer) error { o.Observe(4, opt) return nil } ctr, err := m.Float64ObservableUpDownCounter("afloat", metric.WithFloat64Callback(cback)) assert.NoError(t, err) _, err = m.RegisterCallback(func(_ context.Context, o metric.Observer) error { o.ObserveFloat64(ctr, 11) return nil }, ctr) assert.NoError(t, err) }, want: metricdata.Metrics{ Name: "afloat", Data: metricdata.Sum[float64]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: false, DataPoints: []metricdata.DataPoint[float64]{ {Attributes: attrs, Value: 4}, {Value: 11}, }, }, }, }, { name: "ObservableFloat64Gauge", fn: func(t 
*testing.T, m metric.Meter) { cback := func(_ context.Context, o metric.Float64Observer) error { o.Observe(4, opt) return nil } gauge, err := m.Float64ObservableGauge("agauge", metric.WithFloat64Callback(cback)) assert.NoError(t, err) _, err = m.RegisterCallback(func(_ context.Context, o metric.Observer) error { o.ObserveFloat64(gauge, 11) return nil }, gauge) assert.NoError(t, err) }, want: metricdata.Metrics{ Name: "agauge", Data: metricdata.Gauge[float64]{ DataPoints: []metricdata.DataPoint[float64]{ {Attributes: attrs, Value: 4}, {Value: 11}, }, }, }, }, { name: "SyncInt64Count", fn: func(t *testing.T, m metric.Meter) { ctr, err := m.Int64Counter("sint") assert.NoError(t, err) ctr.Add(context.Background(), 3) }, want: metricdata.Metrics{ Name: "sint", Data: metricdata.Sum[int64]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: true, DataPoints: []metricdata.DataPoint[int64]{ {Value: 3}, }, }, }, }, { name: "SyncInt64UpDownCount", fn: func(t *testing.T, m metric.Meter) { ctr, err := m.Int64UpDownCounter("sint") assert.NoError(t, err) ctr.Add(context.Background(), 11) }, want: metricdata.Metrics{ Name: "sint", Data: metricdata.Sum[int64]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: false, DataPoints: []metricdata.DataPoint[int64]{ {Value: 11}, }, }, }, }, { name: "SyncInt64Histogram", fn: func(t *testing.T, m metric.Meter) { gauge, err := m.Int64Histogram("histogram") assert.NoError(t, err) gauge.Record(context.Background(), 7) }, want: metricdata.Metrics{ Name: "histogram", Data: metricdata.Histogram[int64]{ Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.HistogramDataPoint[int64]{ { Attributes: attribute.Set{}, Count: 1, Bounds: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000}, BucketCounts: []uint64{0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, Min: metricdata.NewExtrema[int64](7), Max: metricdata.NewExtrema[int64](7), Sum: 7, }, }, }, }, }, { name: "SyncFloat64Count", fn: func(t *testing.T, m metric.Meter) { ctr, err := m.Float64Counter("sfloat") assert.NoError(t, err) ctr.Add(context.Background(), 3) }, want: metricdata.Metrics{ Name: "sfloat", Data: metricdata.Sum[float64]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: true, DataPoints: []metricdata.DataPoint[float64]{ {Value: 3}, }, }, }, }, { name: "SyncFloat64UpDownCount", fn: func(t *testing.T, m metric.Meter) { ctr, err := m.Float64UpDownCounter("sfloat") assert.NoError(t, err) ctr.Add(context.Background(), 11) }, want: metricdata.Metrics{ Name: "sfloat", Data: metricdata.Sum[float64]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: false, DataPoints: []metricdata.DataPoint[float64]{ {Value: 11}, }, }, }, }, { name: "SyncFloat64Histogram", fn: func(t *testing.T, m metric.Meter) { gauge, err := m.Float64Histogram("histogram") assert.NoError(t, err) gauge.Record(context.Background(), 7) }, want: metricdata.Metrics{ Name: "histogram", Data: metricdata.Histogram[float64]{ Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.HistogramDataPoint[float64]{ { Attributes: attribute.Set{}, Count: 1, Bounds: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000}, BucketCounts: []uint64{0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, Min: metricdata.NewExtrema[float64](7.), Max: metricdata.NewExtrema[float64](7.), Sum: 7.0, }, }, }, }, }, } for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { rdr := NewManualReader() m := 
NewMeterProvider(WithReader(rdr)).Meter("testInstruments") tt.fn(t, m) rm := metricdata.ResourceMetrics{} err := rdr.Collect(context.Background(), &rm) assert.NoError(t, err) require.Len(t, rm.ScopeMetrics, 1) sm := rm.ScopeMetrics[0] require.Len(t, sm.Metrics, 1) got := sm.Metrics[0] metricdatatest.AssertEqual(t, tt.want, got, metricdatatest.IgnoreTimestamp()) }) } } func TestMeterCreatesInstrumentsValidations(t *testing.T) { testCases := []struct { name string fn func(*testing.T, metric.Meter) error wantErr error }{ { name: "Int64Counter with no validation issues", fn: func(t *testing.T, m metric.Meter) error { i, err := m.Int64Counter("counter") assert.NotNil(t, i) return err }, }, { name: "Int64Counter with an invalid name", fn: func(t *testing.T, m metric.Meter) error { i, err := m.Int64Counter("_") assert.NotNil(t, i) return err }, wantErr: fmt.Errorf("%w: _: must start with a letter", ErrInstrumentName), }, { name: "Int64UpDownCounter with no validation issues", fn: func(t *testing.T, m metric.Meter) error { i, err := m.Int64UpDownCounter("upDownCounter") assert.NotNil(t, i) return err }, }, { name: "Int64UpDownCounter with an invalid name", fn: func(t *testing.T, m metric.Meter) error { i, err := m.Int64UpDownCounter("_") assert.NotNil(t, i) return err }, wantErr: fmt.Errorf("%w: _: must start with a letter", ErrInstrumentName), }, { name: "Int64Histogram with no validation issues", fn: func(t *testing.T, m metric.Meter) error { i, err := m.Int64Histogram("histogram") assert.NotNil(t, i) return err }, }, { name: "Int64Histogram with an invalid name", fn: func(t *testing.T, m metric.Meter) error { i, err := m.Int64Histogram("_") assert.NotNil(t, i) return err }, wantErr: fmt.Errorf("%w: _: must start with a letter", ErrInstrumentName), }, { name: "Int64Histogram with invalid buckets", fn: func(t *testing.T, m metric.Meter) error { i, err := m.Int64Histogram("histogram", metric.WithExplicitBucketBoundaries(-1, 1, -5)) assert.NotNil(t, i) return err }, wantErr: errors.Join(fmt.Errorf("%w: non-monotonic boundaries: %v", errHist, []float64{-1, 1, -5})), }, { name: "Int64ObservableCounter with no validation issues", fn: func(t *testing.T, m metric.Meter) error { i, err := m.Int64ObservableCounter("aint") assert.NotNil(t, i) return err }, }, { name: "Int64ObservableCounter with an invalid name", fn: func(t *testing.T, m metric.Meter) error { i, err := m.Int64ObservableCounter("_") assert.NotNil(t, i) return err }, wantErr: fmt.Errorf("%w: _: must start with a letter", ErrInstrumentName), }, { name: "Int64ObservableUpDownCounter with no validation issues", fn: func(t *testing.T, m metric.Meter) error { i, err := m.Int64ObservableUpDownCounter("aint") assert.NotNil(t, i) return err }, }, { name: "Int64ObservableUpDownCounter with an invalid name", fn: func(t *testing.T, m metric.Meter) error { i, err := m.Int64ObservableUpDownCounter("_") assert.NotNil(t, i) return err }, wantErr: fmt.Errorf("%w: _: must start with a letter", ErrInstrumentName), }, { name: "Int64ObservableGauge with no validation issues", fn: func(t *testing.T, m metric.Meter) error { i, err := m.Int64ObservableGauge("aint") assert.NotNil(t, i) return err }, }, { name: "Int64ObservableGauge with an invalid name", fn: func(t *testing.T, m metric.Meter) error { i, err := m.Int64ObservableGauge("_") assert.NotNil(t, i) return err }, wantErr: fmt.Errorf("%w: _: must start with a letter", ErrInstrumentName), }, { name: "Float64Counter with no validation issues", fn: func(t *testing.T, m metric.Meter) error { i, err := 
m.Float64Counter("counter") assert.NotNil(t, i) return err }, }, { name: "Float64Counter with an invalid name", fn: func(t *testing.T, m metric.Meter) error { i, err := m.Float64Counter("_") assert.NotNil(t, i) return err }, wantErr: fmt.Errorf("%w: _: must start with a letter", ErrInstrumentName), }, { name: "Float64UpDownCounter with no validation issues", fn: func(t *testing.T, m metric.Meter) error { i, err := m.Int64UpDownCounter("upDownCounter") assert.NotNil(t, i) return err }, }, { name: "Float64UpDownCounter with an invalid name", fn: func(t *testing.T, m metric.Meter) error { i, err := m.Int64UpDownCounter("_") assert.NotNil(t, i) return err }, wantErr: fmt.Errorf("%w: _: must start with a letter", ErrInstrumentName), }, { name: "Float64Histogram with no validation issues", fn: func(t *testing.T, m metric.Meter) error { i, err := m.Float64Histogram("histogram") assert.NotNil(t, i) return err }, }, { name: "Float64Histogram with an invalid name", fn: func(t *testing.T, m metric.Meter) error { i, err := m.Float64Histogram("_") assert.NotNil(t, i) return err }, wantErr: fmt.Errorf("%w: _: must start with a letter", ErrInstrumentName), }, { name: "Float64Histogram with invalid buckets", fn: func(t *testing.T, m metric.Meter) error { i, err := m.Float64Histogram("histogram", metric.WithExplicitBucketBoundaries(-1, 1, -5)) assert.NotNil(t, i) return err }, wantErr: errors.Join(fmt.Errorf("%w: non-monotonic boundaries: %v", errHist, []float64{-1, 1, -5})), }, { name: "Float64ObservableCounter with no validation issues", fn: func(t *testing.T, m metric.Meter) error { i, err := m.Float64ObservableCounter("aint") assert.NotNil(t, i) return err }, }, { name: "Float64ObservableCounter with an invalid name", fn: func(t *testing.T, m metric.Meter) error { i, err := m.Int64ObservableCounter("_") assert.NotNil(t, i) return err }, wantErr: fmt.Errorf("%w: _: must start with a letter", ErrInstrumentName), }, { name: "Float64ObservableUpDownCounter with no validation issues", fn: func(t *testing.T, m metric.Meter) error { i, err := m.Float64ObservableUpDownCounter("aint") assert.NotNil(t, i) return err }, }, { name: "Float64ObservableUpDownCounter with an invalid name", fn: func(t *testing.T, m metric.Meter) error { i, err := m.Float64ObservableUpDownCounter("_") assert.NotNil(t, i) return err }, wantErr: fmt.Errorf("%w: _: must start with a letter", ErrInstrumentName), }, { name: "Float64ObservableGauge with no validation issues", fn: func(t *testing.T, m metric.Meter) error { i, err := m.Float64ObservableGauge("aint") assert.NotNil(t, i) return err }, }, { name: "Float64ObservableGauge with an invalid name", fn: func(t *testing.T, m metric.Meter) error { i, err := m.Float64ObservableGauge("_") assert.NotNil(t, i) return err }, wantErr: fmt.Errorf("%w: _: must start with a letter", ErrInstrumentName), }, } for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { m := NewMeterProvider().Meter("testInstruments") err := tt.fn(t, m) assert.Equal(t, err, tt.wantErr) }) } } func TestValidateInstrumentName(t *testing.T) { const longName = "longNameOver255characters" + "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" testCases := []struct { name string wantErr error }{ { name: "", wantErr: fmt.Errorf("%w: : is empty", ErrInstrumentName), }, { name: "1", wantErr: 
fmt.Errorf("%w: 1: must start with a letter", ErrInstrumentName), }, { name: "a", }, { name: "n4me", }, { name: "n-me", }, { name: "na_e", }, { name: "nam.", }, { name: "nam/e", }, { name: "name!", wantErr: fmt.Errorf("%w: name!: must only contain [A-Za-z0-9_.-/]", ErrInstrumentName), }, { name: longName, wantErr: fmt.Errorf("%w: %s: longer than 255 characters", ErrInstrumentName, longName), }, } for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { assert.Equal(t, tt.wantErr, validateInstrumentName(tt.name)) }) } } func TestRegisterNonSDKObserverErrors(t *testing.T) { rdr := NewManualReader() mp := NewMeterProvider(WithReader(rdr)) meter := mp.Meter("scope") type obsrv struct{ metric.Observable } o := obsrv{} _, err := meter.RegisterCallback( func(context.Context, metric.Observer) error { return nil }, o, ) assert.ErrorContains( t, err, "invalid observable: from different implementation", "External instrument registered", ) } func TestMeterMixingOnRegisterErrors(t *testing.T) { rdr := NewManualReader() mp := NewMeterProvider(WithReader(rdr)) m1 := mp.Meter("scope1") m2 := mp.Meter("scope2") iCtr, err := m2.Int64ObservableCounter("int64ctr") require.NoError(t, err) fCtr, err := m2.Float64ObservableCounter("float64ctr") require.NoError(t, err) _, err = m1.RegisterCallback( func(context.Context, metric.Observer) error { return nil }, iCtr, fCtr, ) assert.ErrorContains( t, err, `invalid registration: observable "int64ctr" from Meter "scope2", registered with Meter "scope1"`, "Instrument registered with non-creation Meter", ) assert.ErrorContains( t, err, `invalid registration: observable "float64ctr" from Meter "scope2", registered with Meter "scope1"`, "Instrument registered with non-creation Meter", ) } func TestCallbackObserverNonRegistered(t *testing.T) { rdr := NewManualReader() mp := NewMeterProvider(WithReader(rdr)) m1 := mp.Meter("scope1") valid, err := m1.Int64ObservableCounter("ctr") require.NoError(t, err) m2 := mp.Meter("scope2") iCtr, err := m2.Int64ObservableCounter("int64ctr") require.NoError(t, err) fCtr, err := m2.Float64ObservableCounter("float64ctr") require.NoError(t, err) type int64Obsrv struct{ metric.Int64Observable } int64Foreign := int64Obsrv{} type float64Obsrv struct{ metric.Float64Observable } float64Foreign := float64Obsrv{} _, err = m1.RegisterCallback( func(_ context.Context, o metric.Observer) error { o.ObserveInt64(valid, 1) o.ObserveInt64(iCtr, 1) o.ObserveFloat64(fCtr, 1) o.ObserveInt64(int64Foreign, 1) o.ObserveFloat64(float64Foreign, 1) return nil }, valid, ) require.NoError(t, err) var got metricdata.ResourceMetrics assert.NotPanics(t, func() { err = rdr.Collect(context.Background(), &got) }) assert.NoError(t, err) want := metricdata.ResourceMetrics{ Resource: resource.Default(), ScopeMetrics: []metricdata.ScopeMetrics{ { Scope: instrumentation.Scope{ Name: "scope1", }, Metrics: []metricdata.Metrics{ { Name: "ctr", Data: metricdata.Sum[int64]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: true, DataPoints: []metricdata.DataPoint[int64]{ { Value: 1, }, }, }, }, }, }, }, } metricdatatest.AssertEqual(t, want, got, metricdatatest.IgnoreTimestamp()) } type logSink struct { logr.LogSink messages []string } func newLogSink(t *testing.T) *logSink { return &logSink{LogSink: testr.New(t).GetSink()} } func (l *logSink) Info(level int, msg string, keysAndValues ...interface{}) { l.messages = append(l.messages, msg) l.LogSink.Info(level, msg, keysAndValues...) 
} func (l *logSink) Error(err error, msg string, keysAndValues ...interface{}) { l.messages = append(l.messages, fmt.Sprintf("%s: %s", err, msg)) l.LogSink.Error(err, msg, keysAndValues...) } func (l *logSink) String() string { out := make([]string, len(l.messages)) for i := range l.messages { out[i] = "\t-" + l.messages[i] } return strings.Join(out, "\n") } func TestGlobalInstRegisterCallback(t *testing.T) { l := newLogSink(t) otel.SetLogger(logr.New(l)) const mtrName = "TestGlobalInstRegisterCallback" preMtr := otel.Meter(mtrName) preInt64Ctr, err := preMtr.Int64ObservableCounter("pre.int64.counter") require.NoError(t, err) preFloat64Ctr, err := preMtr.Float64ObservableCounter("pre.float64.counter") require.NoError(t, err) rdr := NewManualReader() mp := NewMeterProvider(WithReader(rdr), WithResource(resource.Empty())) otel.SetMeterProvider(mp) postMtr := otel.Meter(mtrName) postInt64Ctr, err := postMtr.Int64ObservableCounter("post.int64.counter") require.NoError(t, err) postFloat64Ctr, err := postMtr.Float64ObservableCounter("post.float64.counter") require.NoError(t, err) cb := func(_ context.Context, o metric.Observer) error { o.ObserveInt64(preInt64Ctr, 1) o.ObserveFloat64(preFloat64Ctr, 2) o.ObserveInt64(postInt64Ctr, 3) o.ObserveFloat64(postFloat64Ctr, 4) return nil } _, err = preMtr.RegisterCallback(cb, preInt64Ctr, preFloat64Ctr, postInt64Ctr, postFloat64Ctr) assert.NoError(t, err) got := metricdata.ResourceMetrics{} err = rdr.Collect(context.Background(), &got) assert.NoError(t, err) assert.Lenf(t, l.messages, 0, "Warnings and errors logged:\n%s", l) metricdatatest.AssertEqual(t, metricdata.ResourceMetrics{ ScopeMetrics: []metricdata.ScopeMetrics{ { Scope: instrumentation.Scope{Name: "TestGlobalInstRegisterCallback"}, Metrics: []metricdata.Metrics{ { Name: "pre.int64.counter", Data: metricdata.Sum[int64]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: true, DataPoints: []metricdata.DataPoint[int64]{{Value: 1}}, }, }, { Name: "pre.float64.counter", Data: metricdata.Sum[float64]{ DataPoints: []metricdata.DataPoint[float64]{{Value: 2}}, Temporality: metricdata.CumulativeTemporality, IsMonotonic: true, }, }, { Name: "post.int64.counter", Data: metricdata.Sum[int64]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: true, DataPoints: []metricdata.DataPoint[int64]{{Value: 3}}, }, }, { Name: "post.float64.counter", Data: metricdata.Sum[float64]{ DataPoints: []metricdata.DataPoint[float64]{{Value: 4}}, Temporality: metricdata.CumulativeTemporality, IsMonotonic: true, }, }, }, }, }, }, got, metricdatatest.IgnoreTimestamp()) } func TestMetersProvideScope(t *testing.T) { rdr := NewManualReader() mp := NewMeterProvider(WithReader(rdr)) m1 := mp.Meter("scope1") ctr1, err := m1.Float64ObservableCounter("ctr1") assert.NoError(t, err) _, err = m1.RegisterCallback(func(_ context.Context, o metric.Observer) error { o.ObserveFloat64(ctr1, 5) return nil }, ctr1) assert.NoError(t, err) m2 := mp.Meter("scope2") ctr2, err := m2.Int64ObservableCounter("ctr2") assert.NoError(t, err) _, err = m2.RegisterCallback(func(_ context.Context, o metric.Observer) error { o.ObserveInt64(ctr2, 7) return nil }, ctr2) assert.NoError(t, err) want := metricdata.ResourceMetrics{ Resource: resource.Default(), ScopeMetrics: []metricdata.ScopeMetrics{ { Scope: instrumentation.Scope{ Name: "scope1", }, Metrics: []metricdata.Metrics{ { Name: "ctr1", Data: metricdata.Sum[float64]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: true, DataPoints: []metricdata.DataPoint[float64]{ { Value: 5, }, 
}, }, }, }, }, { Scope: instrumentation.Scope{ Name: "scope2", }, Metrics: []metricdata.Metrics{ { Name: "ctr2", Data: metricdata.Sum[int64]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: true, DataPoints: []metricdata.DataPoint[int64]{ { Value: 7, }, }, }, }, }, }, }, } got := metricdata.ResourceMetrics{} err = rdr.Collect(context.Background(), &got) assert.NoError(t, err) metricdatatest.AssertEqual(t, want, got, metricdatatest.IgnoreTimestamp()) } func TestUnregisterUnregisters(t *testing.T) { r := NewManualReader() mp := NewMeterProvider(WithReader(r)) m := mp.Meter("TestUnregisterUnregisters") int64Counter, err := m.Int64ObservableCounter("int64.counter") require.NoError(t, err) int64UpDownCounter, err := m.Int64ObservableUpDownCounter("int64.up_down_counter") require.NoError(t, err) int64Gauge, err := m.Int64ObservableGauge("int64.gauge") require.NoError(t, err) floag64Counter, err := m.Float64ObservableCounter("floag64.counter") require.NoError(t, err) floag64UpDownCounter, err := m.Float64ObservableUpDownCounter("floag64.up_down_counter") require.NoError(t, err) floag64Gauge, err := m.Float64ObservableGauge("floag64.gauge") require.NoError(t, err) var called bool reg, err := m.RegisterCallback( func(context.Context, metric.Observer) error { called = true return nil }, int64Counter, int64UpDownCounter, int64Gauge, floag64Counter, floag64UpDownCounter, floag64Gauge, ) require.NoError(t, err) ctx := context.Background() err = r.Collect(ctx, &metricdata.ResourceMetrics{}) require.NoError(t, err) assert.True(t, called, "callback not called for registered callback") called = false require.NoError(t, reg.Unregister(), "unregister") err = r.Collect(ctx, &metricdata.ResourceMetrics{}) require.NoError(t, err) assert.False(t, called, "callback called for unregistered callback") } func TestRegisterCallbackDropAggregations(t *testing.T) { aggFn := func(InstrumentKind) Aggregation { return AggregationDrop{} } r := NewManualReader(WithAggregationSelector(aggFn)) mp := NewMeterProvider(WithReader(r)) m := mp.Meter("testRegisterCallbackDropAggregations") int64Counter, err := m.Int64ObservableCounter("int64.counter") require.NoError(t, err) int64UpDownCounter, err := m.Int64ObservableUpDownCounter("int64.up_down_counter") require.NoError(t, err) int64Gauge, err := m.Int64ObservableGauge("int64.gauge") require.NoError(t, err) floag64Counter, err := m.Float64ObservableCounter("floag64.counter") require.NoError(t, err) floag64UpDownCounter, err := m.Float64ObservableUpDownCounter("floag64.up_down_counter") require.NoError(t, err) floag64Gauge, err := m.Float64ObservableGauge("floag64.gauge") require.NoError(t, err) var called bool _, err = m.RegisterCallback( func(context.Context, metric.Observer) error { called = true return nil }, int64Counter, int64UpDownCounter, int64Gauge, floag64Counter, floag64UpDownCounter, floag64Gauge, ) require.NoError(t, err) data := metricdata.ResourceMetrics{} err = r.Collect(context.Background(), &data) require.NoError(t, err) assert.False(t, called, "callback called for all drop instruments") assert.Len(t, data.ScopeMetrics, 0, "metrics exported for drop instruments") } func TestAttributeFilter(t *testing.T) { t.Run("Delta", testAttributeFilter(metricdata.DeltaTemporality)) t.Run("Cumulative", testAttributeFilter(metricdata.CumulativeTemporality)) } func testAttributeFilter(temporality metricdata.Temporality) func(*testing.T) { fooBar := attribute.NewSet(attribute.String("foo", "bar")) withFooBar := metric.WithAttributeSet(fooBar) v1 := 
attribute.NewSet(attribute.String("foo", "bar"), attribute.Int("version", 1)) withV1 := metric.WithAttributeSet(v1) v2 := attribute.NewSet(attribute.String("foo", "bar"), attribute.Int("version", 2)) withV2 := metric.WithAttributeSet(v2) testcases := []struct { name string register func(t *testing.T, mtr metric.Meter) error wantMetric metricdata.Metrics }{ { name: "ObservableFloat64Counter", register: func(t *testing.T, mtr metric.Meter) error { ctr, err := mtr.Float64ObservableCounter("afcounter") if err != nil { return err } _, err = mtr.RegisterCallback(func(_ context.Context, o metric.Observer) error { o.ObserveFloat64(ctr, 1.0, withV1) o.ObserveFloat64(ctr, 2.0, withFooBar) o.ObserveFloat64(ctr, 1.0, withV2) return nil }, ctr) return err }, wantMetric: metricdata.Metrics{ Name: "afcounter", Data: metricdata.Sum[float64]{ DataPoints: []metricdata.DataPoint[float64]{ {Attributes: fooBar, Value: 4.0}, }, Temporality: temporality, IsMonotonic: true, }, }, }, { name: "ObservableFloat64UpDownCounter", register: func(t *testing.T, mtr metric.Meter) error { ctr, err := mtr.Float64ObservableUpDownCounter("afupdowncounter") if err != nil { return err } _, err = mtr.RegisterCallback(func(_ context.Context, o metric.Observer) error { o.ObserveFloat64(ctr, 1.0, withV1) o.ObserveFloat64(ctr, 2.0, withFooBar) o.ObserveFloat64(ctr, 1.0, withV2) return nil }, ctr) return err }, wantMetric: metricdata.Metrics{ Name: "afupdowncounter", Data: metricdata.Sum[float64]{ DataPoints: []metricdata.DataPoint[float64]{ { Attributes: attribute.NewSet(attribute.String("foo", "bar")), Value: 4.0, }, }, Temporality: temporality, IsMonotonic: false, }, }, }, { name: "ObservableFloat64Gauge", register: func(t *testing.T, mtr metric.Meter) error { ctr, err := mtr.Float64ObservableGauge("afgauge") if err != nil { return err } _, err = mtr.RegisterCallback(func(_ context.Context, o metric.Observer) error { o.ObserveFloat64(ctr, 1.0, withV1) o.ObserveFloat64(ctr, 2.0, withV2) return nil }, ctr) return err }, wantMetric: metricdata.Metrics{ Name: "afgauge", Data: metricdata.Gauge[float64]{ DataPoints: []metricdata.DataPoint[float64]{ {Attributes: fooBar, Value: 2.0}, }, }, }, }, { name: "ObservableInt64Counter", register: func(t *testing.T, mtr metric.Meter) error { ctr, err := mtr.Int64ObservableCounter("aicounter") if err != nil { return err } _, err = mtr.RegisterCallback(func(_ context.Context, o metric.Observer) error { o.ObserveInt64(ctr, 10, withV1) o.ObserveInt64(ctr, 20, withFooBar) o.ObserveInt64(ctr, 10, withV2) return nil }, ctr) return err }, wantMetric: metricdata.Metrics{ Name: "aicounter", Data: metricdata.Sum[int64]{ DataPoints: []metricdata.DataPoint[int64]{ {Attributes: fooBar, Value: 40}, }, Temporality: temporality, IsMonotonic: true, }, }, }, { name: "ObservableInt64UpDownCounter", register: func(t *testing.T, mtr metric.Meter) error { ctr, err := mtr.Int64ObservableUpDownCounter("aiupdowncounter") if err != nil { return err } _, err = mtr.RegisterCallback(func(_ context.Context, o metric.Observer) error { o.ObserveInt64(ctr, 10, withV1) o.ObserveInt64(ctr, 20, withFooBar) o.ObserveInt64(ctr, 10, withV2) return nil }, ctr) return err }, wantMetric: metricdata.Metrics{ Name: "aiupdowncounter", Data: metricdata.Sum[int64]{ DataPoints: []metricdata.DataPoint[int64]{ {Attributes: fooBar, Value: 40}, }, Temporality: temporality, IsMonotonic: false, }, }, }, { name: "ObservableInt64Gauge", register: func(t *testing.T, mtr metric.Meter) error { ctr, err := mtr.Int64ObservableGauge("aigauge") if err != nil { 
return err } _, err = mtr.RegisterCallback(func(_ context.Context, o metric.Observer) error { o.ObserveInt64(ctr, 10, withV1) o.ObserveInt64(ctr, 20, withV2) return nil }, ctr) return err }, wantMetric: metricdata.Metrics{ Name: "aigauge", Data: metricdata.Gauge[int64]{ DataPoints: []metricdata.DataPoint[int64]{ {Attributes: fooBar, Value: 20}, }, }, }, }, { name: "SyncFloat64Counter", register: func(t *testing.T, mtr metric.Meter) error { ctr, err := mtr.Float64Counter("sfcounter") if err != nil { return err } ctr.Add(context.Background(), 1.0, withV1) ctr.Add(context.Background(), 2.0, withV2) return nil }, wantMetric: metricdata.Metrics{ Name: "sfcounter", Data: metricdata.Sum[float64]{ DataPoints: []metricdata.DataPoint[float64]{ {Attributes: fooBar, Value: 3.0}, }, Temporality: temporality, IsMonotonic: true, }, }, }, { name: "SyncFloat64UpDownCounter", register: func(t *testing.T, mtr metric.Meter) error { ctr, err := mtr.Float64UpDownCounter("sfupdowncounter") if err != nil { return err } ctr.Add(context.Background(), 1.0, withV1) ctr.Add(context.Background(), 2.0, withV2) return nil }, wantMetric: metricdata.Metrics{ Name: "sfupdowncounter", Data: metricdata.Sum[float64]{ DataPoints: []metricdata.DataPoint[float64]{ {Attributes: fooBar, Value: 3.0}, }, Temporality: temporality, IsMonotonic: false, }, }, }, { name: "SyncFloat64Histogram", register: func(t *testing.T, mtr metric.Meter) error { ctr, err := mtr.Float64Histogram("sfhistogram") if err != nil { return err } ctr.Record(context.Background(), 1.0, withV1) ctr.Record(context.Background(), 2.0, withV2) return nil }, wantMetric: metricdata.Metrics{ Name: "sfhistogram", Data: metricdata.Histogram[float64]{ DataPoints: []metricdata.HistogramDataPoint[float64]{ { Attributes: fooBar, Bounds: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000}, BucketCounts: []uint64{0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, Count: 2, Min: metricdata.NewExtrema(1.), Max: metricdata.NewExtrema(2.), Sum: 3.0, }, }, Temporality: temporality, }, }, }, { name: "SyncInt64Counter", register: func(t *testing.T, mtr metric.Meter) error { ctr, err := mtr.Int64Counter("sicounter") if err != nil { return err } ctr.Add(context.Background(), 10, withV1) ctr.Add(context.Background(), 20, withV2) return nil }, wantMetric: metricdata.Metrics{ Name: "sicounter", Data: metricdata.Sum[int64]{ DataPoints: []metricdata.DataPoint[int64]{ {Attributes: fooBar, Value: 30}, }, Temporality: temporality, IsMonotonic: true, }, }, }, { name: "SyncInt64UpDownCounter", register: func(t *testing.T, mtr metric.Meter) error { ctr, err := mtr.Int64UpDownCounter("siupdowncounter") if err != nil { return err } ctr.Add(context.Background(), 10, withV1) ctr.Add(context.Background(), 20, withV2) return nil }, wantMetric: metricdata.Metrics{ Name: "siupdowncounter", Data: metricdata.Sum[int64]{ DataPoints: []metricdata.DataPoint[int64]{ {Attributes: fooBar, Value: 30}, }, Temporality: temporality, IsMonotonic: false, }, }, }, { name: "SyncInt64Histogram", register: func(t *testing.T, mtr metric.Meter) error { ctr, err := mtr.Int64Histogram("sihistogram") if err != nil { return err } ctr.Record(context.Background(), 1, withV1) ctr.Record(context.Background(), 2, withV2) return nil }, wantMetric: metricdata.Metrics{ Name: "sihistogram", Data: metricdata.Histogram[int64]{ DataPoints: []metricdata.HistogramDataPoint[int64]{ { Attributes: fooBar, Bounds: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000}, BucketCounts: 
[]uint64{0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, Count: 2, Min: metricdata.NewExtrema[int64](1), Max: metricdata.NewExtrema[int64](2), Sum: 3.0, }, }, Temporality: temporality, }, }, }, } return func(t *testing.T) { for _, tt := range testcases { t.Run(tt.name, func(t *testing.T) { rdr := NewManualReader(WithTemporalitySelector(func(InstrumentKind) metricdata.Temporality { return temporality })) mtr := NewMeterProvider( WithReader(rdr), WithView(NewView( Instrument{Name: "*"}, Stream{AttributeFilter: attribute.NewAllowKeysFilter("foo")}, )), ).Meter("TestAttributeFilter") require.NoError(t, tt.register(t, mtr)) m := metricdata.ResourceMetrics{} err := rdr.Collect(context.Background(), &m) assert.NoError(t, err) require.Len(t, m.ScopeMetrics, 1) require.Len(t, m.ScopeMetrics[0].Metrics, 1) metricdatatest.AssertEqual(t, tt.wantMetric, m.ScopeMetrics[0].Metrics[0], metricdatatest.IgnoreTimestamp()) }) } } } func TestObservableExample(t *testing.T) { // This example can be found: // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/metrics/supplementary-guidelines.md#asynchronous-example var ( threadID1 = attribute.Int("tid", 1) threadID2 = attribute.Int("tid", 2) threadID3 = attribute.Int("tid", 3) processID1001 = attribute.String("pid", "1001") thread1 = attribute.NewSet(processID1001, threadID1) thread2 = attribute.NewSet(processID1001, threadID2) thread3 = attribute.NewSet(processID1001, threadID3) process1001 = attribute.NewSet(processID1001) ) setup := func(t *testing.T, temp metricdata.Temporality) (map[attribute.Set]int64, func(*testing.T), *metricdata.ScopeMetrics, *int64, *int64, *int64) { t.Helper() const ( instName = "pageFaults" filteredStream = "filteredPageFaults" scopeName = "ObservableExample" ) selector := func(InstrumentKind) metricdata.Temporality { return temp } reader := NewManualReader(WithTemporalitySelector(selector)) allowAll := attribute.NewDenyKeysFilter() noFiltered := NewView(Instrument{Name: instName}, Stream{Name: instName, AttributeFilter: allowAll}) filter := attribute.NewDenyKeysFilter("tid") filtered := NewView(Instrument{Name: instName}, Stream{Name: filteredStream, AttributeFilter: filter}) mp := NewMeterProvider(WithReader(reader), WithView(noFiltered, filtered)) meter := mp.Meter(scopeName) observations := make(map[attribute.Set]int64) _, err := meter.Int64ObservableCounter(instName, metric.WithInt64Callback( func(_ context.Context, o metric.Int64Observer) error { for attrSet, val := range observations { o.Observe(val, metric.WithAttributeSet(attrSet)) } return nil }, )) require.NoError(t, err) want := &metricdata.ScopeMetrics{ Scope: instrumentation.Scope{Name: scopeName}, Metrics: []metricdata.Metrics{ { Name: filteredStream, Data: metricdata.Sum[int64]{ Temporality: temp, IsMonotonic: true, DataPoints: []metricdata.DataPoint[int64]{ {Attributes: process1001}, }, }, }, { Name: instName, Data: metricdata.Sum[int64]{ Temporality: temp, IsMonotonic: true, DataPoints: []metricdata.DataPoint[int64]{ {Attributes: thread1}, {Attributes: thread2}, }, }, }, }, } wantFiltered := &want.Metrics[0].Data.(metricdata.Sum[int64]).DataPoints[0].Value wantThread1 := &want.Metrics[1].Data.(metricdata.Sum[int64]).DataPoints[0].Value wantThread2 := &want.Metrics[1].Data.(metricdata.Sum[int64]).DataPoints[1].Value collect := func(t *testing.T) { t.Helper() got := metricdata.ResourceMetrics{} err := reader.Collect(context.Background(), &got) require.NoError(t, err) require.Len(t, got.ScopeMetrics, 1) 
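// Editorial note (not in the original source): setup returns pointers into
// want (wantFiltered, wantThread1, wantThread2) so each phase of the example
// below can update the expected values in place before invoking this
// collect/verify helper again.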
metricdatatest.AssertEqual(t, *want, got.ScopeMetrics[0], metricdatatest.IgnoreTimestamp()) } return observations, collect, want, wantFiltered, wantThread1, wantThread2 } t.Run("Cumulative", func(t *testing.T) { temporality := metricdata.CumulativeTemporality observations, verify, want, wantFiltered, wantThread1, wantThread2 := setup(t, temporality) // During the time range (T0, T1]: // pid = 1001, tid = 1, #PF = 50 // pid = 1001, tid = 2, #PF = 30 observations[thread1] = 50 observations[thread2] = 30 *wantFiltered = 80 *wantThread1 = 50 *wantThread2 = 30 verify(t) // During the time range (T1, T2]: // pid = 1001, tid = 1, #PF = 53 // pid = 1001, tid = 2, #PF = 38 observations[thread1] = 53 observations[thread2] = 38 *wantFiltered = 91 *wantThread1 = 53 *wantThread2 = 38 verify(t) // During the time range (T2, T3] // pid = 1001, tid = 1, #PF = 56 // pid = 1001, tid = 2, #PF = 42 observations[thread1] = 56 observations[thread2] = 42 *wantFiltered = 98 *wantThread1 = 56 *wantThread2 = 42 verify(t) // During the time range (T3, T4]: // pid = 1001, tid = 1, #PF = 60 // pid = 1001, tid = 2, #PF = 47 observations[thread1] = 60 observations[thread2] = 47 *wantFiltered = 107 *wantThread1 = 60 *wantThread2 = 47 verify(t) // During the time range (T4, T5]: // thread 1 died, thread 3 started // pid = 1001, tid = 2, #PF = 53 // pid = 1001, tid = 3, #PF = 5 delete(observations, thread1) observations[thread2] = 53 observations[thread3] = 5 *wantFiltered = 58 want.Metrics[1].Data = metricdata.Sum[int64]{ Temporality: temporality, IsMonotonic: true, DataPoints: []metricdata.DataPoint[int64]{ // Thread 1 is no longer exported. {Attributes: thread2, Value: 53}, {Attributes: thread3, Value: 5}, }, } verify(t) }) t.Run("Delta", func(t *testing.T) { temporality := metricdata.DeltaTemporality observations, verify, want, wantFiltered, wantThread1, wantThread2 := setup(t, temporality) // During the time range (T0, T1]: // pid = 1001, tid = 1, #PF = 50 // pid = 1001, tid = 2, #PF = 30 observations[thread1] = 50 observations[thread2] = 30 *wantFiltered = 80 *wantThread1 = 50 *wantThread2 = 30 verify(t) // During the time range (T1, T2]: // pid = 1001, tid = 1, #PF = 53 // pid = 1001, tid = 2, #PF = 38 observations[thread1] = 53 observations[thread2] = 38 *wantFiltered = 11 *wantThread1 = 3 *wantThread2 = 8 verify(t) // During the time range (T2, T3] // pid = 1001, tid = 1, #PF = 56 // pid = 1001, tid = 2, #PF = 42 observations[thread1] = 56 observations[thread2] = 42 *wantFiltered = 7 *wantThread1 = 3 *wantThread2 = 4 verify(t) // During the time range (T3, T4]: // pid = 1001, tid = 1, #PF = 60 // pid = 1001, tid = 2, #PF = 47 observations[thread1] = 60 observations[thread2] = 47 *wantFiltered = 9 *wantThread1 = 4 *wantThread2 = 5 verify(t) // During the time range (T4, T5]: // thread 1 died, thread 3 started // pid = 1001, tid = 2, #PF = 53 // pid = 1001, tid = 3, #PF = 5 delete(observations, thread1) observations[thread2] = 53 observations[thread3] = 5 *wantFiltered = -49 want.Metrics[1].Data = metricdata.Sum[int64]{ Temporality: temporality, IsMonotonic: true, DataPoints: []metricdata.DataPoint[int64]{ // Thread 1 is no longer exported. 
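// Editorial note (derived from the observations above, not in the original
// source): thread2 reports a delta of 53-47 = 6, thread3 is new so it reports
// its full 5, and the filtered per-process stream drops from 107 to 58,
// hence the -49 recorded in wantFiltered above.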
{Attributes: thread2, Value: 6}, {Attributes: thread3, Value: 5}, }, } verify(t) }) } var ( aiCounter metric.Int64ObservableCounter aiUpDownCounter metric.Int64ObservableUpDownCounter aiGauge metric.Int64ObservableGauge afCounter metric.Float64ObservableCounter afUpDownCounter metric.Float64ObservableUpDownCounter afGauge metric.Float64ObservableGauge siCounter metric.Int64Counter siUpDownCounter metric.Int64UpDownCounter siHistogram metric.Int64Histogram sfCounter metric.Float64Counter sfUpDownCounter metric.Float64UpDownCounter sfHistogram metric.Float64Histogram ) func BenchmarkInstrumentCreation(b *testing.B) { provider := NewMeterProvider(WithReader(NewManualReader())) meter := provider.Meter("BenchmarkInstrumentCreation") b.ReportAllocs() b.ResetTimer() for n := 0; n < b.N; n++ { aiCounter, _ = meter.Int64ObservableCounter("observable.int64.counter") aiUpDownCounter, _ = meter.Int64ObservableUpDownCounter("observable.int64.up.down.counter") aiGauge, _ = meter.Int64ObservableGauge("observable.int64.gauge") afCounter, _ = meter.Float64ObservableCounter("observable.float64.counter") afUpDownCounter, _ = meter.Float64ObservableUpDownCounter("observable.float64.up.down.counter") afGauge, _ = meter.Float64ObservableGauge("observable.float64.gauge") siCounter, _ = meter.Int64Counter("sync.int64.counter") siUpDownCounter, _ = meter.Int64UpDownCounter("sync.int64.up.down.counter") siHistogram, _ = meter.Int64Histogram("sync.int64.histogram") sfCounter, _ = meter.Float64Counter("sync.float64.counter") sfUpDownCounter, _ = meter.Float64UpDownCounter("sync.float64.up.down.counter") sfHistogram, _ = meter.Float64Histogram("sync.float64.histogram") } } func testNilAggregationSelector(InstrumentKind) Aggregation { return nil } func testDefaultAggregationSelector(InstrumentKind) Aggregation { return AggregationDefault{} } func testUndefinedTemporalitySelector(InstrumentKind) metricdata.Temporality { return metricdata.Temporality(0) } func testInvalidTemporalitySelector(InstrumentKind) metricdata.Temporality { return metricdata.Temporality(255) } type noErrorHandler struct { t *testing.T } func (h noErrorHandler) Handle(err error) { assert.NoError(h.t, err) } func TestMalformedSelectors(t *testing.T) { type testCase struct { name string reader Reader } testCases := []testCase{ { name: "nil aggregation selector", reader: NewManualReader(WithAggregationSelector(testNilAggregationSelector)), }, { name: "nil aggregation selector periodic", reader: NewPeriodicReader(&fnExporter{aggregationFunc: testNilAggregationSelector}), }, { name: "default aggregation selector", reader: NewManualReader(WithAggregationSelector(testDefaultAggregationSelector)), }, { name: "default aggregation selector periodic", reader: NewPeriodicReader(&fnExporter{aggregationFunc: testDefaultAggregationSelector}), }, { name: "undefined temporality selector", reader: NewManualReader(WithTemporalitySelector(testUndefinedTemporalitySelector)), }, { name: "undefined temporality selector periodic", reader: NewPeriodicReader(&fnExporter{temporalityFunc: testUndefinedTemporalitySelector}), }, { name: "invalid temporality selector", reader: NewManualReader(WithTemporalitySelector(testInvalidTemporalitySelector)), }, { name: "invalid temporality selector periodic", reader: NewPeriodicReader(&fnExporter{temporalityFunc: testInvalidTemporalitySelector}), }, { name: "both aggregation and temporality selector", reader: NewManualReader( WithAggregationSelector(testNilAggregationSelector), WithTemporalitySelector(testUndefinedTemporalitySelector), 
), }, { name: "both aggregation and temporality selector periodic", reader: NewPeriodicReader(&fnExporter{ aggregationFunc: testNilAggregationSelector, temporalityFunc: testUndefinedTemporalitySelector, }), }, } for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { origErrorHandler := global.GetErrorHandler() defer global.SetErrorHandler(origErrorHandler) global.SetErrorHandler(noErrorHandler{t}) defer func() { _ = tt.reader.Shutdown(context.Background()) }() meter := NewMeterProvider(WithReader(tt.reader)).Meter("TestNilAggregationSelector") // Create All instruments, they should not error aiCounter, err := meter.Int64ObservableCounter("observable.int64.counter") require.NoError(t, err) aiUpDownCounter, err := meter.Int64ObservableUpDownCounter("observable.int64.up.down.counter") require.NoError(t, err) aiGauge, err := meter.Int64ObservableGauge("observable.int64.gauge") require.NoError(t, err) afCounter, err := meter.Float64ObservableCounter("observable.float64.counter") require.NoError(t, err) afUpDownCounter, err := meter.Float64ObservableUpDownCounter("observable.float64.up.down.counter") require.NoError(t, err) afGauge, err := meter.Float64ObservableGauge("observable.float64.gauge") require.NoError(t, err) siCounter, err := meter.Int64Counter("sync.int64.counter") require.NoError(t, err) siUpDownCounter, err := meter.Int64UpDownCounter("sync.int64.up.down.counter") require.NoError(t, err) siHistogram, err := meter.Int64Histogram("sync.int64.histogram") require.NoError(t, err) sfCounter, err := meter.Float64Counter("sync.float64.counter") require.NoError(t, err) sfUpDownCounter, err := meter.Float64UpDownCounter("sync.float64.up.down.counter") require.NoError(t, err) sfHistogram, err := meter.Float64Histogram("sync.float64.histogram") require.NoError(t, err) callback := func(ctx context.Context, obs metric.Observer) error { obs.ObserveInt64(aiCounter, 1) obs.ObserveInt64(aiUpDownCounter, 1) obs.ObserveInt64(aiGauge, 1) obs.ObserveFloat64(afCounter, 1) obs.ObserveFloat64(afUpDownCounter, 1) obs.ObserveFloat64(afGauge, 1) return nil } _, err = meter.RegisterCallback(callback, aiCounter, aiUpDownCounter, aiGauge, afCounter, afUpDownCounter, afGauge) require.NoError(t, err) siCounter.Add(context.Background(), 1) siUpDownCounter.Add(context.Background(), 1) siHistogram.Record(context.Background(), 1) sfCounter.Add(context.Background(), 1) sfUpDownCounter.Add(context.Background(), 1) sfHistogram.Record(context.Background(), 1) var rm metricdata.ResourceMetrics err = tt.reader.Collect(context.Background(), &rm) require.NoError(t, err) require.Len(t, rm.ScopeMetrics, 1) require.Len(t, rm.ScopeMetrics[0].Metrics, 12) }) } } func TestHistogramBucketPrecedenceOrdering(t *testing.T) { defaultBuckets := []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000} aggregationSelector := func(InstrumentKind) Aggregation { return AggregationExplicitBucketHistogram{Boundaries: []float64{0, 1, 2, 3, 4, 5}} } for _, tt := range []struct { desc string reader Reader views []View histogramOpts []metric.Float64HistogramOption expectedBucketBoundaries []float64 }{ { desc: "default", reader: NewManualReader(), expectedBucketBoundaries: defaultBuckets, }, { desc: "custom reader aggregation overrides default", reader: NewManualReader(WithAggregationSelector(aggregationSelector)), expectedBucketBoundaries: []float64{0, 1, 2, 3, 4, 5}, }, { desc: "overridden by histogram option", reader: NewManualReader(WithAggregationSelector(aggregationSelector)), histogramOpts: 
[]metric.Float64HistogramOption{ metric.WithExplicitBucketBoundaries(0, 2, 4, 6, 8, 10), }, expectedBucketBoundaries: []float64{0, 2, 4, 6, 8, 10}, }, { desc: "overridden by view", reader: NewManualReader(WithAggregationSelector(aggregationSelector)), histogramOpts: []metric.Float64HistogramOption{ metric.WithExplicitBucketBoundaries(0, 2, 4, 6, 8, 10), }, views: []View{NewView(Instrument{Name: "*"}, Stream{ Aggregation: AggregationExplicitBucketHistogram{Boundaries: []float64{0, 3, 6, 9, 12, 15}}, })}, expectedBucketBoundaries: []float64{0, 3, 6, 9, 12, 15}, }, } { t.Run(tt.desc, func(t *testing.T) { meter := NewMeterProvider(WithView(tt.views...), WithReader(tt.reader)).Meter("TestHistogramBucketPrecedenceOrdering") sfHistogram, err := meter.Float64Histogram("sync.float64.histogram", tt.histogramOpts...) require.NoError(t, err) sfHistogram.Record(context.Background(), 1) var rm metricdata.ResourceMetrics err = tt.reader.Collect(context.Background(), &rm) require.NoError(t, err) require.Len(t, rm.ScopeMetrics, 1) require.Len(t, rm.ScopeMetrics[0].Metrics, 1) gotHist, ok := rm.ScopeMetrics[0].Metrics[0].Data.(metricdata.Histogram[float64]) require.True(t, ok) require.Len(t, gotHist.DataPoints, 1) assert.Equal(t, tt.expectedBucketBoundaries, gotHist.DataPoints[0].Bounds) }) } } opentelemetry-go-1.21.0/sdk/metric/metricdata/000077500000000000000000000000001452547353200212655ustar00rootroot00000000000000opentelemetry-go-1.21.0/sdk/metric/metricdata/data.go000066400000000000000000000242631452547353200225340ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata" import ( "time" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" ) // ResourceMetrics is a collection of ScopeMetrics and the associated Resource // that created them. type ResourceMetrics struct { // Resource represents the entity that collected the metrics. Resource *resource.Resource // ScopeMetrics are the collection of metrics with unique Scopes. ScopeMetrics []ScopeMetrics } // ScopeMetrics is a collection of Metrics Produces by a Meter. type ScopeMetrics struct { // Scope is the Scope that the Meter was created with. Scope instrumentation.Scope // Metrics are a list of aggregations created by the Meter. Metrics []Metrics } // Metrics is a collection of one or more aggregated timeseries from an Instrument. type Metrics struct { // Name is the name of the Instrument that created this data. Name string // Description is the description of the Instrument, which can be used in documentation. Description string // Unit is the unit in which the Instrument reports. Unit string // Data is the aggregated data from an Instrument. Data Aggregation } // Aggregation is the store of data reported by an Instrument. // It will be one of: Gauge, Sum, Histogram. 
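// Consumers typically recover the concrete aggregation with a type switch; a
// minimal sketch, assuming m is a Metrics value (illustrative only, not part
// of the original file):
//
//	switch m.Data.(type) {
//	case Gauge[int64], Gauge[float64]:
//		// handle gauge data points
//	case Sum[int64], Sum[float64]:
//		// handle sums, checking Temporality and IsMonotonic
//	case Histogram[int64], Histogram[float64]:
//		// handle histogram bucket counts
//	}
//
// ExponentialHistogram and Summary, defined later in this file, also satisfy
// this interface.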
type Aggregation interface { privateAggregation() } // Gauge represents a measurement of the current value of an instrument. type Gauge[N int64 | float64] struct { // DataPoints are the individual aggregated measurements with unique // Attributes. DataPoints []DataPoint[N] } func (Gauge[N]) privateAggregation() {} // Sum represents the sum of all measurements of values from an instrument. type Sum[N int64 | float64] struct { // DataPoints are the individual aggregated measurements with unique // Attributes. DataPoints []DataPoint[N] // Temporality describes if the aggregation is reported as the change from the // last report time, or the cumulative changes since a fixed start time. Temporality Temporality // IsMonotonic represents if this aggregation only increases or decreases. IsMonotonic bool } func (Sum[N]) privateAggregation() {} // DataPoint is a single data point in a timeseries. type DataPoint[N int64 | float64] struct { // Attributes is the set of key value pairs that uniquely identify the // timeseries. Attributes attribute.Set // StartTime is when the timeseries was started. (optional) StartTime time.Time `json:",omitempty"` // Time is the time when the timeseries was recorded. (optional) Time time.Time `json:",omitempty"` // Value is the value of this data point. Value N // Exemplars is the sampled Exemplars collected during the timeseries. Exemplars []Exemplar[N] `json:",omitempty"` } // Histogram represents the histogram of all measurements of values from an instrument. type Histogram[N int64 | float64] struct { // DataPoints are the individual aggregated measurements with unique // Attributes. DataPoints []HistogramDataPoint[N] // Temporality describes if the aggregation is reported as the change from the // last report time, or the cumulative changes since a fixed start time. Temporality Temporality } func (Histogram[N]) privateAggregation() {} // HistogramDataPoint is a single histogram data point in a timeseries. type HistogramDataPoint[N int64 | float64] struct { // Attributes is the set of key value pairs that uniquely identify the // timeseries. Attributes attribute.Set // StartTime is when the timeseries was started. StartTime time.Time // Time is the time when the timeseries was recorded. Time time.Time // Count is the number of updates this histogram has been calculated with. Count uint64 // Bounds are the upper bounds of the buckets of the histogram. Because the // last boundary is +infinity this one is implied. Bounds []float64 // BucketCounts is the count of each of the buckets. BucketCounts []uint64 // Min is the minimum value recorded. (optional) Min Extrema[N] // Max is the maximum value recorded. (optional) Max Extrema[N] // Sum is the sum of the values recorded. Sum N // Exemplars is the sampled Exemplars collected during the timeseries. Exemplars []Exemplar[N] `json:",omitempty"` } // ExponentialHistogram represents the histogram of all measurements of values from an instrument. type ExponentialHistogram[N int64 | float64] struct { // DataPoints are the individual aggregated measurements with unique // attributes. DataPoints []ExponentialHistogramDataPoint[N] // Temporality describes if the aggregation is reported as the change from the // last report time, or the cumulative changes since a fixed start time. Temporality Temporality } func (ExponentialHistogram[N]) privateAggregation() {} // ExponentialHistogramDataPoint is a single exponential histogram data point in a timeseries. 
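// Editorial example (not in the original source): with Scale = 0 the bucket
// base is 2^(2^-0) = 2, so positive bucket i counts values in (2^i, 2^(i+1)];
// Scale = 1 gives base 2^(1/2) ≈ 1.414 for finer resolution, and Scale = -1
// gives base 4 for coarser resolution.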
type ExponentialHistogramDataPoint[N int64 | float64] struct { // Attributes is the set of key value pairs that uniquely identify the // timeseries. Attributes attribute.Set // StartTime is when the timeseries was started. StartTime time.Time // Time is the time when the timeseries was recorded. Time time.Time // Count is the number of updates this histogram has been calculated with. Count uint64 // Min is the minimum value recorded. (optional) Min Extrema[N] // Max is the maximum value recorded. (optional) Max Extrema[N] // Sum is the sum of the values recorded. Sum N // Scale describes the resolution of the histogram. Boundaries are // located at powers of the base, where: // // base = 2 ^ (2 ^ -Scale) Scale int32 // ZeroCount is the number of values whose absolute value // is less than or equal to [ZeroThreshold]. // When ZeroThreshold is 0, this is the number of values that // cannot be expressed using the standard exponential formula // as well as values that have been rounded to zero. // ZeroCount represents the special zero count bucket. ZeroCount uint64 // PositiveBucket is range of positive value bucket counts. PositiveBucket ExponentialBucket // NegativeBucket is range of negative value bucket counts. NegativeBucket ExponentialBucket // ZeroThreshold is the width of the zero region. Where the zero region is // defined as the closed interval [-ZeroThreshold, ZeroThreshold]. ZeroThreshold float64 // Exemplars is the sampled Exemplars collected during the timeseries. Exemplars []Exemplar[N] `json:",omitempty"` } // ExponentialBucket are a set of bucket counts, encoded in a contiguous array // of counts. type ExponentialBucket struct { // Offset is the bucket index of the first entry in the Counts slice. Offset int32 // Counts is an slice where Counts[i] carries the count of the bucket at // index (Offset+i). Counts[i] is the count of values greater than // base^(Offset+i) and less than or equal to base^(Offset+i+1). Counts []uint64 } // Extrema is the minimum or maximum value of a dataset. type Extrema[N int64 | float64] struct { value N valid bool } // NewExtrema returns an Extrema set to v. func NewExtrema[N int64 | float64](v N) Extrema[N] { return Extrema[N]{value: v, valid: true} } // Value returns the Extrema value and true if the Extrema is defined. // Otherwise, if the Extrema is its zero-value, defined will be false. func (e Extrema[N]) Value() (v N, defined bool) { return e.value, e.valid } // Exemplar is a measurement sampled from a timeseries providing a typical // example. type Exemplar[N int64 | float64] struct { // FilteredAttributes are the attributes recorded with the measurement but // filtered out of the timeseries' aggregated data. FilteredAttributes []attribute.KeyValue // Time is the time when the measurement was recorded. Time time.Time // Value is the measured value. Value N // SpanID is the ID of the span that was active during the measurement. If // no span was active or the span was not sampled this will be empty. SpanID []byte `json:",omitempty"` // TraceID is the ID of the trace the active span belonged to during the // measurement. If no span was active or the span was not sampled this will // be empty. TraceID []byte `json:",omitempty"` } // Summary metric data are used to convey quantile summaries, // a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary) // data type. // // These data points cannot always be merged in a meaningful way. 
The Summary // type is only used by bridges from other metrics libraries, and cannot be // produced using OpenTelemetry instrumentation. type Summary struct { // DataPoints are the individual aggregated measurements with unique // attributes. DataPoints []SummaryDataPoint } func (Summary) privateAggregation() {} // SummaryDataPoint is a single data point in a timeseries that describes the // time-varying values of a Summary metric. type SummaryDataPoint struct { // Attributes is the set of key value pairs that uniquely identify the // timeseries. Attributes attribute.Set // StartTime is when the timeseries was started. StartTime time.Time // Time is the time when the timeseries was recorded. Time time.Time // Count is the number of updates this summary has been calculated with. Count uint64 // Sum is the sum of the values recorded. Sum float64 // (Optional) list of values at different quantiles of the distribution calculated // from the current snapshot. The quantiles must be strictly increasing. QuantileValues []QuantileValue } // QuantileValue is the value at a given quantile of a summary. type QuantileValue struct { // Quantile is the quantile of this value. // // Must be in the interval [0.0, 1.0]. Quantile float64 // Value is the value at the given quantile of a summary. // // Quantile values must NOT be negative. Value float64 } opentelemetry-go-1.21.0/sdk/metric/metricdata/metricdatatest/000077500000000000000000000000001452547353200243025ustar00rootroot00000000000000opentelemetry-go-1.21.0/sdk/metric/metricdata/metricdatatest/assertion.go000066400000000000000000000236631452547353200266520ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package metricdatatest provides testing functionality for use with the // metricdata package. package metricdatatest // import "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" import ( "fmt" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) // Datatypes are the concrete data-types the metricdata package provides. 
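// (Editorial note, not in the original source: Datatypes is the type
// constraint for the generic AssertEqual and AssertHasAttributes helpers in
// this package; it enumerates every concrete metricdata type those
// assertions accept.)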
type Datatypes interface { metricdata.DataPoint[float64] | metricdata.DataPoint[int64] | metricdata.Gauge[float64] | metricdata.Gauge[int64] | metricdata.Histogram[float64] | metricdata.Histogram[int64] | metricdata.HistogramDataPoint[float64] | metricdata.HistogramDataPoint[int64] | metricdata.Extrema[int64] | metricdata.Extrema[float64] | metricdata.Metrics | metricdata.ResourceMetrics | metricdata.ScopeMetrics | metricdata.Sum[float64] | metricdata.Sum[int64] | metricdata.Exemplar[float64] | metricdata.Exemplar[int64] | metricdata.ExponentialHistogram[float64] | metricdata.ExponentialHistogram[int64] | metricdata.ExponentialHistogramDataPoint[float64] | metricdata.ExponentialHistogramDataPoint[int64] | metricdata.ExponentialBucket | metricdata.Summary | metricdata.SummaryDataPoint | metricdata.QuantileValue // Interface types are not allowed in union types, therefore the // Aggregation and Value type from metricdata are not included here. } // TestingT is an interface that implements [testing.T], but without the // private method of [testing.TB], so other testing packages can rely on it as // well. // The methods in this interface must match the [testing.TB] interface. type TestingT interface { Helper() // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. Error(...any) // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. } type config struct { ignoreTimestamp bool ignoreExemplars bool ignoreValue bool } func newConfig(opts []Option) config { var cfg config for _, opt := range opts { cfg = opt.apply(cfg) } return cfg } // Option allows for fine grain control over how AssertEqual operates. type Option interface { apply(cfg config) config } type fnOption func(cfg config) config func (fn fnOption) apply(cfg config) config { return fn(cfg) } // IgnoreTimestamp disables checking if timestamps are different. func IgnoreTimestamp() Option { return fnOption(func(cfg config) config { cfg.ignoreTimestamp = true return cfg }) } // IgnoreExemplars disables checking if Exemplars are different. func IgnoreExemplars() Option { return fnOption(func(cfg config) config { cfg.ignoreExemplars = true return cfg }) } // IgnoreValue disables checking if values are different. This can be // useful for non-deterministic values, like measured durations. // // This will ignore the value and trace information for Exemplars; // the buckets, zero count, scale, sum, max, min, and counts of // ExponentialHistogramDataPoints; the buckets, sum, count, max, // and min of HistogramDataPoints; the value of DataPoints. func IgnoreValue() Option { return fnOption(func(cfg config) config { cfg.ignoreValue = true return cfg }) } // AssertEqual asserts that the two concrete data-types from the metricdata // package are equal. func AssertEqual[T Datatypes](t TestingT, expected, actual T, opts ...Option) bool { t.Helper() cfg := newConfig(opts) // Generic types cannot be type asserted. Use an interface instead. 
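// Illustrative use of the options defined above (hypothetical test code, not
// part of this file):
//
//	metricdatatest.AssertEqual(t, want, got,
//		metricdatatest.IgnoreTimestamp(),
//		metricdatatest.IgnoreExemplars(),
//	)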
aIface := interface{}(actual) var r []string switch e := interface{}(expected).(type) { case metricdata.Exemplar[int64]: r = equalExemplars(e, aIface.(metricdata.Exemplar[int64]), cfg) case metricdata.Exemplar[float64]: r = equalExemplars(e, aIface.(metricdata.Exemplar[float64]), cfg) case metricdata.DataPoint[int64]: r = equalDataPoints(e, aIface.(metricdata.DataPoint[int64]), cfg) case metricdata.DataPoint[float64]: r = equalDataPoints(e, aIface.(metricdata.DataPoint[float64]), cfg) case metricdata.Gauge[int64]: r = equalGauges(e, aIface.(metricdata.Gauge[int64]), cfg) case metricdata.Gauge[float64]: r = equalGauges(e, aIface.(metricdata.Gauge[float64]), cfg) case metricdata.Histogram[float64]: r = equalHistograms(e, aIface.(metricdata.Histogram[float64]), cfg) case metricdata.Histogram[int64]: r = equalHistograms(e, aIface.(metricdata.Histogram[int64]), cfg) case metricdata.HistogramDataPoint[float64]: r = equalHistogramDataPoints(e, aIface.(metricdata.HistogramDataPoint[float64]), cfg) case metricdata.HistogramDataPoint[int64]: r = equalHistogramDataPoints(e, aIface.(metricdata.HistogramDataPoint[int64]), cfg) case metricdata.Extrema[int64]: r = equalExtrema(e, aIface.(metricdata.Extrema[int64]), cfg) case metricdata.Extrema[float64]: r = equalExtrema(e, aIface.(metricdata.Extrema[float64]), cfg) case metricdata.Metrics: r = equalMetrics(e, aIface.(metricdata.Metrics), cfg) case metricdata.ResourceMetrics: r = equalResourceMetrics(e, aIface.(metricdata.ResourceMetrics), cfg) case metricdata.ScopeMetrics: r = equalScopeMetrics(e, aIface.(metricdata.ScopeMetrics), cfg) case metricdata.Sum[int64]: r = equalSums(e, aIface.(metricdata.Sum[int64]), cfg) case metricdata.Sum[float64]: r = equalSums(e, aIface.(metricdata.Sum[float64]), cfg) case metricdata.ExponentialHistogram[float64]: r = equalExponentialHistograms(e, aIface.(metricdata.ExponentialHistogram[float64]), cfg) case metricdata.ExponentialHistogram[int64]: r = equalExponentialHistograms(e, aIface.(metricdata.ExponentialHistogram[int64]), cfg) case metricdata.ExponentialHistogramDataPoint[float64]: r = equalExponentialHistogramDataPoints(e, aIface.(metricdata.ExponentialHistogramDataPoint[float64]), cfg) case metricdata.ExponentialHistogramDataPoint[int64]: r = equalExponentialHistogramDataPoints(e, aIface.(metricdata.ExponentialHistogramDataPoint[int64]), cfg) case metricdata.ExponentialBucket: r = equalExponentialBuckets(e, aIface.(metricdata.ExponentialBucket), cfg) case metricdata.Summary: r = equalSummary(e, aIface.(metricdata.Summary), cfg) case metricdata.SummaryDataPoint: r = equalSummaryDataPoint(e, aIface.(metricdata.SummaryDataPoint), cfg) case metricdata.QuantileValue: r = equalQuantileValue(e, aIface.(metricdata.QuantileValue), cfg) default: // We control all types passed to this, panic to signal developers // early they changed things in an incompatible way. panic(fmt.Sprintf("unknown types: %T", expected)) } if len(r) > 0 { t.Error(r) return false } return true } // AssertAggregationsEqual asserts that two Aggregations are equal. func AssertAggregationsEqual(t TestingT, expected, actual metricdata.Aggregation, opts ...Option) bool { t.Helper() cfg := newConfig(opts) if r := equalAggregations(expected, actual, cfg); len(r) > 0 { t.Error(r) return false } return true } // AssertHasAttributes asserts that all Datapoints or HistogramDataPoints have all passed attrs. 
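// A hypothetical call (illustrative, not in the original source; dataPoint is
// an assumed metricdata.DataPoint value) checking that a collected data point
// carries a "host" attribute:
//
//	metricdatatest.AssertHasAttributes(t, dataPoint, attribute.String("host", "a"))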
func AssertHasAttributes[T Datatypes](t TestingT, actual T, attrs ...attribute.KeyValue) bool { t.Helper() var reasons []string switch e := interface{}(actual).(type) { case metricdata.Exemplar[int64]: reasons = hasAttributesExemplars(e, attrs...) case metricdata.Exemplar[float64]: reasons = hasAttributesExemplars(e, attrs...) case metricdata.DataPoint[int64]: reasons = hasAttributesDataPoints(e, attrs...) case metricdata.DataPoint[float64]: reasons = hasAttributesDataPoints(e, attrs...) case metricdata.Gauge[int64]: reasons = hasAttributesGauge(e, attrs...) case metricdata.Gauge[float64]: reasons = hasAttributesGauge(e, attrs...) case metricdata.Sum[int64]: reasons = hasAttributesSum(e, attrs...) case metricdata.Sum[float64]: reasons = hasAttributesSum(e, attrs...) case metricdata.HistogramDataPoint[int64]: reasons = hasAttributesHistogramDataPoints(e, attrs...) case metricdata.HistogramDataPoint[float64]: reasons = hasAttributesHistogramDataPoints(e, attrs...) case metricdata.Extrema[int64], metricdata.Extrema[float64]: // Nothing to check. case metricdata.Histogram[int64]: reasons = hasAttributesHistogram(e, attrs...) case metricdata.Histogram[float64]: reasons = hasAttributesHistogram(e, attrs...) case metricdata.Metrics: reasons = hasAttributesMetrics(e, attrs...) case metricdata.ScopeMetrics: reasons = hasAttributesScopeMetrics(e, attrs...) case metricdata.ResourceMetrics: reasons = hasAttributesResourceMetrics(e, attrs...) case metricdata.ExponentialHistogram[int64]: reasons = hasAttributesExponentialHistogram(e, attrs...) case metricdata.ExponentialHistogram[float64]: reasons = hasAttributesExponentialHistogram(e, attrs...) case metricdata.ExponentialHistogramDataPoint[int64]: reasons = hasAttributesExponentialHistogramDataPoints(e, attrs...) case metricdata.ExponentialHistogramDataPoint[float64]: reasons = hasAttributesExponentialHistogramDataPoints(e, attrs...) case metricdata.ExponentialBucket: // Nothing to check. case metricdata.Summary: reasons = hasAttributesSummary(e, attrs...) case metricdata.SummaryDataPoint: reasons = hasAttributesSummaryDataPoint(e, attrs...) case metricdata.QuantileValue: // Nothing to check. default: // We control all types passed to this, panic to signal developers // early they changed things in an incompatible way. panic(fmt.Sprintf("unknown types: %T", actual)) } if len(reasons) > 0 { t.Error(reasons) return false } return true } opentelemetry-go-1.21.0/sdk/metric/metricdata/metricdatatest/assertion_fail_test.go000066400000000000000000000077151452547353200307040ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build tests_fail // +build tests_fail package metricdatatest // import "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" import ( "testing" "go.opentelemetry.io/otel/attribute" ) // These tests are used to develop the failure messages of this package's // assertions. They can be run with the following. // // go test -tags tests_fail ./... 
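// (Editorial note, not in the original source: the tests_fail build tag at
// the top of this file keeps these intentionally failing assertions out of a
// normal "go test ./..." run; they are only compiled when the tag is supplied
// as shown above.)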
func testFailDatatype[T Datatypes](a, b T) func(*testing.T) { return func(t *testing.T) { AssertEqual(t, a, b) } } func TestFailAssertEqual(t *testing.T) { t.Run("ResourceMetrics", testFailDatatype(resourceMetricsA, resourceMetricsB)) t.Run("ScopeMetrics", testFailDatatype(scopeMetricsA, scopeMetricsB)) t.Run("Metrics", testFailDatatype(metricsA, metricsB)) t.Run("HistogramInt64", testFailDatatype(histogramInt64A, histogramInt64B)) t.Run("HistogramFloat64", testFailDatatype(histogramFloat64A, histogramFloat64B)) t.Run("SumInt64", testFailDatatype(sumInt64A, sumInt64B)) t.Run("SumFloat64", testFailDatatype(sumFloat64A, sumFloat64B)) t.Run("GaugeInt64", testFailDatatype(gaugeInt64A, gaugeInt64B)) t.Run("GaugeFloat64", testFailDatatype(gaugeFloat64A, gaugeFloat64B)) t.Run("HistogramDataPointInt64", testFailDatatype(histogramDataPointInt64A, histogramDataPointInt64B)) t.Run("HistogramDataPointFloat64", testFailDatatype(histogramDataPointFloat64A, histogramDataPointFloat64B)) t.Run("DataPointInt64", testFailDatatype(dataPointInt64A, dataPointInt64B)) t.Run("DataPointFloat64", testFailDatatype(dataPointFloat64A, dataPointFloat64B)) t.Run("ExemplarInt64", testFailDatatype(exemplarInt64A, exemplarInt64B)) t.Run("ExemplarFloat64", testFailDatatype(exemplarFloat64A, exemplarFloat64B)) t.Run("Extrema", testFailDatatype(minA, minB)) } func TestFailAssertAggregationsEqual(t *testing.T) { AssertAggregationsEqual(t, sumInt64A, nil) AssertAggregationsEqual(t, sumFloat64A, gaugeFloat64A) AssertAggregationsEqual(t, unknownAggregation{}, unknownAggregation{}) AssertAggregationsEqual(t, sumInt64A, sumInt64B) AssertAggregationsEqual(t, sumFloat64A, sumFloat64B) AssertAggregationsEqual(t, gaugeInt64A, gaugeInt64B) AssertAggregationsEqual(t, gaugeFloat64A, gaugeFloat64B) AssertAggregationsEqual(t, histogramInt64A, histogramInt64B) AssertAggregationsEqual(t, histogramFloat64A, histogramFloat64B) } func TestFailAssertAttribute(t *testing.T) { AssertHasAttributes(t, exemplarInt64A, attribute.Bool("A", false)) AssertHasAttributes(t, exemplarFloat64A, attribute.Bool("B", true)) AssertHasAttributes(t, dataPointInt64A, attribute.Bool("A", false)) AssertHasAttributes(t, dataPointFloat64A, attribute.Bool("B", true)) AssertHasAttributes(t, gaugeInt64A, attribute.Bool("A", false)) AssertHasAttributes(t, gaugeFloat64A, attribute.Bool("B", true)) AssertHasAttributes(t, sumInt64A, attribute.Bool("A", false)) AssertHasAttributes(t, sumFloat64A, attribute.Bool("B", true)) AssertHasAttributes(t, histogramDataPointInt64A, attribute.Bool("A", false)) AssertHasAttributes(t, histogramDataPointFloat64A, attribute.Bool("B", true)) AssertHasAttributes(t, histogramInt64A, attribute.Bool("A", false)) AssertHasAttributes(t, histogramFloat64A, attribute.Bool("B", true)) AssertHasAttributes(t, metricsA, attribute.Bool("A", false)) AssertHasAttributes(t, metricsA, attribute.Bool("B", true)) AssertHasAttributes(t, resourceMetricsA, attribute.Bool("A", false)) AssertHasAttributes(t, resourceMetricsA, attribute.Bool("B", true)) } opentelemetry-go-1.21.0/sdk/metric/metricdata/metricdatatest/assertion_test.go000066400000000000000000001303041452547353200277000ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metricdatatest // import "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" import ( "testing" "time" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/resource" ) var ( attrA = attribute.NewSet(attribute.Bool("A", true)) attrB = attribute.NewSet(attribute.Bool("B", true)) fltrAttrA = []attribute.KeyValue{attribute.Bool("filter A", true)} fltrAttrB = []attribute.KeyValue{attribute.Bool("filter B", true)} startA = time.Now() startB = startA.Add(time.Millisecond) endA = startA.Add(time.Second) endB = startB.Add(time.Second) spanIDA = []byte{0, 0, 0, 0, 0, 0, 0, 1} spanIDB = []byte{0, 0, 0, 0, 0, 0, 0, 2} traceIDA = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1} traceIDB = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2} exemplarInt64A = metricdata.Exemplar[int64]{ FilteredAttributes: fltrAttrA, Time: endA, Value: -10, SpanID: spanIDA, TraceID: traceIDA, } exemplarFloat64A = metricdata.Exemplar[float64]{ FilteredAttributes: fltrAttrA, Time: endA, Value: -10.0, SpanID: spanIDA, TraceID: traceIDA, } exemplarInt64B = metricdata.Exemplar[int64]{ FilteredAttributes: fltrAttrB, Time: endB, Value: 12, SpanID: spanIDB, TraceID: traceIDB, } exemplarFloat64B = metricdata.Exemplar[float64]{ FilteredAttributes: fltrAttrB, Time: endB, Value: 12.0, SpanID: spanIDB, TraceID: traceIDB, } exemplarInt64C = metricdata.Exemplar[int64]{ FilteredAttributes: fltrAttrA, Time: endB, Value: -10, SpanID: spanIDA, TraceID: traceIDA, } exemplarFloat64C = metricdata.Exemplar[float64]{ FilteredAttributes: fltrAttrA, Time: endB, Value: -10.0, SpanID: spanIDA, TraceID: traceIDA, } exemplarInt64D = metricdata.Exemplar[int64]{ FilteredAttributes: fltrAttrA, Time: endA, Value: 12, SpanID: spanIDA, TraceID: traceIDA, } exemplarFloat64D = metricdata.Exemplar[float64]{ FilteredAttributes: fltrAttrA, Time: endA, Value: 12.0, SpanID: spanIDA, TraceID: traceIDA, } dataPointInt64A = metricdata.DataPoint[int64]{ Attributes: attrA, StartTime: startA, Time: endA, Value: -1, Exemplars: []metricdata.Exemplar[int64]{exemplarInt64A}, } dataPointFloat64A = metricdata.DataPoint[float64]{ Attributes: attrA, StartTime: startA, Time: endA, Value: -1.0, Exemplars: []metricdata.Exemplar[float64]{exemplarFloat64A}, } dataPointInt64B = metricdata.DataPoint[int64]{ Attributes: attrB, StartTime: startB, Time: endB, Value: 2, Exemplars: []metricdata.Exemplar[int64]{exemplarInt64B}, } dataPointFloat64B = metricdata.DataPoint[float64]{ Attributes: attrB, StartTime: startB, Time: endB, Value: 2.0, Exemplars: []metricdata.Exemplar[float64]{exemplarFloat64B}, } dataPointInt64C = metricdata.DataPoint[int64]{ Attributes: attrA, StartTime: startB, Time: endB, Value: -1, Exemplars: []metricdata.Exemplar[int64]{exemplarInt64C}, } dataPointFloat64C = metricdata.DataPoint[float64]{ Attributes: attrA, StartTime: startB, Time: endB, Value: -1.0, Exemplars: []metricdata.Exemplar[float64]{exemplarFloat64C}, } dataPointInt64D = metricdata.DataPoint[int64]{ Attributes: attrA, 
StartTime: startA, Time: endA, Value: 2, Exemplars: []metricdata.Exemplar[int64]{exemplarInt64A}, } dataPointFloat64D = metricdata.DataPoint[float64]{ Attributes: attrA, StartTime: startA, Time: endA, Value: 2.0, Exemplars: []metricdata.Exemplar[float64]{exemplarFloat64A}, } minFloat64A = metricdata.NewExtrema(-1.) minInt64A = metricdata.NewExtrema[int64](-1) minFloat64B, maxFloat64B = metricdata.NewExtrema(3.), metricdata.NewExtrema(99.) minInt64B, maxInt64B = metricdata.NewExtrema[int64](3), metricdata.NewExtrema[int64](99) minFloat64C = metricdata.NewExtrema(-1.) minInt64C = metricdata.NewExtrema[int64](-1) histogramDataPointInt64A = metricdata.HistogramDataPoint[int64]{ Attributes: attrA, StartTime: startA, Time: endA, Count: 2, Bounds: []float64{0, 10}, BucketCounts: []uint64{1, 1}, Min: minInt64A, Sum: 2, Exemplars: []metricdata.Exemplar[int64]{exemplarInt64A}, } histogramDataPointFloat64A = metricdata.HistogramDataPoint[float64]{ Attributes: attrA, StartTime: startA, Time: endA, Count: 2, Bounds: []float64{0, 10}, BucketCounts: []uint64{1, 1}, Min: minFloat64A, Sum: 2, Exemplars: []metricdata.Exemplar[float64]{exemplarFloat64A}, } histogramDataPointInt64B = metricdata.HistogramDataPoint[int64]{ Attributes: attrB, StartTime: startB, Time: endB, Count: 3, Bounds: []float64{0, 10, 100}, BucketCounts: []uint64{1, 1, 1}, Max: maxInt64B, Min: minInt64B, Sum: 3, Exemplars: []metricdata.Exemplar[int64]{exemplarInt64B}, } histogramDataPointFloat64B = metricdata.HistogramDataPoint[float64]{ Attributes: attrB, StartTime: startB, Time: endB, Count: 3, Bounds: []float64{0, 10, 100}, BucketCounts: []uint64{1, 1, 1}, Max: maxFloat64B, Min: minFloat64B, Sum: 3, Exemplars: []metricdata.Exemplar[float64]{exemplarFloat64B}, } histogramDataPointInt64C = metricdata.HistogramDataPoint[int64]{ Attributes: attrA, StartTime: startB, Time: endB, Count: 2, Bounds: []float64{0, 10}, BucketCounts: []uint64{1, 1}, Min: minInt64C, Sum: 2, Exemplars: []metricdata.Exemplar[int64]{exemplarInt64C}, } histogramDataPointFloat64C = metricdata.HistogramDataPoint[float64]{ Attributes: attrA, StartTime: startB, Time: endB, Count: 2, Bounds: []float64{0, 10}, BucketCounts: []uint64{1, 1}, Min: minFloat64C, Sum: 2, Exemplars: []metricdata.Exemplar[float64]{exemplarFloat64C}, } histogramDataPointInt64D = metricdata.HistogramDataPoint[int64]{ Attributes: attrA, StartTime: startA, Time: endA, Count: 3, Bounds: []float64{0, 10, 100}, BucketCounts: []uint64{1, 1, 1}, Max: maxInt64B, Min: minInt64B, Sum: 3, Exemplars: []metricdata.Exemplar[int64]{exemplarInt64A}, } histogramDataPointFloat64D = metricdata.HistogramDataPoint[float64]{ Attributes: attrA, StartTime: startA, Time: endA, Count: 3, Bounds: []float64{0, 10, 100}, BucketCounts: []uint64{1, 1, 1}, Max: maxFloat64B, Min: minFloat64B, Sum: 3, Exemplars: []metricdata.Exemplar[float64]{exemplarFloat64A}, } quantileValueA = metricdata.QuantileValue{ Quantile: 0.0, Value: 0.1, } quantileValueB = metricdata.QuantileValue{ Quantile: 0.1, Value: 0.2, } summaryDataPointA = metricdata.SummaryDataPoint{ Attributes: attrA, StartTime: startA, Time: endA, Count: 2, Sum: 3, QuantileValues: []metricdata.QuantileValue{quantileValueA}, } summaryDataPointB = metricdata.SummaryDataPoint{ Attributes: attrB, StartTime: startB, Time: endB, Count: 3, QuantileValues: []metricdata.QuantileValue{quantileValueB}, } summaryDataPointC = metricdata.SummaryDataPoint{ Attributes: attrA, StartTime: startB, Time: endB, Count: 2, Sum: 3, QuantileValues: []metricdata.QuantileValue{quantileValueA}, } 
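	// Note on fixture naming (editorial comment): the "A" and "B" values in
	// this var block differ in every field, while the "C" variants appear to
	// differ from "A" only by timestamps and the "D" variants only by the
	// recorded values. The IgnoreTimestamp and IgnoreValue tests below rely
	// on exactly that layout.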
summaryDataPointD = metricdata.SummaryDataPoint{ Attributes: attrA, StartTime: startA, Time: endA, Count: 3, QuantileValues: []metricdata.QuantileValue{quantileValueB}, } exponentialBucket2 = metricdata.ExponentialBucket{ Offset: 2, Counts: []uint64{1, 1}, } exponentialBucket3 = metricdata.ExponentialBucket{ Offset: 3, Counts: []uint64{1, 1}, } exponentialBucket4 = metricdata.ExponentialBucket{ Offset: 4, Counts: []uint64{1, 1, 1}, } exponentialBucket5 = metricdata.ExponentialBucket{ Offset: 5, Counts: []uint64{1, 1, 1}, } exponentialHistogramDataPointInt64A = metricdata.ExponentialHistogramDataPoint[int64]{ Attributes: attrA, StartTime: startA, Time: endA, Count: 5, Min: minInt64A, Sum: 2, Scale: 1, ZeroCount: 1, PositiveBucket: exponentialBucket3, NegativeBucket: exponentialBucket2, Exemplars: []metricdata.Exemplar[int64]{exemplarInt64A}, } exponentialHistogramDataPointFloat64A = metricdata.ExponentialHistogramDataPoint[float64]{ Attributes: attrA, StartTime: startA, Time: endA, Count: 5, Min: minFloat64A, Sum: 2, Scale: 1, ZeroCount: 1, PositiveBucket: exponentialBucket3, NegativeBucket: exponentialBucket2, Exemplars: []metricdata.Exemplar[float64]{exemplarFloat64A}, } exponentialHistogramDataPointInt64B = metricdata.ExponentialHistogramDataPoint[int64]{ Attributes: attrB, StartTime: startB, Time: endB, Count: 6, Min: minInt64B, Max: maxInt64B, Sum: 3, Scale: 2, ZeroCount: 3, PositiveBucket: exponentialBucket4, NegativeBucket: exponentialBucket5, Exemplars: []metricdata.Exemplar[int64]{exemplarInt64B}, } exponentialHistogramDataPointFloat64B = metricdata.ExponentialHistogramDataPoint[float64]{ Attributes: attrB, StartTime: startB, Time: endB, Count: 6, Min: minFloat64B, Max: maxFloat64B, Sum: 3, Scale: 2, ZeroCount: 3, PositiveBucket: exponentialBucket4, NegativeBucket: exponentialBucket5, Exemplars: []metricdata.Exemplar[float64]{exemplarFloat64B}, } exponentialHistogramDataPointInt64C = metricdata.ExponentialHistogramDataPoint[int64]{ Attributes: attrA, StartTime: startB, Time: endB, Count: 5, Min: minInt64C, Sum: 2, Scale: 1, ZeroCount: 1, PositiveBucket: exponentialBucket3, NegativeBucket: exponentialBucket2, Exemplars: []metricdata.Exemplar[int64]{exemplarInt64C}, } exponentialHistogramDataPointFloat64C = metricdata.ExponentialHistogramDataPoint[float64]{ Attributes: attrA, StartTime: startB, Time: endB, Count: 5, Min: minFloat64A, Sum: 2, Scale: 1, ZeroCount: 1, PositiveBucket: exponentialBucket3, NegativeBucket: exponentialBucket2, Exemplars: []metricdata.Exemplar[float64]{exemplarFloat64C}, } exponentialHistogramDataPointInt64D = metricdata.ExponentialHistogramDataPoint[int64]{ Attributes: attrA, StartTime: startA, Time: endA, Count: 6, Min: minInt64B, Max: maxInt64B, Sum: 3, Scale: 2, ZeroCount: 3, PositiveBucket: exponentialBucket4, NegativeBucket: exponentialBucket5, Exemplars: []metricdata.Exemplar[int64]{exemplarInt64A}, } exponentialHistogramDataPointFloat64D = metricdata.ExponentialHistogramDataPoint[float64]{ Attributes: attrA, StartTime: startA, Time: endA, Count: 6, Min: minFloat64B, Max: maxFloat64B, Sum: 3, Scale: 2, ZeroCount: 3, PositiveBucket: exponentialBucket4, NegativeBucket: exponentialBucket5, Exemplars: []metricdata.Exemplar[float64]{exemplarFloat64A}, } gaugeInt64A = metricdata.Gauge[int64]{ DataPoints: []metricdata.DataPoint[int64]{dataPointInt64A}, } gaugeFloat64A = metricdata.Gauge[float64]{ DataPoints: []metricdata.DataPoint[float64]{dataPointFloat64A}, } gaugeInt64B = metricdata.Gauge[int64]{ DataPoints: []metricdata.DataPoint[int64]{dataPointInt64B}, 
} gaugeFloat64B = metricdata.Gauge[float64]{ DataPoints: []metricdata.DataPoint[float64]{dataPointFloat64B}, } gaugeInt64C = metricdata.Gauge[int64]{ DataPoints: []metricdata.DataPoint[int64]{dataPointInt64C}, } gaugeFloat64C = metricdata.Gauge[float64]{ DataPoints: []metricdata.DataPoint[float64]{dataPointFloat64C}, } gaugeInt64D = metricdata.Gauge[int64]{ DataPoints: []metricdata.DataPoint[int64]{dataPointInt64D}, } gaugeFloat64D = metricdata.Gauge[float64]{ DataPoints: []metricdata.DataPoint[float64]{dataPointFloat64D}, } sumInt64A = metricdata.Sum[int64]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: true, DataPoints: []metricdata.DataPoint[int64]{dataPointInt64A}, } sumFloat64A = metricdata.Sum[float64]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: true, DataPoints: []metricdata.DataPoint[float64]{dataPointFloat64A}, } sumInt64B = metricdata.Sum[int64]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: true, DataPoints: []metricdata.DataPoint[int64]{dataPointInt64B}, } sumFloat64B = metricdata.Sum[float64]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: true, DataPoints: []metricdata.DataPoint[float64]{dataPointFloat64B}, } sumInt64C = metricdata.Sum[int64]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: true, DataPoints: []metricdata.DataPoint[int64]{dataPointInt64C}, } sumFloat64C = metricdata.Sum[float64]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: true, DataPoints: []metricdata.DataPoint[float64]{dataPointFloat64C}, } sumInt64D = metricdata.Sum[int64]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: true, DataPoints: []metricdata.DataPoint[int64]{dataPointInt64D}, } sumFloat64D = metricdata.Sum[float64]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: true, DataPoints: []metricdata.DataPoint[float64]{dataPointFloat64D}, } histogramInt64A = metricdata.Histogram[int64]{ Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.HistogramDataPoint[int64]{histogramDataPointInt64A}, } histogramFloat64A = metricdata.Histogram[float64]{ Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.HistogramDataPoint[float64]{histogramDataPointFloat64A}, } histogramInt64B = metricdata.Histogram[int64]{ Temporality: metricdata.DeltaTemporality, DataPoints: []metricdata.HistogramDataPoint[int64]{histogramDataPointInt64B}, } histogramFloat64B = metricdata.Histogram[float64]{ Temporality: metricdata.DeltaTemporality, DataPoints: []metricdata.HistogramDataPoint[float64]{histogramDataPointFloat64B}, } histogramInt64C = metricdata.Histogram[int64]{ Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.HistogramDataPoint[int64]{histogramDataPointInt64C}, } histogramFloat64C = metricdata.Histogram[float64]{ Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.HistogramDataPoint[float64]{histogramDataPointFloat64C}, } histogramInt64D = metricdata.Histogram[int64]{ Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.HistogramDataPoint[int64]{histogramDataPointInt64D}, } histogramFloat64D = metricdata.Histogram[float64]{ Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.HistogramDataPoint[float64]{histogramDataPointFloat64D}, } exponentialHistogramInt64A = metricdata.ExponentialHistogram[int64]{ Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.ExponentialHistogramDataPoint[int64]{exponentialHistogramDataPointInt64A}, } exponentialHistogramFloat64A = 
metricdata.ExponentialHistogram[float64]{ Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.ExponentialHistogramDataPoint[float64]{exponentialHistogramDataPointFloat64A}, } exponentialHistogramInt64B = metricdata.ExponentialHistogram[int64]{ Temporality: metricdata.DeltaTemporality, DataPoints: []metricdata.ExponentialHistogramDataPoint[int64]{exponentialHistogramDataPointInt64B}, } exponentialHistogramFloat64B = metricdata.ExponentialHistogram[float64]{ Temporality: metricdata.DeltaTemporality, DataPoints: []metricdata.ExponentialHistogramDataPoint[float64]{exponentialHistogramDataPointFloat64B}, } exponentialHistogramInt64C = metricdata.ExponentialHistogram[int64]{ Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.ExponentialHistogramDataPoint[int64]{exponentialHistogramDataPointInt64C}, } exponentialHistogramFloat64C = metricdata.ExponentialHistogram[float64]{ Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.ExponentialHistogramDataPoint[float64]{exponentialHistogramDataPointFloat64C}, } exponentialHistogramInt64D = metricdata.ExponentialHistogram[int64]{ Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.ExponentialHistogramDataPoint[int64]{exponentialHistogramDataPointInt64D}, } exponentialHistogramFloat64D = metricdata.ExponentialHistogram[float64]{ Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.ExponentialHistogramDataPoint[float64]{exponentialHistogramDataPointFloat64D}, } summaryA = metricdata.Summary{ DataPoints: []metricdata.SummaryDataPoint{summaryDataPointA}, } summaryB = metricdata.Summary{ DataPoints: []metricdata.SummaryDataPoint{summaryDataPointB}, } summaryC = metricdata.Summary{ DataPoints: []metricdata.SummaryDataPoint{summaryDataPointC}, } summaryD = metricdata.Summary{ DataPoints: []metricdata.SummaryDataPoint{summaryDataPointD}, } metricsA = metricdata.Metrics{ Name: "A", Description: "A desc", Unit: "1", Data: sumInt64A, } metricsB = metricdata.Metrics{ Name: "B", Description: "B desc", Unit: "By", Data: gaugeFloat64B, } metricsC = metricdata.Metrics{ Name: "A", Description: "A desc", Unit: "1", Data: sumInt64C, } metricsD = metricdata.Metrics{ Name: "A", Description: "A desc", Unit: "1", Data: sumInt64D, } scopeMetricsA = metricdata.ScopeMetrics{ Scope: instrumentation.Scope{Name: "A"}, Metrics: []metricdata.Metrics{metricsA}, } scopeMetricsB = metricdata.ScopeMetrics{ Scope: instrumentation.Scope{Name: "B"}, Metrics: []metricdata.Metrics{metricsB}, } scopeMetricsC = metricdata.ScopeMetrics{ Scope: instrumentation.Scope{Name: "A"}, Metrics: []metricdata.Metrics{metricsC}, } scopeMetricsD = metricdata.ScopeMetrics{ Scope: instrumentation.Scope{Name: "A"}, Metrics: []metricdata.Metrics{metricsD}, } resourceMetricsA = metricdata.ResourceMetrics{ Resource: resource.NewSchemaless(attribute.String("resource", "A")), ScopeMetrics: []metricdata.ScopeMetrics{scopeMetricsA}, } resourceMetricsB = metricdata.ResourceMetrics{ Resource: resource.NewSchemaless(attribute.String("resource", "B")), ScopeMetrics: []metricdata.ScopeMetrics{scopeMetricsB}, } resourceMetricsC = metricdata.ResourceMetrics{ Resource: resource.NewSchemaless(attribute.String("resource", "A")), ScopeMetrics: []metricdata.ScopeMetrics{scopeMetricsC}, } resourceMetricsD = metricdata.ResourceMetrics{ Resource: resource.NewSchemaless(attribute.String("resource", "A")), ScopeMetrics: []metricdata.ScopeMetrics{scopeMetricsD}, } ) type equalFunc[T Datatypes] func(T, T, config) []string func 
testDatatype[T Datatypes](a, b T, f equalFunc[T]) func(*testing.T) { return func(t *testing.T) { AssertEqual(t, a, a) AssertEqual(t, b, b) r := f(a, b, newConfig(nil)) assert.Greaterf(t, len(r), 0, "%v == %v", a, b) } } func testDatatypeIgnoreTime[T Datatypes](a, b T, f equalFunc[T]) func(*testing.T) { return func(t *testing.T) { AssertEqual(t, a, a) AssertEqual(t, b, b) c := newConfig([]Option{IgnoreTimestamp()}) r := f(a, b, c) assert.Len(t, r, 0, "unexpected inequality") } } func testDatatypeIgnoreExemplars[T Datatypes](a, b T, f equalFunc[T]) func(*testing.T) { return func(t *testing.T) { AssertEqual(t, a, a) AssertEqual(t, b, b) c := newConfig([]Option{IgnoreExemplars()}) r := f(a, b, c) assert.Len(t, r, 0, "unexpected inequality") } } func testDatatypeIgnoreValue[T Datatypes](a, b T, f equalFunc[T]) func(*testing.T) { return func(t *testing.T) { AssertEqual(t, a, a) AssertEqual(t, b, b) c := newConfig([]Option{IgnoreValue()}) r := f(a, b, c) assert.Len(t, r, 0, "unexpected inequality") } } func TestTestingTImplementation(t *testing.T) { assert.Implements(t, (*TestingT)(nil), t) } func TestAssertEqual(t *testing.T) { t.Run("ResourceMetrics", testDatatype(resourceMetricsA, resourceMetricsB, equalResourceMetrics)) t.Run("ScopeMetrics", testDatatype(scopeMetricsA, scopeMetricsB, equalScopeMetrics)) t.Run("Metrics", testDatatype(metricsA, metricsB, equalMetrics)) t.Run("HistogramInt64", testDatatype(histogramInt64A, histogramInt64B, equalHistograms[int64])) t.Run("HistogramFloat64", testDatatype(histogramFloat64A, histogramFloat64B, equalHistograms[float64])) t.Run("SumInt64", testDatatype(sumInt64A, sumInt64B, equalSums[int64])) t.Run("SumFloat64", testDatatype(sumFloat64A, sumFloat64B, equalSums[float64])) t.Run("GaugeInt64", testDatatype(gaugeInt64A, gaugeInt64B, equalGauges[int64])) t.Run("GaugeFloat64", testDatatype(gaugeFloat64A, gaugeFloat64B, equalGauges[float64])) t.Run("HistogramDataPointInt64", testDatatype(histogramDataPointInt64A, histogramDataPointInt64B, equalHistogramDataPoints[int64])) t.Run("HistogramDataPointFloat64", testDatatype(histogramDataPointFloat64A, histogramDataPointFloat64B, equalHistogramDataPoints[float64])) t.Run("DataPointInt64", testDatatype(dataPointInt64A, dataPointInt64B, equalDataPoints[int64])) t.Run("DataPointFloat64", testDatatype(dataPointFloat64A, dataPointFloat64B, equalDataPoints[float64])) t.Run("ExtremaInt64", testDatatype(minInt64A, minInt64B, equalExtrema[int64])) t.Run("ExtremaFloat64", testDatatype(minFloat64A, minFloat64B, equalExtrema[float64])) t.Run("ExemplarInt64", testDatatype(exemplarInt64A, exemplarInt64B, equalExemplars[int64])) t.Run("ExemplarFloat64", testDatatype(exemplarFloat64A, exemplarFloat64B, equalExemplars[float64])) t.Run("ExponentialHistogramInt64", testDatatype(exponentialHistogramInt64A, exponentialHistogramInt64B, equalExponentialHistograms[int64])) t.Run("ExponentialHistogramFloat64", testDatatype(exponentialHistogramFloat64A, exponentialHistogramFloat64B, equalExponentialHistograms[float64])) t.Run("ExponentialHistogramDataPointInt64", testDatatype(exponentialHistogramDataPointInt64A, exponentialHistogramDataPointInt64B, equalExponentialHistogramDataPoints[int64])) t.Run("ExponentialHistogramDataPointFloat64", testDatatype(exponentialHistogramDataPointFloat64A, exponentialHistogramDataPointFloat64B, equalExponentialHistogramDataPoints[float64])) t.Run("ExponentialBuckets", testDatatype(exponentialBucket2, exponentialBucket3, equalExponentialBuckets)) t.Run("Summary", testDatatype(summaryA, summaryB, 
equalSummary)) t.Run("SummaryDataPoint", testDatatype(summaryDataPointA, summaryDataPointB, equalSummaryDataPoint)) t.Run("QuantileValues", testDatatype(quantileValueA, quantileValueB, equalQuantileValue)) } func TestAssertEqualIgnoreTime(t *testing.T) { t.Run("ResourceMetrics", testDatatypeIgnoreTime(resourceMetricsA, resourceMetricsC, equalResourceMetrics)) t.Run("ScopeMetrics", testDatatypeIgnoreTime(scopeMetricsA, scopeMetricsC, equalScopeMetrics)) t.Run("Metrics", testDatatypeIgnoreTime(metricsA, metricsC, equalMetrics)) t.Run("HistogramInt64", testDatatypeIgnoreTime(histogramInt64A, histogramInt64C, equalHistograms[int64])) t.Run("HistogramFloat64", testDatatypeIgnoreTime(histogramFloat64A, histogramFloat64C, equalHistograms[float64])) t.Run("SumInt64", testDatatypeIgnoreTime(sumInt64A, sumInt64C, equalSums[int64])) t.Run("SumFloat64", testDatatypeIgnoreTime(sumFloat64A, sumFloat64C, equalSums[float64])) t.Run("GaugeInt64", testDatatypeIgnoreTime(gaugeInt64A, gaugeInt64C, equalGauges[int64])) t.Run("GaugeFloat64", testDatatypeIgnoreTime(gaugeFloat64A, gaugeFloat64C, equalGauges[float64])) t.Run("HistogramDataPointInt64", testDatatypeIgnoreTime(histogramDataPointInt64A, histogramDataPointInt64C, equalHistogramDataPoints[int64])) t.Run("HistogramDataPointFloat64", testDatatypeIgnoreTime(histogramDataPointFloat64A, histogramDataPointFloat64C, equalHistogramDataPoints[float64])) t.Run("DataPointInt64", testDatatypeIgnoreTime(dataPointInt64A, dataPointInt64C, equalDataPoints[int64])) t.Run("DataPointFloat64", testDatatypeIgnoreTime(dataPointFloat64A, dataPointFloat64C, equalDataPoints[float64])) t.Run("ExtremaInt64", testDatatypeIgnoreTime(minInt64A, minInt64C, equalExtrema[int64])) t.Run("ExtremaFloat64", testDatatypeIgnoreTime(minFloat64A, minFloat64C, equalExtrema[float64])) t.Run("ExemplarInt64", testDatatypeIgnoreTime(exemplarInt64A, exemplarInt64C, equalExemplars[int64])) t.Run("ExemplarFloat64", testDatatypeIgnoreTime(exemplarFloat64A, exemplarFloat64C, equalExemplars[float64])) t.Run("ExponentialHistogramInt64", testDatatypeIgnoreTime(exponentialHistogramInt64A, exponentialHistogramInt64C, equalExponentialHistograms[int64])) t.Run("ExponentialHistogramFloat64", testDatatypeIgnoreTime(exponentialHistogramFloat64A, exponentialHistogramFloat64C, equalExponentialHistograms[float64])) t.Run("ExponentialHistogramDataPointInt64", testDatatypeIgnoreTime(exponentialHistogramDataPointInt64A, exponentialHistogramDataPointInt64C, equalExponentialHistogramDataPoints[int64])) t.Run("ExponentialHistogramDataPointFloat64", testDatatypeIgnoreTime(exponentialHistogramDataPointFloat64A, exponentialHistogramDataPointFloat64C, equalExponentialHistogramDataPoints[float64])) t.Run("Summary", testDatatypeIgnoreTime(summaryA, summaryC, equalSummary)) t.Run("SummaryDataPoint", testDatatypeIgnoreTime(summaryDataPointA, summaryDataPointC, equalSummaryDataPoint)) } func TestAssertEqualIgnoreExemplars(t *testing.T) { hdpInt64 := histogramDataPointInt64A hdpInt64.Exemplars = []metricdata.Exemplar[int64]{exemplarInt64B} t.Run("HistogramDataPointInt64", testDatatypeIgnoreExemplars(histogramDataPointInt64A, hdpInt64, equalHistogramDataPoints[int64])) hdpFloat64 := histogramDataPointFloat64A hdpFloat64.Exemplars = []metricdata.Exemplar[float64]{exemplarFloat64B} t.Run("HistogramDataPointFloat64", testDatatypeIgnoreExemplars(histogramDataPointFloat64A, hdpFloat64, equalHistogramDataPoints[float64])) dpInt64 := dataPointInt64A dpInt64.Exemplars = []metricdata.Exemplar[int64]{exemplarInt64B} t.Run("DataPointInt64", 
testDatatypeIgnoreExemplars(dataPointInt64A, dpInt64, equalDataPoints[int64])) dpFloat64 := dataPointFloat64A dpFloat64.Exemplars = []metricdata.Exemplar[float64]{exemplarFloat64B} t.Run("DataPointFloat64", testDatatypeIgnoreExemplars(dataPointFloat64A, dpFloat64, equalDataPoints[float64])) ehdpInt64 := exponentialHistogramDataPointInt64A ehdpInt64.Exemplars = []metricdata.Exemplar[int64]{exemplarInt64B} t.Run("ExponentialHistogramDataPointInt64", testDatatypeIgnoreExemplars(exponentialHistogramDataPointInt64A, ehdpInt64, equalExponentialHistogramDataPoints[int64])) ehdpFloat64 := exponentialHistogramDataPointFloat64A ehdpFloat64.Exemplars = []metricdata.Exemplar[float64]{exemplarFloat64B} t.Run("ExponentialHistogramDataPointFloat64", testDatatypeIgnoreExemplars(exponentialHistogramDataPointFloat64A, ehdpFloat64, equalExponentialHistogramDataPoints[float64])) } func TestAssertEqualIgnoreValue(t *testing.T) { t.Run("ResourceMetrics", testDatatypeIgnoreValue(resourceMetricsA, resourceMetricsD, equalResourceMetrics)) t.Run("ScopeMetrics", testDatatypeIgnoreValue(scopeMetricsA, scopeMetricsD, equalScopeMetrics)) t.Run("Metrics", testDatatypeIgnoreValue(metricsA, metricsD, equalMetrics)) t.Run("HistogramInt64", testDatatypeIgnoreValue(histogramInt64A, histogramInt64D, equalHistograms[int64])) t.Run("HistogramFloat64", testDatatypeIgnoreValue(histogramFloat64A, histogramFloat64D, equalHistograms[float64])) t.Run("SumInt64", testDatatypeIgnoreValue(sumInt64A, sumInt64D, equalSums[int64])) t.Run("SumFloat64", testDatatypeIgnoreValue(sumFloat64A, sumFloat64D, equalSums[float64])) t.Run("GaugeInt64", testDatatypeIgnoreValue(gaugeInt64A, gaugeInt64D, equalGauges[int64])) t.Run("GaugeFloat64", testDatatypeIgnoreValue(gaugeFloat64A, gaugeFloat64D, equalGauges[float64])) t.Run("HistogramDataPointInt64", testDatatypeIgnoreValue(histogramDataPointInt64A, histogramDataPointInt64D, equalHistogramDataPoints[int64])) t.Run("HistogramDataPointFloat64", testDatatypeIgnoreValue(histogramDataPointFloat64A, histogramDataPointFloat64D, equalHistogramDataPoints[float64])) t.Run("DataPointInt64", testDatatypeIgnoreValue(dataPointInt64A, dataPointInt64D, equalDataPoints[int64])) t.Run("DataPointFloat64", testDatatypeIgnoreValue(dataPointFloat64A, dataPointFloat64D, equalDataPoints[float64])) t.Run("ExemplarInt64", testDatatypeIgnoreValue(exemplarInt64A, exemplarInt64D, equalExemplars[int64])) t.Run("ExemplarFloat64", testDatatypeIgnoreValue(exemplarFloat64A, exemplarFloat64D, equalExemplars[float64])) t.Run("ExponentialHistogramInt64", testDatatypeIgnoreValue(exponentialHistogramInt64A, exponentialHistogramInt64D, equalExponentialHistograms[int64])) t.Run("ExponentialHistogramFloat64", testDatatypeIgnoreValue(exponentialHistogramFloat64A, exponentialHistogramFloat64D, equalExponentialHistograms[float64])) t.Run("ExponentialHistogramDataPointInt64", testDatatypeIgnoreValue(exponentialHistogramDataPointInt64A, exponentialHistogramDataPointInt64D, equalExponentialHistogramDataPoints[int64])) t.Run("ExponentialHistogramDataPointFloat64", testDatatypeIgnoreValue(exponentialHistogramDataPointFloat64A, exponentialHistogramDataPointFloat64D, equalExponentialHistogramDataPoints[float64])) t.Run("Summary", testDatatypeIgnoreValue(summaryA, summaryD, equalSummary)) t.Run("SummaryDataPoint", testDatatypeIgnoreValue(summaryDataPointA, summaryDataPointD, equalSummaryDataPoint)) } type unknownAggregation struct { metricdata.Aggregation } func TestAssertAggregationsEqual(t *testing.T) { AssertAggregationsEqual(t, nil, nil) 
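	// The call above and the calls below are reflexivity checks (editorial
	// comment): nil and every aggregation value must compare equal to itself
	// before the mismatch and ignore-option cases are exercised.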
AssertAggregationsEqual(t, sumInt64A, sumInt64A) AssertAggregationsEqual(t, sumFloat64A, sumFloat64A) AssertAggregationsEqual(t, gaugeInt64A, gaugeInt64A) AssertAggregationsEqual(t, gaugeFloat64A, gaugeFloat64A) AssertAggregationsEqual(t, histogramInt64A, histogramInt64A) AssertAggregationsEqual(t, histogramFloat64A, histogramFloat64A) AssertAggregationsEqual(t, exponentialHistogramInt64A, exponentialHistogramInt64A) AssertAggregationsEqual(t, exponentialHistogramFloat64A, exponentialHistogramFloat64A) AssertAggregationsEqual(t, summaryA, summaryA) r := equalAggregations(sumInt64A, nil, config{}) assert.Len(t, r, 1, "should return nil comparison mismatch only") r = equalAggregations(sumInt64A, gaugeInt64A, config{}) assert.Len(t, r, 1, "should return with type mismatch only") r = equalAggregations(unknownAggregation{}, unknownAggregation{}, config{}) assert.Len(t, r, 1, "should return with unknown aggregation only") r = equalAggregations(sumInt64A, sumInt64B, config{}) assert.Greaterf(t, len(r), 0, "sums should not be equal: %v == %v", sumInt64A, sumInt64B) r = equalAggregations(sumInt64A, sumInt64C, config{ignoreTimestamp: true}) assert.Len(t, r, 0, "sums should be equal: %v", r) r = equalAggregations(sumInt64A, sumInt64D, config{ignoreValue: true}) assert.Len(t, r, 0, "value should be ignored: %v == %v", sumInt64A, sumInt64D) r = equalAggregations(sumFloat64A, sumFloat64B, config{}) assert.Greaterf(t, len(r), 0, "sums should not be equal: %v == %v", sumFloat64A, sumFloat64B) r = equalAggregations(sumFloat64A, sumFloat64C, config{ignoreTimestamp: true}) assert.Len(t, r, 0, "sums should be equal: %v", r) r = equalAggregations(sumFloat64A, sumFloat64D, config{ignoreValue: true}) assert.Len(t, r, 0, "value should be ignored: %v == %v", sumFloat64A, sumFloat64D) r = equalAggregations(gaugeInt64A, gaugeInt64B, config{}) assert.Greaterf(t, len(r), 0, "gauges should not be equal: %v == %v", gaugeInt64A, gaugeInt64B) r = equalAggregations(gaugeInt64A, gaugeInt64C, config{ignoreTimestamp: true}) assert.Len(t, r, 0, "gauges should be equal: %v", r) r = equalAggregations(gaugeInt64A, gaugeInt64D, config{ignoreValue: true}) assert.Len(t, r, 0, "value should be ignored: %v == %v", gaugeInt64A, gaugeInt64D) r = equalAggregations(gaugeFloat64A, gaugeFloat64B, config{}) assert.Greaterf(t, len(r), 0, "gauges should not be equal: %v == %v", gaugeFloat64A, gaugeFloat64B) r = equalAggregations(gaugeFloat64A, gaugeFloat64C, config{ignoreTimestamp: true}) assert.Len(t, r, 0, "gauges should be equal: %v", r) r = equalAggregations(gaugeFloat64A, gaugeFloat64D, config{ignoreValue: true}) assert.Len(t, r, 0, "value should be ignored: %v == %v", gaugeFloat64A, gaugeFloat64D) r = equalAggregations(histogramInt64A, histogramInt64B, config{}) assert.Greaterf(t, len(r), 0, "histograms should not be equal: %v == %v", histogramInt64A, histogramInt64B) r = equalAggregations(histogramInt64A, histogramInt64C, config{ignoreTimestamp: true}) assert.Len(t, r, 0, "histograms should be equal: %v", r) r = equalAggregations(histogramInt64A, histogramInt64D, config{ignoreValue: true}) assert.Len(t, r, 0, "value should be ignored: %v == %v", histogramInt64A, histogramInt64D) r = equalAggregations(histogramFloat64A, histogramFloat64B, config{}) assert.Greaterf(t, len(r), 0, "histograms should not be equal: %v == %v", histogramFloat64A, histogramFloat64B) r = equalAggregations(histogramFloat64A, histogramFloat64C, config{ignoreTimestamp: true}) assert.Len(t, r, 0, "histograms should be equal: %v", r) r = 
equalAggregations(histogramFloat64A, histogramFloat64D, config{ignoreValue: true}) assert.Len(t, r, 0, "value should be ignored: %v == %v", histogramFloat64A, histogramFloat64D) r = equalAggregations(exponentialHistogramInt64A, exponentialHistogramInt64B, config{}) assert.Greaterf(t, len(r), 0, "exponential histograms should not be equal: %v == %v", exponentialHistogramInt64A, exponentialHistogramInt64B) r = equalAggregations(exponentialHistogramInt64A, exponentialHistogramInt64C, config{ignoreTimestamp: true}) assert.Len(t, r, 0, "exponential histograms should be equal: %v", r) r = equalAggregations(exponentialHistogramInt64A, exponentialHistogramInt64D, config{ignoreValue: true}) assert.Len(t, r, 0, "value should be ignored: %v == %v", exponentialHistogramInt64A, exponentialHistogramInt64D) r = equalAggregations(exponentialHistogramFloat64A, exponentialHistogramFloat64B, config{}) assert.Greaterf(t, len(r), 0, "exponential histograms should not be equal: %v == %v", exponentialHistogramFloat64A, exponentialHistogramFloat64B) r = equalAggregations(exponentialHistogramFloat64A, exponentialHistogramFloat64C, config{ignoreTimestamp: true}) assert.Len(t, r, 0, "exponential histograms should be equal: %v", r) r = equalAggregations(exponentialHistogramFloat64A, exponentialHistogramFloat64D, config{ignoreValue: true}) assert.Len(t, r, 0, "value should be ignored: %v == %v", exponentialHistogramFloat64A, exponentialHistogramFloat64D) r = equalAggregations(summaryA, summaryB, config{}) assert.Greaterf(t, len(r), 0, "summaries should not be equal: %v == %v", summaryA, summaryB) r = equalAggregations(summaryA, summaryC, config{ignoreTimestamp: true}) assert.Len(t, r, 0, "summaries should be equal: %v", r) r = equalAggregations(summaryA, summaryD, config{ignoreValue: true}) assert.Len(t, r, 0, "value should be ignored: %v == %v", summaryA, summaryD) } func TestAssertAttributes(t *testing.T) { AssertHasAttributes(t, minFloat64A, attribute.Bool("A", true)) // No-op, always pass. AssertHasAttributes(t, exemplarInt64A, attribute.Bool("filter A", true)) AssertHasAttributes(t, exemplarFloat64A, attribute.Bool("filter A", true)) AssertHasAttributes(t, dataPointInt64A, attribute.Bool("A", true)) AssertHasAttributes(t, dataPointFloat64A, attribute.Bool("A", true)) AssertHasAttributes(t, gaugeInt64A, attribute.Bool("A", true)) AssertHasAttributes(t, gaugeFloat64A, attribute.Bool("A", true)) AssertHasAttributes(t, sumInt64A, attribute.Bool("A", true)) AssertHasAttributes(t, sumFloat64A, attribute.Bool("A", true)) AssertHasAttributes(t, histogramDataPointInt64A, attribute.Bool("A", true)) AssertHasAttributes(t, histogramDataPointFloat64A, attribute.Bool("A", true)) AssertHasAttributes(t, histogramInt64A, attribute.Bool("A", true)) AssertHasAttributes(t, histogramFloat64A, attribute.Bool("A", true)) AssertHasAttributes(t, metricsA, attribute.Bool("A", true)) AssertHasAttributes(t, scopeMetricsA, attribute.Bool("A", true)) AssertHasAttributes(t, resourceMetricsA, attribute.Bool("A", true)) AssertHasAttributes(t, exponentialHistogramDataPointInt64A, attribute.Bool("A", true)) AssertHasAttributes(t, exponentialHistogramDataPointFloat64A, attribute.Bool("A", true)) AssertHasAttributes(t, exponentialHistogramInt64A, attribute.Bool("A", true)) AssertHasAttributes(t, exponentialHistogramFloat64A, attribute.Bool("A", true)) AssertHasAttributes(t, exponentialBucket2, attribute.Bool("A", true)) // No-op, always pass. 
AssertHasAttributes(t, summaryDataPointA, attribute.Bool("A", true)) AssertHasAttributes(t, summaryA, attribute.Bool("A", true)) AssertHasAttributes(t, quantileValueA, attribute.Bool("A", true)) // No-op, always pass. r := hasAttributesAggregation(gaugeInt64A, attribute.Bool("A", true)) assert.Equal(t, len(r), 0, "gaugeInt64A has A=True") r = hasAttributesAggregation(gaugeFloat64A, attribute.Bool("A", true)) assert.Equal(t, len(r), 0, "gaugeFloat64A has A=True") r = hasAttributesAggregation(sumInt64A, attribute.Bool("A", true)) assert.Equal(t, len(r), 0, "sumInt64A has A=True") r = hasAttributesAggregation(sumFloat64A, attribute.Bool("A", true)) assert.Equal(t, len(r), 0, "sumFloat64A has A=True") r = hasAttributesAggregation(histogramInt64A, attribute.Bool("A", true)) assert.Equal(t, len(r), 0, "histogramInt64A has A=True") r = hasAttributesAggregation(histogramFloat64A, attribute.Bool("A", true)) assert.Equal(t, len(r), 0, "histogramFloat64A has A=True") r = hasAttributesAggregation(exponentialHistogramInt64A, attribute.Bool("A", true)) assert.Equal(t, len(r), 0, "exponentialHistogramInt64A has A=True") r = hasAttributesAggregation(exponentialHistogramFloat64A, attribute.Bool("A", true)) assert.Equal(t, len(r), 0, "exponentialHistogramFloat64A has A=True") r = hasAttributesAggregation(summaryA, attribute.Bool("A", true)) assert.Equal(t, len(r), 0, "summaryA has A=True") r = hasAttributesAggregation(gaugeInt64A, attribute.Bool("A", false)) assert.Greater(t, len(r), 0, "gaugeInt64A does not have A=False") r = hasAttributesAggregation(gaugeFloat64A, attribute.Bool("A", false)) assert.Greater(t, len(r), 0, "gaugeFloat64A does not have A=False") r = hasAttributesAggregation(sumInt64A, attribute.Bool("A", false)) assert.Greater(t, len(r), 0, "sumInt64A does not have A=False") r = hasAttributesAggregation(sumFloat64A, attribute.Bool("A", false)) assert.Greater(t, len(r), 0, "sumFloat64A does not have A=False") r = hasAttributesAggregation(histogramInt64A, attribute.Bool("A", false)) assert.Greater(t, len(r), 0, "histogramInt64A does not have A=False") r = hasAttributesAggregation(histogramFloat64A, attribute.Bool("A", false)) assert.Greater(t, len(r), 0, "histogramFloat64A does not have A=False") r = hasAttributesAggregation(exponentialHistogramInt64A, attribute.Bool("A", false)) assert.Greater(t, len(r), 0, "exponentialHistogramInt64A does not have A=False") r = hasAttributesAggregation(exponentialHistogramFloat64A, attribute.Bool("A", false)) assert.Greater(t, len(r), 0, "exponentialHistogramFloat64A does not have A=False") r = hasAttributesAggregation(summaryA, attribute.Bool("A", false)) assert.Greater(t, len(r), 0, "summaryA does not have A=False") r = hasAttributesAggregation(gaugeInt64A, attribute.Bool("B", true)) assert.Greater(t, len(r), 0, "gaugeInt64A does not have Attribute B") r = hasAttributesAggregation(gaugeFloat64A, attribute.Bool("B", true)) assert.Greater(t, len(r), 0, "gaugeFloat64A does not have Attribute B") r = hasAttributesAggregation(sumInt64A, attribute.Bool("B", true)) assert.Greater(t, len(r), 0, "sumInt64A does not have Attribute B") r = hasAttributesAggregation(sumFloat64A, attribute.Bool("B", true)) assert.Greater(t, len(r), 0, "sumFloat64A does not have Attribute B") r = hasAttributesAggregation(histogramInt64A, attribute.Bool("B", true)) assert.Greater(t, len(r), 0, "histogramIntA does not have Attribute B") r = hasAttributesAggregation(histogramFloat64A, attribute.Bool("B", true)) assert.Greater(t, len(r), 0, "histogramFloatA does not have Attribute B") r = 
hasAttributesAggregation(exponentialHistogramInt64A, attribute.Bool("B", true)) assert.Greater(t, len(r), 0, "exponentialHistogramIntA does not have Attribute B") r = hasAttributesAggregation(exponentialHistogramFloat64A, attribute.Bool("B", true)) assert.Greater(t, len(r), 0, "exponentialHistogramFloatA does not have Attribute B") r = hasAttributesAggregation(summaryA, attribute.Bool("B", true)) assert.Greater(t, len(r), 0, "summaryA does not have Attribute B") } func TestAssertAttributesFail(t *testing.T) { fakeT := &testing.T{} assert.False(t, AssertHasAttributes(fakeT, dataPointInt64A, attribute.Bool("A", false))) assert.False(t, AssertHasAttributes(fakeT, dataPointFloat64A, attribute.Bool("B", true))) assert.False(t, AssertHasAttributes(fakeT, exemplarInt64A, attribute.Bool("A", false))) assert.False(t, AssertHasAttributes(fakeT, exemplarFloat64A, attribute.Bool("B", true))) assert.False(t, AssertHasAttributes(fakeT, gaugeInt64A, attribute.Bool("A", false))) assert.False(t, AssertHasAttributes(fakeT, gaugeFloat64A, attribute.Bool("B", true))) assert.False(t, AssertHasAttributes(fakeT, sumInt64A, attribute.Bool("A", false))) assert.False(t, AssertHasAttributes(fakeT, sumFloat64A, attribute.Bool("B", true))) assert.False(t, AssertHasAttributes(fakeT, histogramDataPointInt64A, attribute.Bool("A", false))) assert.False(t, AssertHasAttributes(fakeT, histogramDataPointFloat64A, attribute.Bool("B", true))) assert.False(t, AssertHasAttributes(fakeT, histogramInt64A, attribute.Bool("A", false))) assert.False(t, AssertHasAttributes(fakeT, histogramFloat64A, attribute.Bool("B", true))) assert.False(t, AssertHasAttributes(fakeT, metricsA, attribute.Bool("A", false))) assert.False(t, AssertHasAttributes(fakeT, metricsA, attribute.Bool("B", true))) assert.False(t, AssertHasAttributes(fakeT, resourceMetricsA, attribute.Bool("A", false))) assert.False(t, AssertHasAttributes(fakeT, resourceMetricsA, attribute.Bool("B", true))) assert.False(t, AssertHasAttributes(fakeT, exponentialHistogramDataPointInt64A, attribute.Bool("A", false))) assert.False(t, AssertHasAttributes(fakeT, exponentialHistogramDataPointFloat64A, attribute.Bool("B", true))) assert.False(t, AssertHasAttributes(fakeT, exponentialHistogramInt64A, attribute.Bool("A", false))) assert.False(t, AssertHasAttributes(fakeT, exponentialHistogramFloat64A, attribute.Bool("B", true))) assert.False(t, AssertHasAttributes(fakeT, summaryDataPointA, attribute.Bool("A", false))) assert.False(t, AssertHasAttributes(fakeT, summaryDataPointA, attribute.Bool("B", true))) assert.False(t, AssertHasAttributes(fakeT, summaryA, attribute.Bool("A", false))) assert.False(t, AssertHasAttributes(fakeT, summaryA, attribute.Bool("B", true))) sum := metricdata.Sum[int64]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: true, DataPoints: []metricdata.DataPoint[int64]{ dataPointInt64A, dataPointInt64B, }, } assert.False(t, AssertHasAttributes(fakeT, sum, attribute.Bool("A", true))) } opentelemetry-go-1.21.0/sdk/metric/metricdata/metricdatatest/comparisons.go000066400000000000000000000634041452547353200271750ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metricdatatest // import "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" import ( "bytes" "fmt" "reflect" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) // equalResourceMetrics returns reasons ResourceMetrics are not equal. If they // are equal, the returned reasons will be empty. // // The ScopeMetrics each ResourceMetrics contains are compared based on // containing the same ScopeMetrics, not the order they are stored in. func equalResourceMetrics(a, b metricdata.ResourceMetrics, cfg config) (reasons []string) { if !a.Resource.Equal(b.Resource) { reasons = append(reasons, notEqualStr("Resources", a.Resource, b.Resource)) } r := compareDiff(diffSlices( a.ScopeMetrics, b.ScopeMetrics, func(a, b metricdata.ScopeMetrics) bool { r := equalScopeMetrics(a, b, cfg) return len(r) == 0 }, )) if r != "" { reasons = append(reasons, fmt.Sprintf("ResourceMetrics ScopeMetrics not equal:\n%s", r)) } return reasons } // equalScopeMetrics returns reasons ScopeMetrics are not equal. If they are // equal, the returned reasons will be empty. // // The Metrics each ScopeMetrics contains are compared based on containing the // same Metrics, not the order they are stored in. func equalScopeMetrics(a, b metricdata.ScopeMetrics, cfg config) (reasons []string) { if a.Scope != b.Scope { reasons = append(reasons, notEqualStr("Scope", a.Scope, b.Scope)) } r := compareDiff(diffSlices( a.Metrics, b.Metrics, func(a, b metricdata.Metrics) bool { r := equalMetrics(a, b, cfg) return len(r) == 0 }, )) if r != "" { reasons = append(reasons, fmt.Sprintf("ScopeMetrics Metrics not equal:\n%s", r)) } return reasons } // equalMetrics returns reasons Metrics are not equal. If they are equal, the // returned reasons will be empty. func equalMetrics(a, b metricdata.Metrics, cfg config) (reasons []string) { if a.Name != b.Name { reasons = append(reasons, notEqualStr("Name", a.Name, b.Name)) } if a.Description != b.Description { reasons = append(reasons, notEqualStr("Description", a.Description, b.Description)) } if a.Unit != b.Unit { reasons = append(reasons, notEqualStr("Unit", a.Unit, b.Unit)) } r := equalAggregations(a.Data, b.Data, cfg) if len(r) > 0 { reasons = append(reasons, "Metrics Data not equal:") reasons = append(reasons, r...) } return reasons } // equalAggregations returns reasons a and b are not equal. If they are equal, // the returned reasons will be empty. func equalAggregations(a, b metricdata.Aggregation, cfg config) (reasons []string) { if a == nil || b == nil { if a != b { return []string{notEqualStr("Aggregation", a, b)} } return reasons } if reflect.TypeOf(a) != reflect.TypeOf(b) { return []string{fmt.Sprintf("Aggregation types not equal:\nexpected: %T\nactual: %T", a, b)} } switch v := a.(type) { case metricdata.Gauge[int64]: r := equalGauges(v, b.(metricdata.Gauge[int64]), cfg) if len(r) > 0 { reasons = append(reasons, "Gauge[int64] not equal:") reasons = append(reasons, r...) 
} case metricdata.Gauge[float64]: r := equalGauges(v, b.(metricdata.Gauge[float64]), cfg) if len(r) > 0 { reasons = append(reasons, "Gauge[float64] not equal:") reasons = append(reasons, r...) } case metricdata.Sum[int64]: r := equalSums(v, b.(metricdata.Sum[int64]), cfg) if len(r) > 0 { reasons = append(reasons, "Sum[int64] not equal:") reasons = append(reasons, r...) } case metricdata.Sum[float64]: r := equalSums(v, b.(metricdata.Sum[float64]), cfg) if len(r) > 0 { reasons = append(reasons, "Sum[float64] not equal:") reasons = append(reasons, r...) } case metricdata.Histogram[int64]: r := equalHistograms(v, b.(metricdata.Histogram[int64]), cfg) if len(r) > 0 { reasons = append(reasons, "Histogram not equal:") reasons = append(reasons, r...) } case metricdata.Histogram[float64]: r := equalHistograms(v, b.(metricdata.Histogram[float64]), cfg) if len(r) > 0 { reasons = append(reasons, "Histogram not equal:") reasons = append(reasons, r...) } case metricdata.ExponentialHistogram[int64]: r := equalExponentialHistograms(v, b.(metricdata.ExponentialHistogram[int64]), cfg) if len(r) > 0 { reasons = append(reasons, "ExponentialHistogram not equal:") reasons = append(reasons, r...) } case metricdata.ExponentialHistogram[float64]: r := equalExponentialHistograms(v, b.(metricdata.ExponentialHistogram[float64]), cfg) if len(r) > 0 { reasons = append(reasons, "ExponentialHistogram not equal:") reasons = append(reasons, r...) } case metricdata.Summary: r := equalSummary(v, b.(metricdata.Summary), cfg) if len(r) > 0 { reasons = append(reasons, "Summary not equal:") reasons = append(reasons, r...) } default: reasons = append(reasons, fmt.Sprintf("Aggregation of unknown types %T", a)) } return reasons } // equalGauges returns reasons Gauges are not equal. If they are equal, the // returned reasons will be empty. // // The DataPoints each Gauge contains are compared based on containing the // same DataPoints, not the order they are stored in. func equalGauges[N int64 | float64](a, b metricdata.Gauge[N], cfg config) (reasons []string) { r := compareDiff(diffSlices( a.DataPoints, b.DataPoints, func(a, b metricdata.DataPoint[N]) bool { r := equalDataPoints(a, b, cfg) return len(r) == 0 }, )) if r != "" { reasons = append(reasons, fmt.Sprintf("Gauge DataPoints not equal:\n%s", r)) } return reasons } // equalSums returns reasons Sums are not equal. If they are equal, the // returned reasons will be empty. // // The DataPoints each Sum contains are compared based on containing the same // DataPoints, not the order they are stored in. func equalSums[N int64 | float64](a, b metricdata.Sum[N], cfg config) (reasons []string) { if a.Temporality != b.Temporality { reasons = append(reasons, notEqualStr("Temporality", a.Temporality, b.Temporality)) } if a.IsMonotonic != b.IsMonotonic { reasons = append(reasons, notEqualStr("IsMonotonic", a.IsMonotonic, b.IsMonotonic)) } r := compareDiff(diffSlices( a.DataPoints, b.DataPoints, func(a, b metricdata.DataPoint[N]) bool { r := equalDataPoints(a, b, cfg) return len(r) == 0 }, )) if r != "" { reasons = append(reasons, fmt.Sprintf("Sum DataPoints not equal:\n%s", r)) } return reasons } // equalHistograms returns reasons Histograms are not equal. If they are // equal, the returned reasons will be empty. // // The DataPoints each Histogram contains are compared based on containing the // same HistogramDataPoint, not the order they are stored in. 
func equalHistograms[N int64 | float64](a, b metricdata.Histogram[N], cfg config) (reasons []string) { if a.Temporality != b.Temporality { reasons = append(reasons, notEqualStr("Temporality", a.Temporality, b.Temporality)) } r := compareDiff(diffSlices( a.DataPoints, b.DataPoints, func(a, b metricdata.HistogramDataPoint[N]) bool { r := equalHistogramDataPoints(a, b, cfg) return len(r) == 0 }, )) if r != "" { reasons = append(reasons, fmt.Sprintf("Histogram DataPoints not equal:\n%s", r)) } return reasons } // equalDataPoints returns reasons DataPoints are not equal. If they are // equal, the returned reasons will be empty. func equalDataPoints[N int64 | float64](a, b metricdata.DataPoint[N], cfg config) (reasons []string) { // nolint: revive // Intentional internal control flag if !a.Attributes.Equals(&b.Attributes) { reasons = append(reasons, notEqualStr( "Attributes", a.Attributes.Encoded(attribute.DefaultEncoder()), b.Attributes.Encoded(attribute.DefaultEncoder()), )) } if !cfg.ignoreTimestamp { if !a.StartTime.Equal(b.StartTime) { reasons = append(reasons, notEqualStr("StartTime", a.StartTime.UnixNano(), b.StartTime.UnixNano())) } if !a.Time.Equal(b.Time) { reasons = append(reasons, notEqualStr("Time", a.Time.UnixNano(), b.Time.UnixNano())) } } if !cfg.ignoreValue { if a.Value != b.Value { reasons = append(reasons, notEqualStr("Value", a.Value, b.Value)) } } if !cfg.ignoreExemplars { r := compareDiff(diffSlices( a.Exemplars, b.Exemplars, func(a, b metricdata.Exemplar[N]) bool { r := equalExemplars(a, b, cfg) return len(r) == 0 }, )) if r != "" { reasons = append(reasons, fmt.Sprintf("Exemplars not equal:\n%s", r)) } } return reasons } // equalHistogramDataPoints returns reasons HistogramDataPoints are not equal. // If they are equal, the returned reasons will be empty. func equalHistogramDataPoints[N int64 | float64](a, b metricdata.HistogramDataPoint[N], cfg config) (reasons []string) { // nolint: revive // Intentional internal control flag if !a.Attributes.Equals(&b.Attributes) { reasons = append(reasons, notEqualStr( "Attributes", a.Attributes.Encoded(attribute.DefaultEncoder()), b.Attributes.Encoded(attribute.DefaultEncoder()), )) } if !cfg.ignoreTimestamp { if !a.StartTime.Equal(b.StartTime) { reasons = append(reasons, notEqualStr("StartTime", a.StartTime.UnixNano(), b.StartTime.UnixNano())) } if !a.Time.Equal(b.Time) { reasons = append(reasons, notEqualStr("Time", a.Time.UnixNano(), b.Time.UnixNano())) } } if !cfg.ignoreValue { if a.Count != b.Count { reasons = append(reasons, notEqualStr("Count", a.Count, b.Count)) } if !equalSlices(a.Bounds, b.Bounds) { reasons = append(reasons, notEqualStr("Bounds", a.Bounds, b.Bounds)) } if !equalSlices(a.BucketCounts, b.BucketCounts) { reasons = append(reasons, notEqualStr("BucketCounts", a.BucketCounts, b.BucketCounts)) } if !eqExtrema(a.Min, b.Min) { reasons = append(reasons, notEqualStr("Min", a.Min, b.Min)) } if !eqExtrema(a.Max, b.Max) { reasons = append(reasons, notEqualStr("Max", a.Max, b.Max)) } if a.Sum != b.Sum { reasons = append(reasons, notEqualStr("Sum", a.Sum, b.Sum)) } } if !cfg.ignoreExemplars { r := compareDiff(diffSlices( a.Exemplars, b.Exemplars, func(a, b metricdata.Exemplar[N]) bool { r := equalExemplars(a, b, cfg) return len(r) == 0 }, )) if r != "" { reasons = append(reasons, fmt.Sprintf("Exemplars not equal:\n%s", r)) } } return reasons } // equalExponentialHistograms returns reasons exponential Histograms are not equal. If they are // equal, the returned reasons will be empty. 
// // The DataPoints each Histogram contains are compared based on containing the // same HistogramDataPoint, not the order they are stored in. func equalExponentialHistograms[N int64 | float64](a, b metricdata.ExponentialHistogram[N], cfg config) (reasons []string) { if a.Temporality != b.Temporality { reasons = append(reasons, notEqualStr("Temporality", a.Temporality, b.Temporality)) } r := compareDiff(diffSlices( a.DataPoints, b.DataPoints, func(a, b metricdata.ExponentialHistogramDataPoint[N]) bool { r := equalExponentialHistogramDataPoints(a, b, cfg) return len(r) == 0 }, )) if r != "" { reasons = append(reasons, fmt.Sprintf("Histogram DataPoints not equal:\n%s", r)) } return reasons } // equalExponentialHistogramDataPoints returns reasons HistogramDataPoints are not equal. // If they are equal, the returned reasons will be empty. func equalExponentialHistogramDataPoints[N int64 | float64](a, b metricdata.ExponentialHistogramDataPoint[N], cfg config) (reasons []string) { // nolint: revive // Intentional internal control flag if !a.Attributes.Equals(&b.Attributes) { reasons = append(reasons, notEqualStr( "Attributes", a.Attributes.Encoded(attribute.DefaultEncoder()), b.Attributes.Encoded(attribute.DefaultEncoder()), )) } if !cfg.ignoreTimestamp { if !a.StartTime.Equal(b.StartTime) { reasons = append(reasons, notEqualStr("StartTime", a.StartTime.UnixNano(), b.StartTime.UnixNano())) } if !a.Time.Equal(b.Time) { reasons = append(reasons, notEqualStr("Time", a.Time.UnixNano(), b.Time.UnixNano())) } } if !cfg.ignoreValue { if a.Count != b.Count { reasons = append(reasons, notEqualStr("Count", a.Count, b.Count)) } if !eqExtrema(a.Min, b.Min) { reasons = append(reasons, notEqualStr("Min", a.Min, b.Min)) } if !eqExtrema(a.Max, b.Max) { reasons = append(reasons, notEqualStr("Max", a.Max, b.Max)) } if a.Sum != b.Sum { reasons = append(reasons, notEqualStr("Sum", a.Sum, b.Sum)) } if a.Scale != b.Scale { reasons = append(reasons, notEqualStr("Scale", a.Scale, b.Scale)) } if a.ZeroCount != b.ZeroCount { reasons = append(reasons, notEqualStr("ZeroCount", a.ZeroCount, b.ZeroCount)) } r := equalExponentialBuckets(a.PositiveBucket, b.PositiveBucket, cfg) if len(r) > 0 { reasons = append(reasons, r...) } r = equalExponentialBuckets(a.NegativeBucket, b.NegativeBucket, cfg) if len(r) > 0 { reasons = append(reasons, r...) 
} } if !cfg.ignoreExemplars { r := compareDiff(diffSlices( a.Exemplars, b.Exemplars, func(a, b metricdata.Exemplar[N]) bool { r := equalExemplars(a, b, cfg) return len(r) == 0 }, )) if r != "" { reasons = append(reasons, fmt.Sprintf("Exemplars not equal:\n%s", r)) } } return reasons } func equalExponentialBuckets(a, b metricdata.ExponentialBucket, _ config) (reasons []string) { if a.Offset != b.Offset { reasons = append(reasons, notEqualStr("Offset", a.Offset, b.Offset)) } if !equalSlices(a.Counts, b.Counts) { reasons = append(reasons, notEqualStr("Counts", a.Counts, b.Counts)) } return reasons } func equalSummary(a, b metricdata.Summary, cfg config) (reasons []string) { r := compareDiff(diffSlices( a.DataPoints, b.DataPoints, func(a, b metricdata.SummaryDataPoint) bool { r := equalSummaryDataPoint(a, b, cfg) return len(r) == 0 }, )) if r != "" { reasons = append(reasons, fmt.Sprintf("Summary DataPoints not equal:\n%s", r)) } return reasons } func equalSummaryDataPoint(a, b metricdata.SummaryDataPoint, cfg config) (reasons []string) { if !a.Attributes.Equals(&b.Attributes) { reasons = append(reasons, notEqualStr( "Attributes", a.Attributes.Encoded(attribute.DefaultEncoder()), b.Attributes.Encoded(attribute.DefaultEncoder()), )) } if !cfg.ignoreTimestamp { if !a.StartTime.Equal(b.StartTime) { reasons = append(reasons, notEqualStr("StartTime", a.StartTime.UnixNano(), b.StartTime.UnixNano())) } if !a.Time.Equal(b.Time) { reasons = append(reasons, notEqualStr("Time", a.Time.UnixNano(), b.Time.UnixNano())) } } if !cfg.ignoreValue { if a.Count != b.Count { reasons = append(reasons, notEqualStr("Count", a.Count, b.Count)) } if a.Sum != b.Sum { reasons = append(reasons, notEqualStr("Sum", a.Sum, b.Sum)) } r := compareDiff(diffSlices( a.QuantileValues, b.QuantileValues, func(a, b metricdata.QuantileValue) bool { r := equalQuantileValue(a, b, cfg) return len(r) == 0 }, )) if r != "" { reasons = append(reasons, r) } } return reasons } func equalQuantileValue(a, b metricdata.QuantileValue, _ config) (reasons []string) { if a.Quantile != b.Quantile { reasons = append(reasons, notEqualStr("Quantile", a.Quantile, b.Quantile)) } if a.Value != b.Value { reasons = append(reasons, notEqualStr("Value", a.Value, b.Value)) } return reasons } func notEqualStr(prefix string, expected, actual interface{}) string { return fmt.Sprintf("%s not equal:\nexpected: %v\nactual: %v", prefix, expected, actual) } func equalSlices[T comparable](a, b []T) bool { if len(a) != len(b) { return false } for i, v := range a { if v != b[i] { return false } } return true } func equalExtrema[N int64 | float64](a, b metricdata.Extrema[N], _ config) (reasons []string) { if !eqExtrema(a, b) { reasons = append(reasons, notEqualStr("Extrema", a, b)) } return reasons } func eqExtrema[N int64 | float64](a, b metricdata.Extrema[N]) bool { aV, aOk := a.Value() bV, bOk := b.Value() if !aOk || !bOk { return aOk == bOk } return aV == bV } func equalKeyValue(a, b []attribute.KeyValue) bool { // Comparison of []attribute.KeyValue as a comparable requires Go >= 1.20. // To support Go < 1.20 use this function instead. 
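// Illustrative sketch, not part of the original file: the element-wise
// comparison below treats two KeyValue slices as equal only when keys, value
// types, and values all match in order, e.g.
//
//	a := []attribute.KeyValue{attribute.String("k", "v"), attribute.Int("n", 1)}
//	b := []attribute.KeyValue{attribute.String("k", "v"), attribute.Int("n", 1)}
//	_ = equalKeyValue(a, b)     // true
//	_ = equalKeyValue(a, b[:1]) // false: lengths differ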
if len(a) != len(b) { return false } for i, v := range a { if v.Key != b[i].Key { return false } if v.Value.Type() != b[i].Value.Type() { return false } switch v.Value.Type() { case attribute.BOOL: if v.Value.AsBool() != b[i].Value.AsBool() { return false } case attribute.INT64: if v.Value.AsInt64() != b[i].Value.AsInt64() { return false } case attribute.FLOAT64: if v.Value.AsFloat64() != b[i].Value.AsFloat64() { return false } case attribute.STRING: if v.Value.AsString() != b[i].Value.AsString() { return false } case attribute.BOOLSLICE: if ok := equalSlices(v.Value.AsBoolSlice(), b[i].Value.AsBoolSlice()); !ok { return false } case attribute.INT64SLICE: if ok := equalSlices(v.Value.AsInt64Slice(), b[i].Value.AsInt64Slice()); !ok { return false } case attribute.FLOAT64SLICE: if ok := equalSlices(v.Value.AsFloat64Slice(), b[i].Value.AsFloat64Slice()); !ok { return false } case attribute.STRINGSLICE: if ok := equalSlices(v.Value.AsStringSlice(), b[i].Value.AsStringSlice()); !ok { return false } default: // We control all types passed to this, panic to signal developers // early they changed things in an incompatible way. panic(fmt.Sprintf("unknown attribute value type: %s", v.Value.Type())) } } return true } func equalExemplars[N int64 | float64](a, b metricdata.Exemplar[N], cfg config) (reasons []string) { if !equalKeyValue(a.FilteredAttributes, b.FilteredAttributes) { reasons = append(reasons, notEqualStr("FilteredAttributes", a.FilteredAttributes, b.FilteredAttributes)) } if !cfg.ignoreTimestamp { if !a.Time.Equal(b.Time) { reasons = append(reasons, notEqualStr("Time", a.Time.UnixNano(), b.Time.UnixNano())) } } if !cfg.ignoreValue { if a.Value != b.Value { reasons = append(reasons, notEqualStr("Value", a.Value, b.Value)) } } if !equalSlices(a.SpanID, b.SpanID) { reasons = append(reasons, notEqualStr("SpanID", a.SpanID, b.SpanID)) } if !equalSlices(a.TraceID, b.TraceID) { reasons = append(reasons, notEqualStr("TraceID", a.TraceID, b.TraceID)) } return reasons } func diffSlices[T any](a, b []T, equal func(T, T) bool) (extraA, extraB []T) { visited := make([]bool, len(b)) for i := 0; i < len(a); i++ { found := false for j := 0; j < len(b); j++ { if visited[j] { continue } if equal(a[i], b[j]) { visited[j] = true found = true break } } if !found { extraA = append(extraA, a[i]) } } for j := 0; j < len(b); j++ { if visited[j] { continue } extraB = append(extraB, b[j]) } return extraA, extraB } func compareDiff[T any](extraExpected, extraActual []T) string { if len(extraExpected) == 0 && len(extraActual) == 0 { return "" } formatter := func(v T) string { return fmt.Sprintf("%#v", v) } var msg bytes.Buffer if len(extraExpected) > 0 { _, _ = msg.WriteString("missing expected values:\n") for _, v := range extraExpected { _, _ = msg.WriteString(formatter(v) + "\n") } } if len(extraActual) > 0 { _, _ = msg.WriteString("unexpected additional values:\n") for _, v := range extraActual { _, _ = msg.WriteString(formatter(v) + "\n") } } return msg.String() } func missingAttrStr(name string) string { return fmt.Sprintf("missing attribute %s", name) } func hasAttributesExemplars[T int64 | float64](exemplar metricdata.Exemplar[T], attrs ...attribute.KeyValue) (reasons []string) { s := attribute.NewSet(exemplar.FilteredAttributes...) 
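// attribute.NewSet returns a normalized Set (keys sorted, duplicates
// collapsed), so the key lookups below see each filtered attribute once.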
for _, attr := range attrs { val, ok := s.Value(attr.Key) if !ok { reasons = append(reasons, missingAttrStr(string(attr.Key))) continue } if val != attr.Value { reasons = append(reasons, notEqualStr(string(attr.Key), attr.Value.Emit(), val.Emit())) } } return reasons } func hasAttributesDataPoints[T int64 | float64](dp metricdata.DataPoint[T], attrs ...attribute.KeyValue) (reasons []string) { for _, attr := range attrs { val, ok := dp.Attributes.Value(attr.Key) if !ok { reasons = append(reasons, missingAttrStr(string(attr.Key))) continue } if val != attr.Value { reasons = append(reasons, notEqualStr(string(attr.Key), attr.Value.Emit(), val.Emit())) } } return reasons } func hasAttributesGauge[T int64 | float64](gauge metricdata.Gauge[T], attrs ...attribute.KeyValue) (reasons []string) { for n, dp := range gauge.DataPoints { reas := hasAttributesDataPoints(dp, attrs...) if len(reas) > 0 { reasons = append(reasons, fmt.Sprintf("gauge datapoint %d attributes:\n", n)) reasons = append(reasons, reas...) } } return reasons } func hasAttributesSum[T int64 | float64](sum metricdata.Sum[T], attrs ...attribute.KeyValue) (reasons []string) { for n, dp := range sum.DataPoints { reas := hasAttributesDataPoints(dp, attrs...) if len(reas) > 0 { reasons = append(reasons, fmt.Sprintf("sum datapoint %d attributes:\n", n)) reasons = append(reasons, reas...) } } return reasons } func hasAttributesHistogramDataPoints[T int64 | float64](dp metricdata.HistogramDataPoint[T], attrs ...attribute.KeyValue) (reasons []string) { for _, attr := range attrs { val, ok := dp.Attributes.Value(attr.Key) if !ok { reasons = append(reasons, missingAttrStr(string(attr.Key))) continue } if val != attr.Value { reasons = append(reasons, notEqualStr(string(attr.Key), attr.Value.Emit(), val.Emit())) } } return reasons } func hasAttributesHistogram[T int64 | float64](histogram metricdata.Histogram[T], attrs ...attribute.KeyValue) (reasons []string) { for n, dp := range histogram.DataPoints { reas := hasAttributesHistogramDataPoints(dp, attrs...) if len(reas) > 0 { reasons = append(reasons, fmt.Sprintf("histogram datapoint %d attributes:\n", n)) reasons = append(reasons, reas...) } } return reasons } func hasAttributesExponentialHistogramDataPoints[T int64 | float64](dp metricdata.ExponentialHistogramDataPoint[T], attrs ...attribute.KeyValue) (reasons []string) { for _, attr := range attrs { val, ok := dp.Attributes.Value(attr.Key) if !ok { reasons = append(reasons, missingAttrStr(string(attr.Key))) continue } if val != attr.Value { reasons = append(reasons, notEqualStr(string(attr.Key), attr.Value.Emit(), val.Emit())) } } return reasons } func hasAttributesExponentialHistogram[T int64 | float64](histogram metricdata.ExponentialHistogram[T], attrs ...attribute.KeyValue) (reasons []string) { for n, dp := range histogram.DataPoints { reas := hasAttributesExponentialHistogramDataPoints(dp, attrs...) if len(reas) > 0 { reasons = append(reasons, fmt.Sprintf("histogram datapoint %d attributes:\n", n)) reasons = append(reasons, reas...) } } return reasons } func hasAttributesAggregation(agg metricdata.Aggregation, attrs ...attribute.KeyValue) (reasons []string) { switch agg := agg.(type) { case metricdata.Gauge[int64]: reasons = hasAttributesGauge(agg, attrs...) case metricdata.Gauge[float64]: reasons = hasAttributesGauge(agg, attrs...) case metricdata.Sum[int64]: reasons = hasAttributesSum(agg, attrs...) case metricdata.Sum[float64]: reasons = hasAttributesSum(agg, attrs...) 
case metricdata.Histogram[int64]: reasons = hasAttributesHistogram(agg, attrs...) case metricdata.Histogram[float64]: reasons = hasAttributesHistogram(agg, attrs...) case metricdata.ExponentialHistogram[int64]: reasons = hasAttributesExponentialHistogram(agg, attrs...) case metricdata.ExponentialHistogram[float64]: reasons = hasAttributesExponentialHistogram(agg, attrs...) case metricdata.Summary: reasons = hasAttributesSummary(agg, attrs...) default: reasons = []string{fmt.Sprintf("unknown aggregation %T", agg)} } return reasons } func hasAttributesMetrics(metrics metricdata.Metrics, attrs ...attribute.KeyValue) (reasons []string) { reas := hasAttributesAggregation(metrics.Data, attrs...) if len(reas) > 0 { reasons = append(reasons, fmt.Sprintf("Metric %s:\n", metrics.Name)) reasons = append(reasons, reas...) } return reasons } func hasAttributesScopeMetrics(sm metricdata.ScopeMetrics, attrs ...attribute.KeyValue) (reasons []string) { for n, metrics := range sm.Metrics { reas := hasAttributesMetrics(metrics, attrs...) if len(reas) > 0 { reasons = append(reasons, fmt.Sprintf("ScopeMetrics %s Metrics %d:\n", sm.Scope.Name, n)) reasons = append(reasons, reas...) } } return reasons } func hasAttributesResourceMetrics(rm metricdata.ResourceMetrics, attrs ...attribute.KeyValue) (reasons []string) { for n, sm := range rm.ScopeMetrics { reas := hasAttributesScopeMetrics(sm, attrs...) if len(reas) > 0 { reasons = append(reasons, fmt.Sprintf("ResourceMetrics ScopeMetrics %d:\n", n)) reasons = append(reasons, reas...) } } return reasons } func hasAttributesSummary(summary metricdata.Summary, attrs ...attribute.KeyValue) (reasons []string) { for n, dp := range summary.DataPoints { reas := hasAttributesSummaryDataPoint(dp, attrs...) if len(reas) > 0 { reasons = append(reasons, fmt.Sprintf("summary datapoint %d attributes:\n", n)) reasons = append(reasons, reas...) } } return reasons } func hasAttributesSummaryDataPoint(dp metricdata.SummaryDataPoint, attrs ...attribute.KeyValue) (reasons []string) { for _, attr := range attrs { val, ok := dp.Attributes.Value(attr.Key) if !ok { reasons = append(reasons, missingAttrStr(string(attr.Key))) continue } if val != attr.Value { reasons = append(reasons, notEqualStr(string(attr.Key), attr.Value.Emit(), val.Emit())) } } return reasons } opentelemetry-go-1.21.0/sdk/metric/metricdata/temporality.go000066400000000000000000000027771452547353200242020ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:generate stringer -type=Temporality package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata" // Temporality defines the window that an aggregation was calculated over. type Temporality uint8 const ( // undefinedTemporality represents an unset Temporality. //nolint:deadcode,unused,varcheck undefinedTemporality Temporality = iota // CumulativeTemporality defines a measurement interval that continues to // expand forward in time from a starting point. 
New measurements are // added to all previous measurements since a start time. CumulativeTemporality // DeltaTemporality defines a measurement interval that resets each cycle. // Measurements from one cycle are recorded independently, measurements // from other cycles do not affect them. DeltaTemporality ) // MarshalText returns the byte encoded of t. func (t Temporality) MarshalText() ([]byte, error) { return []byte(t.String()), nil } opentelemetry-go-1.21.0/sdk/metric/metricdata/temporality_string.go000066400000000000000000000013511452547353200255530ustar00rootroot00000000000000// Code generated by "stringer -type=Temporality"; DO NOT EDIT. package metricdata import "strconv" func _() { // An "invalid array index" compiler error signifies that the constant values have changed. // Re-run the stringer command to generate them again. var x [1]struct{} _ = x[undefinedTemporality-0] _ = x[CumulativeTemporality-1] _ = x[DeltaTemporality-2] } const _Temporality_name = "undefinedTemporalityCumulativeTemporalityDeltaTemporality" var _Temporality_index = [...]uint8{0, 20, 41, 57} func (i Temporality) String() string { if i >= Temporality(len(_Temporality_index)-1) { return "Temporality(" + strconv.FormatInt(int64(i), 10) + ")" } return _Temporality_name[_Temporality_index[i]:_Temporality_index[i+1]] } opentelemetry-go-1.21.0/sdk/metric/periodic_reader.go000066400000000000000000000257561452547353200226360ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( "context" "errors" "fmt" "sync" "sync/atomic" "time" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) // Default periodic reader timing. const ( defaultTimeout = time.Millisecond * 30000 defaultInterval = time.Millisecond * 60000 ) // periodicReaderConfig contains configuration options for a PeriodicReader. type periodicReaderConfig struct { interval time.Duration timeout time.Duration producers []Producer } // newPeriodicReaderConfig returns a periodicReaderConfig configured with // options. func newPeriodicReaderConfig(options []PeriodicReaderOption) periodicReaderConfig { c := periodicReaderConfig{ interval: envDuration(envInterval, defaultInterval), timeout: envDuration(envTimeout, defaultTimeout), } for _, o := range options { c = o.applyPeriodic(c) } return c } // PeriodicReaderOption applies a configuration option value to a PeriodicReader. type PeriodicReaderOption interface { applyPeriodic(periodicReaderConfig) periodicReaderConfig } // periodicReaderOptionFunc applies a set of options to a periodicReaderConfig. type periodicReaderOptionFunc func(periodicReaderConfig) periodicReaderConfig // applyPeriodic returns a periodicReaderConfig with option(s) applied. 
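// Illustrative note, grounded in the option docs and tests below: the
// OTEL_METRIC_EXPORT_INTERVAL and OTEL_METRIC_EXPORT_TIMEOUT environment
// variables are read as millisecond values (e.g. OTEL_METRIC_EXPORT_INTERVAL=10000
// gives a 10s interval), and explicit WithInterval/WithTimeout options take
// precedence over them.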
func (o periodicReaderOptionFunc) applyPeriodic(conf periodicReaderConfig) periodicReaderConfig { return o(conf) } // WithTimeout configures the time a PeriodicReader waits for an export to // complete before canceling it. This includes an export which occurs as part // of Shutdown or ForceFlush if the user passed context does not have a // deadline. If the user passed context does have a deadline, it will be used // instead. // // This option overrides any value set for the // OTEL_METRIC_EXPORT_TIMEOUT environment variable. // // If this option is not used or d is less than or equal to zero, 30 seconds // is used as the default. func WithTimeout(d time.Duration) PeriodicReaderOption { return periodicReaderOptionFunc(func(conf periodicReaderConfig) periodicReaderConfig { if d <= 0 { return conf } conf.timeout = d return conf }) } // WithInterval configures the intervening time between exports for a // PeriodicReader. // // This option overrides any value set for the // OTEL_METRIC_EXPORT_INTERVAL environment variable. // // If this option is not used or d is less than or equal to zero, 60 seconds // is used as the default. func WithInterval(d time.Duration) PeriodicReaderOption { return periodicReaderOptionFunc(func(conf periodicReaderConfig) periodicReaderConfig { if d <= 0 { return conf } conf.interval = d return conf }) } // NewPeriodicReader returns a Reader that collects and exports metric data to // the exporter at a defined interval. By default, the returned Reader will // collect and export data every 60 seconds, and will cancel any attempts that // exceed 30 seconds, collect and export combined. The collect and export time // are not counted towards the interval between attempts. // // The Collect method of the returned Reader continues to gather and return // metric data to the user. It will not automatically send that data to the // exporter. That is left to the user to accomplish. func NewPeriodicReader(exporter Exporter, options ...PeriodicReaderOption) *PeriodicReader { conf := newPeriodicReaderConfig(options) ctx, cancel := context.WithCancel(context.Background()) r := &PeriodicReader{ interval: conf.interval, timeout: conf.timeout, exporter: exporter, flushCh: make(chan chan error), cancel: cancel, done: make(chan struct{}), rmPool: sync.Pool{ New: func() interface{} { return &metricdata.ResourceMetrics{} }, }, } r.externalProducers.Store(conf.producers) go func() { defer func() { close(r.done) }() r.run(ctx, conf.interval) }() return r } // PeriodicReader is a Reader that continuously collects and exports metric // data at a set interval. type PeriodicReader struct { sdkProducer atomic.Value mu sync.Mutex isShutdown bool externalProducers atomic.Value interval time.Duration timeout time.Duration exporter Exporter flushCh chan chan error done chan struct{} cancel context.CancelFunc shutdownOnce sync.Once rmPool sync.Pool } // Compile time check the periodicReader implements Reader and is comparable. var _ = map[Reader]struct{}{&PeriodicReader{}: {}} // newTicker allows testing override. var newTicker = time.NewTicker // run continuously collects and exports metric data at the specified // interval. This will run until ctx is canceled or times out. 
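// Illustrative sketch, not part of the original file: typical wiring of a
// PeriodicReader by a user of go.opentelemetry.io/otel/sdk/metric. The exp
// value is a placeholder for any Exporter implementation.
//
//	reader := metric.NewPeriodicReader(exp,
//		metric.WithInterval(15*time.Second),
//		metric.WithTimeout(5*time.Second),
//	)
//	provider := metric.NewMeterProvider(metric.WithReader(reader))
//	defer func() { _ = provider.Shutdown(context.Background()) }()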
func (r *PeriodicReader) run(ctx context.Context, interval time.Duration) { ticker := newTicker(interval) defer ticker.Stop() for { select { case <-ticker.C: err := r.collectAndExport(ctx) if err != nil { otel.Handle(err) } case errCh := <-r.flushCh: errCh <- r.collectAndExport(ctx) ticker.Reset(interval) case <-ctx.Done(): return } } } // register registers p as the producer of this reader. func (r *PeriodicReader) register(p sdkProducer) { // Only register once. If producer is already set, do nothing. if !r.sdkProducer.CompareAndSwap(nil, produceHolder{produce: p.produce}) { msg := "did not register periodic reader" global.Error(errDuplicateRegister, msg) } } // temporality reports the Temporality for the instrument kind provided. func (r *PeriodicReader) temporality(kind InstrumentKind) metricdata.Temporality { return r.exporter.Temporality(kind) } // aggregation returns what Aggregation to use for kind. func (r *PeriodicReader) aggregation(kind InstrumentKind) Aggregation { // nolint:revive // import-shadow for method scoped by type. return r.exporter.Aggregation(kind) } // collectAndExport gather all metric data related to the periodicReader r from // the SDK and exports it with r's exporter. func (r *PeriodicReader) collectAndExport(ctx context.Context) error { ctx, cancel := context.WithTimeout(ctx, r.timeout) defer cancel() // TODO (#3047): Use a sync.Pool or persistent pointer instead of allocating rm every Collect. rm := r.rmPool.Get().(*metricdata.ResourceMetrics) err := r.Collect(ctx, rm) if err == nil { err = r.export(ctx, rm) } r.rmPool.Put(rm) return err } // Collect gathers all metric data related to the Reader from // the SDK and other Producers and stores the result in rm. The metric // data is not exported to the configured exporter, it is left to the caller to // handle that if desired. // // Collect will return an error if called after shutdown. // Collect will return an error if rm is a nil ResourceMetrics. // Collect will return an error if the context's Done channel is closed. // // This method is safe to call concurrently. func (r *PeriodicReader) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error { if rm == nil { return errors.New("periodic reader: *metricdata.ResourceMetrics is nil") } // TODO (#3047): When collect is updated to accept output as param, pass rm. return r.collect(ctx, r.sdkProducer.Load(), rm) } // collect unwraps p as a produceHolder and returns its produce results. func (r *PeriodicReader) collect(ctx context.Context, p interface{}, rm *metricdata.ResourceMetrics) error { if p == nil { return ErrReaderNotRegistered } ph, ok := p.(produceHolder) if !ok { // The atomic.Value is entirely in the periodicReader's control so // this should never happen. In the unforeseen case that this does // happen, return an error instead of panicking so a users code does // not halt in the processes. err := fmt.Errorf("periodic reader: invalid producer: %T", p) return err } err := ph.produce(ctx, rm) if err != nil { return err } var errs []error for _, producer := range r.externalProducers.Load().([]Producer) { externalMetrics, err := producer.Produce(ctx) if err != nil { errs = append(errs, err) } rm.ScopeMetrics = append(rm.ScopeMetrics, externalMetrics...) } global.Debug("PeriodicReader collection", "Data", rm) return unifyErrors(errs) } // export exports metric data m using r's exporter. 
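// Illustrative sketch, not part of the original file: Collect can also be
// driven manually with a caller-owned ResourceMetrics, independent of the
// periodic export loop. The reader value is assumed to be a registered
// *PeriodicReader.
//
//	var rm metricdata.ResourceMetrics
//	if err := reader.Collect(context.Background(), &rm); err != nil {
//		otel.Handle(err)
//	}
//	_ = rm.ScopeMetrics // Collect does not export; the caller owns the data.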
func (r *PeriodicReader) export(ctx context.Context, m *metricdata.ResourceMetrics) error { return r.exporter.Export(ctx, m) } // ForceFlush flushes pending telemetry. // // This method is safe to call concurrently. func (r *PeriodicReader) ForceFlush(ctx context.Context) error { // Prioritize the ctx timeout if it is set. if _, ok := ctx.Deadline(); !ok { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(ctx, r.timeout) defer cancel() } errCh := make(chan error, 1) select { case r.flushCh <- errCh: select { case err := <-errCh: if err != nil { return err } close(errCh) case <-ctx.Done(): return ctx.Err() } case <-r.done: return ErrReaderShutdown case <-ctx.Done(): return ctx.Err() } return r.exporter.ForceFlush(ctx) } // Shutdown flushes pending telemetry and then stops the export pipeline. // // This method is safe to call concurrently. func (r *PeriodicReader) Shutdown(ctx context.Context) error { err := ErrReaderShutdown r.shutdownOnce.Do(func() { // Prioritize the ctx timeout if it is set. if _, ok := ctx.Deadline(); !ok { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(ctx, r.timeout) defer cancel() } // Stop the run loop. r.cancel() <-r.done // Any future call to Collect will now return ErrReaderShutdown. ph := r.sdkProducer.Swap(produceHolder{ produce: shutdownProducer{}.produce, }) if ph != nil { // Reader was registered. // Flush pending telemetry. m := r.rmPool.Get().(*metricdata.ResourceMetrics) err = r.collect(ctx, ph, m) if err == nil { err = r.export(ctx, m) } r.rmPool.Put(m) } sErr := r.exporter.Shutdown(ctx) if err == nil || err == ErrReaderShutdown { err = sErr } r.mu.Lock() defer r.mu.Unlock() r.isShutdown = true // release references to Producer(s) r.externalProducers.Store([]Producer{}) }) return err } // MarshalLog returns logging data about the PeriodicReader. func (r *PeriodicReader) MarshalLog() interface{} { r.mu.Lock() down := r.isShutdown r.mu.Unlock() return struct { Type string Exporter Exporter Registered bool Shutdown bool Interval time.Duration Timeout time.Duration }{ Type: "PeriodicReader", Exporter: r.exporter, Registered: r.sdkProducer.Load() != nil, Shutdown: down, Interval: r.interval, Timeout: r.timeout, } } opentelemetry-go-1.21.0/sdk/metric/periodic_reader_test.go000066400000000000000000000343711452547353200236660ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
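// Illustrative sketch, not part of the original file: ForceFlush and Shutdown
// prefer a deadline already set on the passed context and only fall back to
// the reader's configured timeout. The reader value is assumed to be a
// *PeriodicReader created as above.
//
//	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
//	defer cancel()
//	if err := reader.ForceFlush(ctx); err != nil {
//		otel.Handle(err)
//	}
//	if err := reader.Shutdown(ctx); err != nil {
//		otel.Handle(err)
//	}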
package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( "context" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) const testDur = time.Second * 2 func TestWithTimeout(t *testing.T) { test := func(d time.Duration) time.Duration { opts := []PeriodicReaderOption{WithTimeout(d)} return newPeriodicReaderConfig(opts).timeout } assert.Equal(t, testDur, test(testDur)) assert.Equal(t, defaultTimeout, newPeriodicReaderConfig(nil).timeout) assert.Equal(t, defaultTimeout, test(time.Duration(0)), "invalid timeout should use default") assert.Equal(t, defaultTimeout, test(time.Duration(-1)), "invalid timeout should use default") } func TestTimeoutEnvVar(t *testing.T) { testCases := []struct { v string want time.Duration }{ { // empty value "", defaultTimeout, }, { // positive value "1", time.Millisecond, }, { // non-positive value "0", defaultTimeout, }, { // value with unit (not supported) "1ms", defaultTimeout, }, { // NaN "abc", defaultTimeout, }, } for _, tc := range testCases { t.Run(tc.v, func(t *testing.T) { t.Setenv(envTimeout, tc.v) got := newPeriodicReaderConfig(nil).timeout assert.Equal(t, tc.want, got) }) } } func TestTimeoutEnvAndOption(t *testing.T) { want := 5 * time.Millisecond t.Setenv(envTimeout, "999") opts := []PeriodicReaderOption{WithTimeout(want)} got := newPeriodicReaderConfig(opts).timeout assert.Equal(t, want, got, "option should have precedence over env var") } func TestWithInterval(t *testing.T) { test := func(d time.Duration) time.Duration { opts := []PeriodicReaderOption{WithInterval(d)} return newPeriodicReaderConfig(opts).interval } assert.Equal(t, testDur, test(testDur)) assert.Equal(t, defaultInterval, newPeriodicReaderConfig(nil).interval) assert.Equal(t, defaultInterval, test(time.Duration(0)), "invalid interval should use default") assert.Equal(t, defaultInterval, test(time.Duration(-1)), "invalid interval should use default") } func TestIntervalEnvVar(t *testing.T) { testCases := []struct { v string want time.Duration }{ { // empty value "", defaultInterval, }, { // positive value "1", time.Millisecond, }, { // non-positive value "0", defaultInterval, }, { // value with unit (not supported) "1ms", defaultInterval, }, { // NaN "abc", defaultInterval, }, } for _, tc := range testCases { t.Run(tc.v, func(t *testing.T) { t.Setenv(envInterval, tc.v) got := newPeriodicReaderConfig(nil).interval assert.Equal(t, tc.want, got) }) } } func TestIntervalEnvAndOption(t *testing.T) { want := 5 * time.Millisecond t.Setenv(envInterval, "999") opts := []PeriodicReaderOption{WithInterval(want)} got := newPeriodicReaderConfig(opts).interval assert.Equal(t, want, got, "option should have precedence over env var") } type fnExporter struct { temporalityFunc TemporalitySelector aggregationFunc AggregationSelector exportFunc func(context.Context, *metricdata.ResourceMetrics) error flushFunc func(context.Context) error shutdownFunc func(context.Context) error } var _ Exporter = (*fnExporter)(nil) func (e *fnExporter) Temporality(k InstrumentKind) metricdata.Temporality { if e.temporalityFunc != nil { return e.temporalityFunc(k) } return DefaultTemporalitySelector(k) } func (e *fnExporter) Aggregation(k InstrumentKind) Aggregation { if e.aggregationFunc != nil { return e.aggregationFunc(k) } return DefaultAggregationSelector(k) } func (e *fnExporter) Export(ctx context.Context, m 
*metricdata.ResourceMetrics) error { if e.exportFunc != nil { return e.exportFunc(ctx, m) } return nil } func (e *fnExporter) ForceFlush(ctx context.Context) error { if e.flushFunc != nil { return e.flushFunc(ctx) } return nil } func (e *fnExporter) Shutdown(ctx context.Context) error { if e.shutdownFunc != nil { return e.shutdownFunc(ctx) } return nil } type periodicReaderTestSuite struct { *readerTestSuite ErrReader *PeriodicReader } func (ts *periodicReaderTestSuite) SetupTest() { ts.Reader = ts.Factory() e := &fnExporter{ exportFunc: func(context.Context, *metricdata.ResourceMetrics) error { return assert.AnError }, flushFunc: func(context.Context) error { return assert.AnError }, shutdownFunc: func(context.Context) error { return assert.AnError }, } ts.ErrReader = NewPeriodicReader(e, WithProducer(testExternalProducer{})) ts.ErrReader.register(testSDKProducer{}) } func (ts *periodicReaderTestSuite) TearDownTest() { ts.readerTestSuite.TearDownTest() _ = ts.ErrReader.Shutdown(context.Background()) } func (ts *periodicReaderTestSuite) TestForceFlushPropagated() { ts.Equal(assert.AnError, ts.ErrReader.ForceFlush(context.Background())) } func (ts *periodicReaderTestSuite) TestShutdownPropagated() { ts.Equal(assert.AnError, ts.ErrReader.Shutdown(context.Background())) } func TestPeriodicReader(t *testing.T) { suite.Run(t, &periodicReaderTestSuite{ readerTestSuite: &readerTestSuite{ Factory: func(opts ...ReaderOption) Reader { var popts []PeriodicReaderOption for _, o := range opts { popts = append(popts, o) } return NewPeriodicReader(new(fnExporter), popts...) }, }, }) } type chErrorHandler struct { Err chan error } func newChErrorHandler() *chErrorHandler { return &chErrorHandler{ Err: make(chan error, 1), } } func (eh chErrorHandler) Handle(err error) { eh.Err <- err } func triggerTicker(t *testing.T) chan time.Time { t.Helper() // Override the ticker C chan so tests are not flaky and rely on timing. orig := newTicker t.Cleanup(func() { newTicker = orig }) // Keep this at size zero so when triggered with a send it will hang until // the select case is selected and the collection loop is started. trigger := make(chan time.Time) newTicker = func(d time.Duration) *time.Ticker { ticker := time.NewTicker(d) ticker.C = trigger return ticker } return trigger } func TestPeriodicReaderRun(t *testing.T) { trigger := triggerTicker(t) // Register an error handler to validate export errors are passed to // otel.Handle. defer func(orig otel.ErrorHandler) { otel.SetErrorHandler(orig) }(otel.GetErrorHandler()) eh := newChErrorHandler() otel.SetErrorHandler(eh) exp := &fnExporter{ exportFunc: func(_ context.Context, m *metricdata.ResourceMetrics) error { // The testSDKProducer produces testResourceMetricsAB. assert.Equal(t, testResourceMetricsAB, *m) return assert.AnError }, } r := NewPeriodicReader(exp, WithProducer(testExternalProducer{})) r.register(testSDKProducer{}) trigger <- time.Now() assert.Equal(t, assert.AnError, <-eh.Err) // Ensure Reader is allowed clean up attempt. _ = r.Shutdown(context.Background()) } func TestPeriodicReaderFlushesPending(t *testing.T) { // Override the ticker so tests are not flaky and rely on timing. trigger := triggerTicker(t) t.Cleanup(func() { close(trigger) }) expFunc := func(t *testing.T) (exp Exporter, called *bool) { called = new(bool) return &fnExporter{ exportFunc: func(_ context.Context, m *metricdata.ResourceMetrics) error { // The testSDKProducer produces testResourceMetricsA. 
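// Combined with the scope metrics contributed by testExternalProducer, the
// exporter is expected to receive testResourceMetricsAB, as asserted below.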
assert.Equal(t, testResourceMetricsAB, *m) *called = true return assert.AnError }, }, called } t.Run("ForceFlush", func(t *testing.T) { exp, called := expFunc(t) r := NewPeriodicReader(exp, WithProducer(testExternalProducer{})) r.register(testSDKProducer{}) assert.Equal(t, assert.AnError, r.ForceFlush(context.Background()), "export error not returned") assert.True(t, *called, "exporter Export method not called, pending telemetry not flushed") // Ensure Reader is allowed clean up attempt. _ = r.Shutdown(context.Background()) }) t.Run("ForceFlush timeout on producer", func(t *testing.T) { exp, called := expFunc(t) timeout := time.Millisecond r := NewPeriodicReader(exp, WithTimeout(timeout), WithProducer(testExternalProducer{})) r.register(testSDKProducer{ produceFunc: func(ctx context.Context, rm *metricdata.ResourceMetrics) error { select { case <-time.After(timeout + time.Second): *rm = testResourceMetricsA case <-ctx.Done(): // we timed out before we could collect metrics return ctx.Err() } return nil }, }) assert.ErrorIs(t, r.ForceFlush(context.Background()), context.DeadlineExceeded) assert.False(t, *called, "exporter Export method called when it should have failed before export") // Ensure Reader is allowed clean up attempt. _ = r.Shutdown(context.Background()) }) t.Run("ForceFlush timeout on external producer", func(t *testing.T) { exp, called := expFunc(t) timeout := time.Millisecond r := NewPeriodicReader(exp, WithTimeout(timeout), WithProducer(testExternalProducer{ produceFunc: func(ctx context.Context) ([]metricdata.ScopeMetrics, error) { select { case <-time.After(timeout + time.Second): case <-ctx.Done(): // we timed out before we could collect metrics return nil, ctx.Err() } return []metricdata.ScopeMetrics{testScopeMetricsA}, nil }, })) r.register(testSDKProducer{}) assert.ErrorIs(t, r.ForceFlush(context.Background()), context.DeadlineExceeded) assert.False(t, *called, "exporter Export method called when it should have failed before export") // Ensure Reader is allowed clean up attempt. 
_ = r.Shutdown(context.Background()) }) t.Run("Shutdown", func(t *testing.T) { exp, called := expFunc(t) r := NewPeriodicReader(exp, WithProducer(testExternalProducer{})) r.register(testSDKProducer{}) assert.Equal(t, assert.AnError, r.Shutdown(context.Background()), "export error not returned") assert.True(t, *called, "exporter Export method not called, pending telemetry not flushed") }) t.Run("Shutdown timeout on producer", func(t *testing.T) { exp, called := expFunc(t) timeout := time.Millisecond r := NewPeriodicReader(exp, WithTimeout(timeout), WithProducer(testExternalProducer{})) r.register(testSDKProducer{ produceFunc: func(ctx context.Context, rm *metricdata.ResourceMetrics) error { select { case <-time.After(timeout + time.Second): *rm = testResourceMetricsA case <-ctx.Done(): // we timed out before we could collect metrics return ctx.Err() } return nil }, }) assert.ErrorIs(t, r.Shutdown(context.Background()), context.DeadlineExceeded) assert.False(t, *called, "exporter Export method called when it should have failed before export") }) t.Run("Shutdown timeout on external producer", func(t *testing.T) { exp, called := expFunc(t) timeout := time.Millisecond r := NewPeriodicReader(exp, WithTimeout(timeout), WithProducer(testExternalProducer{ produceFunc: func(ctx context.Context) ([]metricdata.ScopeMetrics, error) { select { case <-time.After(timeout + time.Second): case <-ctx.Done(): // we timed out before we could collect metrics return nil, ctx.Err() } return []metricdata.ScopeMetrics{testScopeMetricsA}, nil }, })) r.register(testSDKProducer{}) assert.ErrorIs(t, r.Shutdown(context.Background()), context.DeadlineExceeded) assert.False(t, *called, "exporter Export method called when it should have failed before export") }) } func TestPeriodicReaderMultipleForceFlush(t *testing.T) { ctx := context.Background() r := NewPeriodicReader(new(fnExporter), WithProducer(testExternalProducer{})) r.register(testSDKProducer{}) require.NoError(t, r.ForceFlush(ctx)) require.NoError(t, r.ForceFlush(ctx)) } func BenchmarkPeriodicReader(b *testing.B) { b.Run("Collect", benchReaderCollectFunc( NewPeriodicReader(new(fnExporter)), )) } func TestPeriodiclReaderTemporality(t *testing.T) { tests := []struct { name string exporter *fnExporter // Currently only testing constant temporality. 
This should be expanded // if we put more advanced selection in the SDK wantTemporality metricdata.Temporality }{ { name: "default", exporter: new(fnExporter), wantTemporality: metricdata.CumulativeTemporality, }, { name: "delta", exporter: &fnExporter{temporalityFunc: deltaTemporalitySelector}, wantTemporality: metricdata.DeltaTemporality, }, { name: "cumulative", exporter: &fnExporter{temporalityFunc: cumulativeTemporalitySelector}, wantTemporality: metricdata.CumulativeTemporality, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { var undefinedInstrument InstrumentKind rdr := NewPeriodicReader(tt.exporter) assert.Equal(t, tt.wantTemporality.String(), rdr.temporality(undefinedInstrument).String()) }) } } func TestPeriodicReaderCollect(t *testing.T) { expiredCtx, cancel := context.WithDeadline(context.Background(), time.Now().Add(-1)) defer cancel() tests := []struct { name string ctx context.Context expectedErr error }{ { name: "with a valid context", ctx: context.Background(), expectedErr: nil, }, { name: "with an expired context", ctx: expiredCtx, expectedErr: context.DeadlineExceeded, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { rdr := NewPeriodicReader(new(fnExporter)) mp := NewMeterProvider(WithReader(rdr)) meter := mp.Meter("test") // Ensure the pipeline has a callback setup testM, err := meter.Int64ObservableCounter("test") assert.NoError(t, err) _, err = meter.RegisterCallback(func(_ context.Context, o metric.Observer) error { return nil }, testM) assert.NoError(t, err) rm := &metricdata.ResourceMetrics{} assert.Equal(t, tt.expectedErr, rdr.Collect(tt.ctx, rm)) }) } } opentelemetry-go-1.21.0/sdk/metric/pipeline.go000066400000000000000000000530171452547353200213120ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( "container/list" "context" "errors" "fmt" "strings" "sync" "sync/atomic" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/embedded" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/metric/internal" "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/resource" ) var ( errCreatingAggregators = errors.New("could not create all aggregators") errIncompatibleAggregation = errors.New("incompatible aggregation") errUnknownAggregation = errors.New("unrecognized aggregation") ) // instrumentSync is a synchronization point between a pipeline and an // instrument's aggregate function. type instrumentSync struct { name string description string unit string compAgg aggregate.ComputeAggregation } func newPipeline(res *resource.Resource, reader Reader, views []View) *pipeline { if res == nil { res = resource.Empty() } return &pipeline{ resource: res, reader: reader, views: views, // aggregations is lazy allocated when needed. 
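// It is created on first use by addSync below.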
} } // pipeline connects all of the instruments created by a meter provider to a Reader. // This is the object that will be `Reader.register()` when a meter provider is created. // // As instruments are created the instrument should be checked if it exists in // the views of a the Reader, and if so each aggregate function should be added // to the pipeline. type pipeline struct { resource *resource.Resource reader Reader views []View sync.Mutex aggregations map[instrumentation.Scope][]instrumentSync callbacks []func(context.Context) error multiCallbacks list.List } // addSync adds the instrumentSync to pipeline p with scope. This method is not // idempotent. Duplicate calls will result in duplicate additions, it is the // callers responsibility to ensure this is called with unique values. func (p *pipeline) addSync(scope instrumentation.Scope, iSync instrumentSync) { p.Lock() defer p.Unlock() if p.aggregations == nil { p.aggregations = map[instrumentation.Scope][]instrumentSync{ scope: {iSync}, } return } p.aggregations[scope] = append(p.aggregations[scope], iSync) } // addCallback registers a single instrument callback to be run when // `produce()` is called. func (p *pipeline) addCallback(cback func(context.Context) error) { p.Lock() defer p.Unlock() p.callbacks = append(p.callbacks, cback) } type multiCallback func(context.Context) error // addMultiCallback registers a multi-instrument callback to be run when // `produce()` is called. func (p *pipeline) addMultiCallback(c multiCallback) (unregister func()) { p.Lock() defer p.Unlock() e := p.multiCallbacks.PushBack(c) return func() { p.Lock() p.multiCallbacks.Remove(e) p.Unlock() } } // produce returns aggregated metrics from a single collection. // // This method is safe to call concurrently. func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics) error { p.Lock() defer p.Unlock() var errs multierror for _, c := range p.callbacks { // TODO make the callbacks parallel. ( #3034 ) if err := c(ctx); err != nil { errs.append(err) } if err := ctx.Err(); err != nil { rm.Resource = nil rm.ScopeMetrics = rm.ScopeMetrics[:0] return err } } for e := p.multiCallbacks.Front(); e != nil; e = e.Next() { // TODO make the callbacks parallel. ( #3034 ) f := e.Value.(multiCallback) if err := f(ctx); err != nil { errs.append(err) } if err := ctx.Err(); err != nil { // This means the context expired before we finished running callbacks. rm.Resource = nil rm.ScopeMetrics = rm.ScopeMetrics[:0] return err } } rm.Resource = p.resource rm.ScopeMetrics = internal.ReuseSlice(rm.ScopeMetrics, len(p.aggregations)) i := 0 for scope, instruments := range p.aggregations { rm.ScopeMetrics[i].Metrics = internal.ReuseSlice(rm.ScopeMetrics[i].Metrics, len(instruments)) j := 0 for _, inst := range instruments { data := rm.ScopeMetrics[i].Metrics[j].Data if n := inst.compAgg(&data); n > 0 { rm.ScopeMetrics[i].Metrics[j].Name = inst.name rm.ScopeMetrics[i].Metrics[j].Description = inst.description rm.ScopeMetrics[i].Metrics[j].Unit = inst.unit rm.ScopeMetrics[i].Metrics[j].Data = data j++ } } rm.ScopeMetrics[i].Metrics = rm.ScopeMetrics[i].Metrics[:j] if len(rm.ScopeMetrics[i].Metrics) > 0 { rm.ScopeMetrics[i].Scope = scope i++ } } rm.ScopeMetrics = rm.ScopeMetrics[:i] return errs.errorOrNil() } // inserter facilitates inserting of new instruments from a single scope into a // pipeline. 
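// Illustrative sketch, not part of the original file: the views matched
// against by this pipeline come from the MeterProvider configuration; the
// instrument name here is only a placeholder.
//
//	view := metric.NewView(
//		metric.Instrument{Name: "request.duration"},
//		metric.Stream{Aggregation: metric.AggregationExplicitBucketHistogram{
//			Boundaries: []float64{0.01, 0.1, 1, 10},
//		}},
//	)
//	provider := metric.NewMeterProvider(
//		metric.WithReader(reader),
//		metric.WithView(view),
//	)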
type inserter[N int64 | float64] struct { // aggregators is a cache that holds aggregate function inputs whose // outputs have been inserted into the underlying reader pipeline. This // cache ensures no duplicate aggregate functions are inserted into the // reader pipeline and if a new request during an instrument creation asks // for the same aggregate function input the same instance is returned. aggregators *cache[instID, aggVal[N]] // views is a cache that holds instrument identifiers for all the // instruments a Meter has created, it is provided from the Meter that owns // this inserter. This cache ensures during the creation of instruments // with the same name but different options (e.g. description, unit) a // warning message is logged. views *cache[string, instID] pipeline *pipeline } func newInserter[N int64 | float64](p *pipeline, vc *cache[string, instID]) *inserter[N] { if vc == nil { vc = &cache[string, instID]{} } return &inserter[N]{ aggregators: &cache[instID, aggVal[N]]{}, views: vc, pipeline: p, } } // Instrument inserts the instrument inst with instUnit into a pipeline. All // views the pipeline contains are matched against, and any matching view that // creates a unique aggregate function will have its output inserted into the // pipeline and its input included in the returned slice. // // The returned aggregate function inputs are ensured to be deduplicated and // unique. If another view in another pipeline that is cached by this // inserter's cache has already inserted the same aggregate function for the // same instrument, that functions input instance is returned. // // If another instrument has already been inserted by this inserter, or any // other using the same cache, and it conflicts with the instrument being // inserted in this call, an aggregate function input matching the arguments // will still be returned but an Info level log message will also be logged to // the OTel global logger. // // If the passed instrument would result in an incompatible aggregate function, // an error is returned and that aggregate function output is not inserted nor // is its input returned. // // If an instrument is determined to use a Drop aggregation, that instrument is // not inserted nor returned. func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) ([]aggregate.Measure[N], error) { var ( matched bool measures []aggregate.Measure[N] ) errs := &multierror{wrapped: errCreatingAggregators} seen := make(map[uint64]struct{}) for _, v := range i.pipeline.views { stream, match := v(inst) if !match { continue } matched = true in, id, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation) if err != nil { errs.append(err) } if in == nil { // Drop aggregation. continue } if _, ok := seen[id]; ok { // This aggregate function has already been added. continue } seen[id] = struct{}{} measures = append(measures, in) } if matched { return measures, errs.errorOrNil() } // Apply implicit default view if no explicit matched. stream := Stream{ Name: inst.Name, Description: inst.Description, Unit: inst.Unit, } in, _, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation) if err != nil { errs.append(err) } if in != nil { // Ensured to have not seen given matched was false. measures = append(measures, in) } return measures, errs.errorOrNil() } var aggIDCount uint64 // aggVal is the cached value in an aggregators cache. 
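// Illustrative sketch, not part of the original file: the reader aggregation
// preference consulted by readerDefaultAggregation below is supplied by an
// AggregationSelector, for example
//
//	selector := func(k metric.InstrumentKind) metric.Aggregation {
//		if k == metric.InstrumentKindHistogram {
//			return metric.AggregationBase2ExponentialHistogram{MaxSize: 160, MaxScale: 20}
//		}
//		return metric.DefaultAggregationSelector(k)
//	}
//	reader := metric.NewManualReader(metric.WithAggregationSelector(selector))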
type aggVal[N int64 | float64] struct { ID uint64 Measure aggregate.Measure[N] Err error } // readerDefaultAggregation returns the default aggregation for the instrument // kind based on the reader's aggregation preferences. This is used unless the // aggregation is overridden with a view. func (i *inserter[N]) readerDefaultAggregation(kind InstrumentKind) Aggregation { aggregation := i.pipeline.reader.aggregation(kind) switch aggregation.(type) { case nil, AggregationDefault: // If the reader returns default or nil use the default selector. aggregation = DefaultAggregationSelector(kind) default: // Deep copy and validate before using. aggregation = aggregation.copy() if err := aggregation.err(); err != nil { orig := aggregation aggregation = DefaultAggregationSelector(kind) global.Error( err, "using default aggregation instead", "aggregation", orig, "replacement", aggregation, ) } } return aggregation } // cachedAggregator returns the appropriate aggregate input and output // functions for an instrument configuration. If the exact instrument has been // created within the inst.Scope, those aggregate function instances will be // returned. Otherwise, new computed aggregate functions will be cached and // returned. // // If the instrument configuration conflicts with an instrument that has // already been created (e.g. description, unit, data type) a warning will be // logged at the "Info" level with the global OTel logger. Valid new aggregate // functions for the instrument configuration will still be returned without an // error. // // If the instrument defines an unknown or incompatible aggregation, an error // is returned. func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind InstrumentKind, stream Stream, readerAggregation Aggregation) (meas aggregate.Measure[N], aggID uint64, err error) { switch stream.Aggregation.(type) { case nil: // The aggregation was not overridden with a view. Use the aggregation // provided by the reader. stream.Aggregation = readerAggregation case AggregationDefault: // The view explicitly requested the default aggregation. stream.Aggregation = DefaultAggregationSelector(kind) } if err := isAggregatorCompatible(kind, stream.Aggregation); err != nil { return nil, 0, fmt.Errorf( "creating aggregator with instrumentKind: %d, aggregation %v: %w", kind, stream.Aggregation, err, ) } id := i.instID(kind, stream) // If there is a conflict, the specification says the view should // still be applied and a warning should be logged. i.logConflict(id) // If there are requests for the same instrument with different name // casing, the first-seen needs to be returned. Use a normalize ID for the // cache lookup to ensure the correct comparison. normID := id.normalize() cv := i.aggregators.Lookup(normID, func() aggVal[N] { b := aggregate.Builder[N]{ Temporality: i.pipeline.reader.temporality(kind), } b.Filter = stream.AttributeFilter in, out, err := i.aggregateFunc(b, stream.Aggregation, kind) if err != nil { return aggVal[N]{0, nil, err} } if in == nil { // Drop aggregator. return aggVal[N]{0, nil, nil} } i.pipeline.addSync(scope, instrumentSync{ // Use the first-seen name casing for this and all subsequent // requests of this instrument. name: stream.Name, description: stream.Description, unit: stream.Unit, compAgg: out, }) id := atomic.AddUint64(&aggIDCount, 1) return aggVal[N]{id, in, err} }) return cv.Measure, cv.ID, cv.Err } // logConflict validates if an instrument with the same case-insensitive name // as id has already been created. 
If that instrument conflicts with id, a // warning is logged. func (i *inserter[N]) logConflict(id instID) { // The API specification defines names as case-insensitive. If there is a // different casing of a name it needs to be a conflict. name := id.normalize().Name existing := i.views.Lookup(name, func() instID { return id }) if id == existing { return } const msg = "duplicate metric stream definitions" args := []interface{}{ "names", fmt.Sprintf("%q, %q", existing.Name, id.Name), "descriptions", fmt.Sprintf("%q, %q", existing.Description, id.Description), "kinds", fmt.Sprintf("%s, %s", existing.Kind, id.Kind), "units", fmt.Sprintf("%s, %s", existing.Unit, id.Unit), "numbers", fmt.Sprintf("%s, %s", existing.Number, id.Number), } // The specification recommends logging a suggested view to resolve // conflicts if possible. // // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#duplicate-instrument-registration if id.Unit != existing.Unit || id.Number != existing.Number { // There is no view resolution for these, don't make a suggestion. global.Warn(msg, args...) return } var stream string if id.Name != existing.Name || id.Kind != existing.Kind { stream = `Stream{Name: "{{NEW_NAME}}"}` } else if id.Description != existing.Description { stream = fmt.Sprintf("Stream{Description: %q}", existing.Description) } inst := fmt.Sprintf( "Instrument{Name: %q, Description: %q, Kind: %q, Unit: %q}", id.Name, id.Description, "InstrumentKind"+id.Kind.String(), id.Unit, ) args = append(args, "suggested.view", fmt.Sprintf("NewView(%s, %s)", inst, stream)) global.Warn(msg, args...) } func (i *inserter[N]) instID(kind InstrumentKind, stream Stream) instID { var zero N return instID{ Name: stream.Name, Description: stream.Description, Unit: stream.Unit, Kind: kind, Number: fmt.Sprintf("%T", zero), } } // aggregateFunc returns new aggregate functions matching agg, kind, and // monotonic. If the agg is unknown or temporality is invalid, an error is // returned. func (i *inserter[N]) aggregateFunc(b aggregate.Builder[N], agg Aggregation, kind InstrumentKind) (meas aggregate.Measure[N], comp aggregate.ComputeAggregation, err error) { switch a := agg.(type) { case AggregationDefault: return i.aggregateFunc(b, DefaultAggregationSelector(kind), kind) case AggregationDrop: // Return nil in and out to signify the drop aggregator. case AggregationLastValue: meas, comp = b.LastValue() case AggregationSum: switch kind { case InstrumentKindObservableCounter: meas, comp = b.PrecomputedSum(true) case InstrumentKindObservableUpDownCounter: meas, comp = b.PrecomputedSum(false) case InstrumentKindCounter, InstrumentKindHistogram: meas, comp = b.Sum(true) default: // InstrumentKindUpDownCounter, InstrumentKindObservableGauge, and // instrumentKindUndefined or other invalid instrument kinds. 
meas, comp = b.Sum(false) } case AggregationExplicitBucketHistogram: var noSum bool switch kind { case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge: // The sum should not be collected for any instrument that can make // negative measurements: // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#histogram-aggregations noSum = true } meas, comp = b.ExplicitBucketHistogram(a.Boundaries, a.NoMinMax, noSum) case AggregationBase2ExponentialHistogram: var noSum bool switch kind { case InstrumentKindUpDownCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge: // The sum should not be collected for any instrument that can make // negative measurements: // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.21.0/specification/metrics/sdk.md#histogram-aggregations noSum = true } meas, comp = b.ExponentialBucketHistogram(a.MaxSize, a.MaxScale, a.NoMinMax, noSum) default: err = errUnknownAggregation } return meas, comp, err } // isAggregatorCompatible checks if the aggregation can be used by the instrument. // Current compatibility: // // | Instrument Kind | Drop | LastValue | Sum | Histogram | Exponential Histogram | // |--------------------------|------|-----------|-----|-----------|-----------------------| // | Counter | ✓ | | ✓ | ✓ | ✓ | // | UpDownCounter | ✓ | | ✓ | ✓ | ✓ | // | Histogram | ✓ | | ✓ | ✓ | ✓ | // | Observable Counter | ✓ | | ✓ | ✓ | ✓ | // | Observable UpDownCounter | ✓ | | ✓ | ✓ | ✓ | // | Observable Gauge | ✓ | ✓ | | ✓ | ✓ |. func isAggregatorCompatible(kind InstrumentKind, agg Aggregation) error { switch agg.(type) { case AggregationDefault: return nil case AggregationExplicitBucketHistogram, AggregationBase2ExponentialHistogram: switch kind { case InstrumentKindCounter, InstrumentKindUpDownCounter, InstrumentKindHistogram, InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge: return nil default: return errIncompatibleAggregation } case AggregationSum: switch kind { case InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter, InstrumentKindCounter, InstrumentKindHistogram, InstrumentKindUpDownCounter: return nil default: // TODO: review need for aggregation check after // https://github.com/open-telemetry/opentelemetry-specification/issues/2710 return errIncompatibleAggregation } case AggregationLastValue: if kind == InstrumentKindObservableGauge { return nil } // TODO: review need for aggregation check after // https://github.com/open-telemetry/opentelemetry-specification/issues/2710 return errIncompatibleAggregation case AggregationDrop: return nil default: // This is used passed checking for default, it should be an error at this point. return fmt.Errorf("%w: %v", errUnknownAggregation, agg) } } // pipelines is the group of pipelines connecting Readers with instrument // measurement. 
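// Illustrative sketch, not part of the original file: per the compatibility
// table above, a view that forces a last-value aggregation onto a counter is
// rejected by isAggregatorCompatible and surfaces as errIncompatibleAggregation.
//
//	v := metric.NewView(
//		metric.Instrument{Name: "requests", Kind: metric.InstrumentKindCounter},
//		metric.Stream{Aggregation: metric.AggregationLastValue{}},
//	)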
type pipelines []*pipeline func newPipelines(res *resource.Resource, readers []Reader, views []View) pipelines { pipes := make([]*pipeline, 0, len(readers)) for _, r := range readers { p := newPipeline(res, r, views) r.register(p) pipes = append(pipes, p) } return pipes } func (p pipelines) registerCallback(cback func(context.Context) error) { for _, pipe := range p { pipe.addCallback(cback) } } func (p pipelines) registerMultiCallback(c multiCallback) metric.Registration { unregs := make([]func(), len(p)) for i, pipe := range p { unregs[i] = pipe.addMultiCallback(c) } return unregisterFuncs{f: unregs} } type unregisterFuncs struct { embedded.Registration f []func() } func (u unregisterFuncs) Unregister() error { for _, f := range u.f { f() } return nil } // resolver facilitates resolving aggregate functions an instrument calls to // aggregate measurements with while updating all pipelines that need to pull // from those aggregations. type resolver[N int64 | float64] struct { inserters []*inserter[N] } func newResolver[N int64 | float64](p pipelines, vc *cache[string, instID]) resolver[N] { in := make([]*inserter[N], len(p)) for i := range in { in[i] = newInserter[N](p[i], vc) } return resolver[N]{in} } // Aggregators returns the Aggregators that must be updated by the instrument // defined by key. func (r resolver[N]) Aggregators(id Instrument) ([]aggregate.Measure[N], error) { var measures []aggregate.Measure[N] errs := &multierror{} for _, i := range r.inserters { in, err := i.Instrument(id, i.readerDefaultAggregation(id.Kind)) if err != nil { errs.append(err) } measures = append(measures, in...) } return measures, errs.errorOrNil() } // HistogramAggregators returns the histogram Aggregators that must be updated by the instrument // defined by key. If boundaries were provided on instrument instantiation, those take precedence // over boundaries provided by the reader. func (r resolver[N]) HistogramAggregators(id Instrument, boundaries []float64) ([]aggregate.Measure[N], error) { var measures []aggregate.Measure[N] errs := &multierror{} for _, i := range r.inserters { agg := i.readerDefaultAggregation(id.Kind) if histAgg, ok := agg.(AggregationExplicitBucketHistogram); ok && len(boundaries) > 0 { histAgg.Boundaries = boundaries agg = histAgg } in, err := i.Instrument(id, agg) if err != nil { errs.append(err) } measures = append(measures, in...) } return measures, errs.errorOrNil() } type multierror struct { wrapped error errors []string } func (m *multierror) errorOrNil() error { if len(m.errors) == 0 { return nil } if m.wrapped == nil { return errors.New(strings.Join(m.errors, "; ")) } return fmt.Errorf("%w: %s", m.wrapped, strings.Join(m.errors, "; ")) } func (m *multierror) append(err error) { m.errors = append(m.errors, err.Error()) } opentelemetry-go-1.21.0/sdk/metric/pipeline_registry_test.go000066400000000000000000000647621452547353200243120ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
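// Illustrative sketch, not part of the upstream repository: when logConflict
// (earlier in pipeline.go) warns about duplicate metric stream definitions, the
// suggested resolution is a View like the one below that renames one of the
// conflicting streams. The instrument name, description, and new stream name
// are hypothetical; the criteria mirror the "suggested.view" the warning logs.
package main

import (
	"context"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func newProviderWithRename(reader sdkmetric.Reader) *sdkmetric.MeterProvider {
	// Match the newer of the two conflicting instruments by its full identity
	// and give its stream a distinct name so the streams no longer collide.
	rename := sdkmetric.NewView(
		sdkmetric.Instrument{
			Name:        "messages.processed",
			Description: "number of messages processed by the consumer",
			Kind:        sdkmetric.InstrumentKindCounter,
			Unit:        "1",
		},
		sdkmetric.Stream{Name: "consumer.messages.processed"},
	)
	return sdkmetric.NewMeterProvider(
		sdkmetric.WithReader(reader),
		sdkmetric.WithView(rename),
	)
}

func main() {
	ctx := context.Background()
	provider := newProviderWithRename(sdkmetric.NewManualReader())
	defer func() { _ = provider.Shutdown(ctx) }()
}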
package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( "context" "sync/atomic" "testing" "github.com/go-logr/logr" "github.com/go-logr/logr/testr" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/metric/internal/aggregate" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" "go.opentelemetry.io/otel/sdk/resource" ) var defaultView = NewView(Instrument{Name: "*"}, Stream{}) type invalidAggregation struct{} func (invalidAggregation) copy() Aggregation { return invalidAggregation{} } func (invalidAggregation) err() error { return nil } func requireN[N int64 | float64](t *testing.T, n int, m []aggregate.Measure[N], comps []aggregate.ComputeAggregation, err error) { t.Helper() assert.NoError(t, err) require.Len(t, m, n) require.Len(t, comps, n) } func assertSum[N int64 | float64](n int, temp metricdata.Temporality, mono bool, v [2]N) func(*testing.T, []aggregate.Measure[N], []aggregate.ComputeAggregation, error) { return func(t *testing.T, meas []aggregate.Measure[N], comps []aggregate.ComputeAggregation, err error) { t.Helper() requireN[N](t, n, meas, comps, err) for m := 0; m < n; m++ { t.Logf("input/output number: %d", m) in, out := meas[m], comps[m] in(context.Background(), 1, *attribute.EmptySet()) var got metricdata.Aggregation assert.Equal(t, 1, out(&got), "1 data-point expected") metricdatatest.AssertAggregationsEqual(t, metricdata.Sum[N]{ Temporality: temp, IsMonotonic: mono, DataPoints: []metricdata.DataPoint[N]{{Value: v[0]}}, }, got, metricdatatest.IgnoreTimestamp()) in(context.Background(), 3, *attribute.EmptySet()) assert.Equal(t, 1, out(&got), "1 data-point expected") metricdatatest.AssertAggregationsEqual(t, metricdata.Sum[N]{ Temporality: temp, IsMonotonic: mono, DataPoints: []metricdata.DataPoint[N]{{Value: v[1]}}, }, got, metricdatatest.IgnoreTimestamp()) } } } func assertHist[N int64 | float64](temp metricdata.Temporality) func(*testing.T, []aggregate.Measure[N], []aggregate.ComputeAggregation, error) { return func(t *testing.T, meas []aggregate.Measure[N], comps []aggregate.ComputeAggregation, err error) { t.Helper() requireN[N](t, 1, meas, comps, err) in, out := meas[0], comps[0] in(context.Background(), 1, *attribute.EmptySet()) var got metricdata.Aggregation assert.Equal(t, 1, out(&got), "1 data-point expected") buckets := []uint64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} n := 1 metricdatatest.AssertAggregationsEqual(t, metricdata.Histogram[N]{ Temporality: temp, DataPoints: []metricdata.HistogramDataPoint[N]{{ Count: uint64(n), Bounds: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000}, BucketCounts: buckets, Min: metricdata.NewExtrema[N](1), Max: metricdata.NewExtrema[N](1), Sum: N(n), }}, }, got, metricdatatest.IgnoreTimestamp()) in(context.Background(), 1, *attribute.EmptySet()) if temp == metricdata.CumulativeTemporality { buckets[1] = 2 n = 2 } assert.Equal(t, 1, out(&got), "1 data-point expected") metricdatatest.AssertAggregationsEqual(t, metricdata.Histogram[N]{ Temporality: temp, DataPoints: []metricdata.HistogramDataPoint[N]{{ Count: uint64(n), Bounds: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000}, BucketCounts: buckets, Min: metricdata.NewExtrema[N](1), Max: metricdata.NewExtrema[N](1), Sum: N(n), }}, }, got, metricdatatest.IgnoreTimestamp()) } } func assertLastValue[N int64 | float64](t 
*testing.T, meas []aggregate.Measure[N], comps []aggregate.ComputeAggregation, err error) { t.Helper() requireN[N](t, 1, meas, comps, err) in, out := meas[0], comps[0] in(context.Background(), 10, *attribute.EmptySet()) in(context.Background(), 1, *attribute.EmptySet()) var got metricdata.Aggregation assert.Equal(t, 1, out(&got), "1 data-point expected") metricdatatest.AssertAggregationsEqual(t, metricdata.Gauge[N]{ DataPoints: []metricdata.DataPoint[N]{{Value: 1}}, }, got, metricdatatest.IgnoreTimestamp()) } func testCreateAggregators[N int64 | float64](t *testing.T) { changeAggView := NewView( Instrument{Name: "foo"}, Stream{Aggregation: AggregationExplicitBucketHistogram{ Boundaries: []float64{0, 100}, NoMinMax: true, }}, ) renameView := NewView(Instrument{Name: "foo"}, Stream{Name: "bar"}) invalidAggView := NewView( Instrument{Name: "foo"}, Stream{Aggregation: invalidAggregation{}}, ) instruments := []Instrument{ {Name: "foo", Kind: InstrumentKind(0)}, // Unknown kind {Name: "foo", Kind: InstrumentKindCounter}, {Name: "foo", Kind: InstrumentKindUpDownCounter}, {Name: "foo", Kind: InstrumentKindHistogram}, {Name: "foo", Kind: InstrumentKindObservableCounter}, {Name: "foo", Kind: InstrumentKindObservableUpDownCounter}, {Name: "foo", Kind: InstrumentKindObservableGauge}, } testcases := []struct { name string reader Reader views []View inst Instrument validate func(*testing.T, []aggregate.Measure[N], []aggregate.ComputeAggregation, error) }{ { name: "Default/Drop", reader: NewManualReader(WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDrop{} })), inst: instruments[InstrumentKindCounter], validate: func(t *testing.T, meas []aggregate.Measure[N], comps []aggregate.ComputeAggregation, err error) { t.Helper() assert.NoError(t, err) assert.Len(t, meas, 0) assert.Len(t, comps, 0) }, }, { name: "Default/Delta/Sum/NonMonotonic", reader: NewManualReader(WithTemporalitySelector(deltaTemporalitySelector)), inst: instruments[InstrumentKindUpDownCounter], validate: assertSum[N](1, metricdata.DeltaTemporality, false, [2]N{1, 3}), }, { name: "Default/Delta/ExplicitBucketHistogram", reader: NewManualReader(WithTemporalitySelector(deltaTemporalitySelector)), inst: instruments[InstrumentKindHistogram], validate: assertHist[N](metricdata.DeltaTemporality), }, { name: "Default/Delta/PrecomputedSum/Monotonic", reader: NewManualReader(WithTemporalitySelector(deltaTemporalitySelector)), inst: instruments[InstrumentKindObservableCounter], validate: assertSum[N](1, metricdata.DeltaTemporality, true, [2]N{1, 2}), }, { name: "Default/Delta/PrecomputedSum/NonMonotonic", reader: NewManualReader(WithTemporalitySelector(deltaTemporalitySelector)), inst: instruments[InstrumentKindObservableUpDownCounter], validate: assertSum[N](1, metricdata.DeltaTemporality, false, [2]N{1, 2}), }, { name: "Default/Delta/Gauge", reader: NewManualReader(WithTemporalitySelector(deltaTemporalitySelector)), inst: instruments[InstrumentKindObservableGauge], validate: assertLastValue[N], }, { name: "Default/Delta/Sum/Monotonic", reader: NewManualReader(WithTemporalitySelector(deltaTemporalitySelector)), inst: instruments[InstrumentKindCounter], validate: assertSum[N](1, metricdata.DeltaTemporality, true, [2]N{1, 3}), }, { name: "Default/Cumulative/Sum/NonMonotonic", reader: NewManualReader(), inst: instruments[InstrumentKindUpDownCounter], validate: assertSum[N](1, metricdata.CumulativeTemporality, false, [2]N{1, 4}), }, { name: "Default/Cumulative/ExplicitBucketHistogram", reader: NewManualReader(), inst: 
instruments[InstrumentKindHistogram], validate: assertHist[N](metricdata.CumulativeTemporality), }, { name: "Default/Cumulative/PrecomputedSum/Monotonic", reader: NewManualReader(), inst: instruments[InstrumentKindObservableCounter], validate: assertSum[N](1, metricdata.CumulativeTemporality, true, [2]N{1, 3}), }, { name: "Default/Cumulative/PrecomputedSum/NonMonotonic", reader: NewManualReader(), inst: instruments[InstrumentKindObservableUpDownCounter], validate: assertSum[N](1, metricdata.CumulativeTemporality, false, [2]N{1, 3}), }, { name: "Default/Cumulative/Gauge", reader: NewManualReader(), inst: instruments[InstrumentKindObservableGauge], validate: assertLastValue[N], }, { name: "Default/Cumulative/Sum/Monotonic", reader: NewManualReader(), inst: instruments[InstrumentKindCounter], validate: assertSum[N](1, metricdata.CumulativeTemporality, true, [2]N{1, 4}), }, { name: "ViewHasPrecedence", reader: NewManualReader(), views: []View{changeAggView}, inst: instruments[InstrumentKindCounter], validate: func(t *testing.T, meas []aggregate.Measure[N], comps []aggregate.ComputeAggregation, err error) { t.Helper() requireN[N](t, 1, meas, comps, err) in, out := meas[0], comps[0] in(context.Background(), 1, *attribute.EmptySet()) var got metricdata.Aggregation assert.Equal(t, 1, out(&got), "1 data-point expected") metricdatatest.AssertAggregationsEqual(t, metricdata.Histogram[N]{ Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.HistogramDataPoint[N]{{ Count: 1, Bounds: []float64{0, 100}, BucketCounts: []uint64{0, 1, 0}, Sum: 1, }}, }, got, metricdatatest.IgnoreTimestamp()) in(context.Background(), 1, *attribute.EmptySet()) assert.Equal(t, 1, out(&got), "1 data-point expected") metricdatatest.AssertAggregationsEqual(t, metricdata.Histogram[N]{ Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.HistogramDataPoint[N]{{ Count: 2, Bounds: []float64{0, 100}, BucketCounts: []uint64{0, 2, 0}, Sum: 2, }}, }, got, metricdatatest.IgnoreTimestamp()) }, }, { name: "MultipleViews", reader: NewManualReader(), views: []View{defaultView, renameView}, inst: instruments[InstrumentKindCounter], validate: assertSum[N](2, metricdata.CumulativeTemporality, true, [2]N{1, 4}), }, { name: "Reader/Default/Cumulative/Sum/Monotonic", reader: NewManualReader(WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDefault{} })), inst: instruments[InstrumentKindCounter], validate: assertSum[N](1, metricdata.CumulativeTemporality, true, [2]N{1, 4}), }, { name: "Reader/Default/Cumulative/Sum/NonMonotonic", reader: NewManualReader(WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDefault{} })), inst: instruments[InstrumentKindUpDownCounter], validate: assertSum[N](1, metricdata.CumulativeTemporality, false, [2]N{1, 4}), }, { name: "Reader/Default/Cumulative/ExplicitBucketHistogram", reader: NewManualReader(WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDefault{} })), inst: instruments[InstrumentKindHistogram], validate: assertHist[N](metricdata.CumulativeTemporality), }, { name: "Reader/Default/Cumulative/PrecomputedSum/Monotonic", reader: NewManualReader(WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDefault{} })), inst: instruments[InstrumentKindObservableCounter], validate: assertSum[N](1, metricdata.CumulativeTemporality, true, [2]N{1, 3}), }, { name: "Reader/Default/Cumulative/PrecomputedSum/NonMonotonic", reader: NewManualReader(WithAggregationSelector(func(ik 
InstrumentKind) Aggregation { return AggregationDefault{} })), inst: instruments[InstrumentKindObservableUpDownCounter], validate: assertSum[N](1, metricdata.CumulativeTemporality, false, [2]N{1, 3}), }, { name: "Reader/Default/Gauge", reader: NewManualReader(WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationDefault{} })), inst: instruments[InstrumentKindObservableGauge], validate: assertLastValue[N], }, { name: "InvalidAggregation", reader: NewManualReader(), views: []View{invalidAggView}, inst: instruments[InstrumentKindCounter], validate: func(t *testing.T, _ []aggregate.Measure[N], _ []aggregate.ComputeAggregation, err error) { assert.ErrorIs(t, err, errCreatingAggregators) }, }, } for _, tt := range testcases { t.Run(tt.name, func(t *testing.T) { var c cache[string, instID] p := newPipeline(nil, tt.reader, tt.views) i := newInserter[N](p, &c) readerAggregation := i.readerDefaultAggregation(tt.inst.Kind) input, err := i.Instrument(tt.inst, readerAggregation) var comps []aggregate.ComputeAggregation for _, instSyncs := range p.aggregations { for _, i := range instSyncs { comps = append(comps, i.compAgg) } } tt.validate(t, input, comps, err) }) } } func TestCreateAggregators(t *testing.T) { t.Run("Int64", testCreateAggregators[int64]) t.Run("Float64", testCreateAggregators[float64]) } func testInvalidInstrumentShouldPanic[N int64 | float64]() { var c cache[string, instID] i := newInserter[N](newPipeline(nil, NewManualReader(), []View{defaultView}), &c) inst := Instrument{ Name: "foo", Kind: InstrumentKind(255), } readerAggregation := i.readerDefaultAggregation(inst.Kind) _, _ = i.Instrument(inst, readerAggregation) } func TestInvalidInstrumentShouldPanic(t *testing.T) { assert.Panics(t, testInvalidInstrumentShouldPanic[int64]) assert.Panics(t, testInvalidInstrumentShouldPanic[float64]) } func TestPipelinesAggregatorForEachReader(t *testing.T) { r0, r1 := NewManualReader(), NewManualReader() pipes := newPipelines(resource.Empty(), []Reader{r0, r1}, nil) require.Len(t, pipes, 2, "created pipelines") inst := Instrument{Name: "foo", Kind: InstrumentKindCounter} var c cache[string, instID] r := newResolver[int64](pipes, &c) aggs, err := r.Aggregators(inst) require.NoError(t, err, "resolved Aggregators error") require.Len(t, aggs, 2, "instrument aggregators") for i, p := range pipes { var aggN int for _, is := range p.aggregations { aggN += len(is) } assert.Equalf(t, 1, aggN, "pipeline %d: number of instrumentSync", i) } } func TestPipelineRegistryCreateAggregators(t *testing.T) { renameView := NewView(Instrument{Name: "foo"}, Stream{Name: "bar"}) testRdr := NewManualReader() testRdrHistogram := NewManualReader(WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationExplicitBucketHistogram{} })) testCases := []struct { name string readers []Reader views []View inst Instrument wantCount int }{ { name: "No views have no aggregators", inst: Instrument{Name: "foo"}, }, { name: "1 reader 1 view gets 1 aggregator", inst: Instrument{Name: "foo"}, readers: []Reader{testRdr}, wantCount: 1, }, { name: "1 reader 2 views gets 2 aggregator", inst: Instrument{Name: "foo"}, readers: []Reader{testRdr}, views: []View{defaultView, renameView}, wantCount: 2, }, { name: "2 readers 1 view each gets 2 aggregators", inst: Instrument{Name: "foo"}, readers: []Reader{testRdr, testRdrHistogram}, wantCount: 2, }, { name: "2 reader 2 views each gets 4 aggregators", inst: Instrument{Name: "foo"}, readers: []Reader{testRdr, testRdrHistogram}, views: []View{defaultView, 
renameView}, wantCount: 4, }, { name: "An instrument is duplicated in two views share the same aggregator", inst: Instrument{Name: "foo"}, readers: []Reader{testRdr}, views: []View{defaultView, defaultView}, wantCount: 1, }, } for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { p := newPipelines(resource.Empty(), tt.readers, tt.views) testPipelineRegistryResolveIntAggregators(t, p, tt.wantCount) testPipelineRegistryResolveFloatAggregators(t, p, tt.wantCount) testPipelineRegistryResolveIntHistogramAggregators(t, p, tt.wantCount) testPipelineRegistryResolveFloatHistogramAggregators(t, p, tt.wantCount) }) } } func testPipelineRegistryResolveIntAggregators(t *testing.T, p pipelines, wantCount int) { inst := Instrument{Name: "foo", Kind: InstrumentKindCounter} var c cache[string, instID] r := newResolver[int64](p, &c) aggs, err := r.Aggregators(inst) assert.NoError(t, err) require.Len(t, aggs, wantCount) } func testPipelineRegistryResolveFloatAggregators(t *testing.T, p pipelines, wantCount int) { inst := Instrument{Name: "foo", Kind: InstrumentKindCounter} var c cache[string, instID] r := newResolver[float64](p, &c) aggs, err := r.Aggregators(inst) assert.NoError(t, err) require.Len(t, aggs, wantCount) } func testPipelineRegistryResolveIntHistogramAggregators(t *testing.T, p pipelines, wantCount int) { inst := Instrument{Name: "foo", Kind: InstrumentKindCounter} var c cache[string, instID] r := newResolver[int64](p, &c) aggs, err := r.HistogramAggregators(inst, []float64{1, 2, 3}) assert.NoError(t, err) require.Len(t, aggs, wantCount) } func testPipelineRegistryResolveFloatHistogramAggregators(t *testing.T, p pipelines, wantCount int) { inst := Instrument{Name: "foo", Kind: InstrumentKindCounter} var c cache[string, instID] r := newResolver[float64](p, &c) aggs, err := r.HistogramAggregators(inst, []float64{1, 2, 3}) assert.NoError(t, err) require.Len(t, aggs, wantCount) } func TestPipelineRegistryResource(t *testing.T) { v := NewView(Instrument{Name: "bar"}, Stream{Name: "foo"}) readers := []Reader{NewManualReader()} views := []View{defaultView, v} res := resource.NewSchemaless(attribute.String("key", "val")) pipes := newPipelines(res, readers, views) for _, p := range pipes { assert.True(t, res.Equal(p.resource), "resource not set") } } func TestPipelineRegistryCreateAggregatorsIncompatibleInstrument(t *testing.T) { testRdrHistogram := NewManualReader(WithAggregationSelector(func(ik InstrumentKind) Aggregation { return AggregationSum{} })) readers := []Reader{testRdrHistogram} views := []View{defaultView} p := newPipelines(resource.Empty(), readers, views) inst := Instrument{Name: "foo", Kind: InstrumentKindObservableGauge} var vc cache[string, instID] ri := newResolver[int64](p, &vc) intAggs, err := ri.Aggregators(inst) assert.Error(t, err) assert.Len(t, intAggs, 0) rf := newResolver[float64](p, &vc) floatAggs, err := rf.Aggregators(inst) assert.Error(t, err) assert.Len(t, floatAggs, 0) intAggs, err = ri.HistogramAggregators(inst, []float64{1, 2, 3}) assert.Error(t, err) assert.Len(t, intAggs, 0) floatAggs, err = rf.HistogramAggregators(inst, []float64{1, 2, 3}) assert.Error(t, err) assert.Len(t, floatAggs, 0) } type logCounter struct { logr.LogSink errN uint32 infoN uint32 } func (l *logCounter) Info(level int, msg string, keysAndValues ...interface{}) { atomic.AddUint32(&l.infoN, 1) l.LogSink.Info(level, msg, keysAndValues...) 
} func (l *logCounter) InfoN() int { return int(atomic.SwapUint32(&l.infoN, 0)) } func (l *logCounter) Error(err error, msg string, keysAndValues ...interface{}) { atomic.AddUint32(&l.errN, 1) l.LogSink.Error(err, msg, keysAndValues...) } func (l *logCounter) ErrorN() int { return int(atomic.SwapUint32(&l.errN, 0)) } func TestResolveAggregatorsDuplicateErrors(t *testing.T) { tLog := testr.NewWithOptions(t, testr.Options{Verbosity: 6}) l := &logCounter{LogSink: tLog.GetSink()} otel.SetLogger(logr.New(l)) renameView := NewView(Instrument{Name: "bar"}, Stream{Name: "foo"}) readers := []Reader{NewManualReader()} views := []View{defaultView, renameView} fooInst := Instrument{Name: "foo", Kind: InstrumentKindCounter} barInst := Instrument{Name: "bar", Kind: InstrumentKindCounter} p := newPipelines(resource.Empty(), readers, views) var vc cache[string, instID] ri := newResolver[int64](p, &vc) intAggs, err := ri.Aggregators(fooInst) assert.NoError(t, err) assert.Equal(t, 0, l.InfoN(), "no info logging should happen") assert.Len(t, intAggs, 1) // The Rename view should produce the same instrument without an error, the // default view should also cause a new aggregator to be returned. intAggs, err = ri.Aggregators(barInst) assert.NoError(t, err) assert.Equal(t, 0, l.InfoN(), "no info logging should happen") assert.Len(t, intAggs, 2) // Creating a float foo instrument should log a warning because there is an // int foo instrument. rf := newResolver[float64](p, &vc) floatAggs, err := rf.Aggregators(fooInst) assert.NoError(t, err) assert.Equal(t, 1, l.InfoN(), "instrument conflict not logged") assert.Len(t, floatAggs, 1) fooInst = Instrument{Name: "foo-float", Kind: InstrumentKindCounter} floatAggs, err = rf.Aggregators(fooInst) assert.NoError(t, err) assert.Equal(t, 0, l.InfoN(), "no info logging should happen") assert.Len(t, floatAggs, 1) floatAggs, err = rf.Aggregators(barInst) assert.NoError(t, err) // Both the rename and default view aggregators created above should now // conflict. Therefore, 2 warning messages should be logged. 
assert.Equal(t, 2, l.InfoN(), "instrument conflicts not logged") assert.Len(t, floatAggs, 2) } func TestIsAggregatorCompatible(t *testing.T) { var undefinedInstrument InstrumentKind testCases := []struct { name string kind InstrumentKind agg Aggregation want error }{ { name: "SyncCounter and Drop", kind: InstrumentKindCounter, agg: AggregationDrop{}, }, { name: "SyncCounter and LastValue", kind: InstrumentKindCounter, agg: AggregationLastValue{}, want: errIncompatibleAggregation, }, { name: "SyncCounter and Sum", kind: InstrumentKindCounter, agg: AggregationSum{}, }, { name: "SyncCounter and ExplicitBucketHistogram", kind: InstrumentKindCounter, agg: AggregationExplicitBucketHistogram{}, }, { name: "SyncCounter and ExponentialHistogram", kind: InstrumentKindCounter, agg: AggregationBase2ExponentialHistogram{}, }, { name: "SyncUpDownCounter and Drop", kind: InstrumentKindUpDownCounter, agg: AggregationDrop{}, }, { name: "SyncUpDownCounter and LastValue", kind: InstrumentKindUpDownCounter, agg: AggregationLastValue{}, want: errIncompatibleAggregation, }, { name: "SyncUpDownCounter and Sum", kind: InstrumentKindUpDownCounter, agg: AggregationSum{}, }, { name: "SyncUpDownCounter and ExplicitBucketHistogram", kind: InstrumentKindUpDownCounter, agg: AggregationExplicitBucketHistogram{}, }, { name: "SyncUpDownCounter and ExponentialHistogram", kind: InstrumentKindUpDownCounter, agg: AggregationBase2ExponentialHistogram{}, }, { name: "SyncHistogram and Drop", kind: InstrumentKindHistogram, agg: AggregationDrop{}, }, { name: "SyncHistogram and LastValue", kind: InstrumentKindHistogram, agg: AggregationLastValue{}, want: errIncompatibleAggregation, }, { name: "SyncHistogram and Sum", kind: InstrumentKindHistogram, agg: AggregationSum{}, }, { name: "SyncHistogram and ExplicitBucketHistogram", kind: InstrumentKindHistogram, agg: AggregationExplicitBucketHistogram{}, }, { name: "SyncHistogram and ExponentialHistogram", kind: InstrumentKindHistogram, agg: AggregationBase2ExponentialHistogram{}, }, { name: "ObservableCounter and Drop", kind: InstrumentKindObservableCounter, agg: AggregationDrop{}, }, { name: "ObservableCounter and LastValue", kind: InstrumentKindObservableCounter, agg: AggregationLastValue{}, want: errIncompatibleAggregation, }, { name: "ObservableCounter and Sum", kind: InstrumentKindObservableCounter, agg: AggregationSum{}, }, { name: "ObservableCounter and ExplicitBucketHistogram", kind: InstrumentKindObservableCounter, agg: AggregationExplicitBucketHistogram{}, }, { name: "ObservableCounter and ExponentialHistogram", kind: InstrumentKindObservableCounter, agg: AggregationBase2ExponentialHistogram{}, }, { name: "ObservableUpDownCounter and Drop", kind: InstrumentKindObservableUpDownCounter, agg: AggregationDrop{}, }, { name: "ObservableUpDownCounter and LastValue", kind: InstrumentKindObservableUpDownCounter, agg: AggregationLastValue{}, want: errIncompatibleAggregation, }, { name: "ObservableUpDownCounter and Sum", kind: InstrumentKindObservableUpDownCounter, agg: AggregationSum{}, }, { name: "ObservableUpDownCounter and ExplicitBucketHistogram", kind: InstrumentKindObservableUpDownCounter, agg: AggregationExplicitBucketHistogram{}, }, { name: "ObservableUpDownCounter and ExponentialHistogram", kind: InstrumentKindObservableUpDownCounter, agg: AggregationBase2ExponentialHistogram{}, }, { name: "ObservableGauge and Drop", kind: InstrumentKindObservableGauge, agg: AggregationDrop{}, }, { name: "ObservableGauge and LastValue{}", kind: InstrumentKindObservableGauge, agg: 
AggregationLastValue{}, }, { name: "ObservableGauge and Sum", kind: InstrumentKindObservableGauge, agg: AggregationSum{}, want: errIncompatibleAggregation, }, { name: "ObservableGauge and ExplicitBucketHistogram", kind: InstrumentKindObservableGauge, agg: AggregationExplicitBucketHistogram{}, }, { name: "ObservableGauge and ExponentialHistogram", kind: InstrumentKindObservableGauge, agg: AggregationBase2ExponentialHistogram{}, }, { name: "unknown kind with Sum should error", kind: undefinedInstrument, agg: AggregationSum{}, want: errIncompatibleAggregation, }, { name: "unknown kind with LastValue should error", kind: undefinedInstrument, agg: AggregationLastValue{}, want: errIncompatibleAggregation, }, { name: "unknown kind with Histogram should error", kind: undefinedInstrument, agg: AggregationExplicitBucketHistogram{}, want: errIncompatibleAggregation, }, { name: "unknown kind with Histogram should error", kind: undefinedInstrument, agg: AggregationBase2ExponentialHistogram{}, want: errIncompatibleAggregation, }, } for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { err := isAggregatorCompatible(tt.kind, tt.agg) assert.ErrorIs(t, err, tt.want) }) } } opentelemetry-go-1.21.0/sdk/metric/pipeline_test.go000066400000000000000000000243251452547353200223510ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
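// Illustrative sketch, not part of the upstream repository: a reader-level
// AggregationSelector has to stay within the compatibility table enforced by
// isAggregatorCompatible (pipeline.go) and exercised by the tests above. The
// selector below switches Counter and Histogram instruments to an
// explicit-bucket histogram (a compatible pairing) and defers to the default
// selection for every other kind. The bucket boundaries are hypothetical.
package main

import (
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func histogramPreferringSelector(ik sdkmetric.InstrumentKind) sdkmetric.Aggregation {
	switch ik {
	case sdkmetric.InstrumentKindCounter, sdkmetric.InstrumentKindHistogram:
		// Compatible per the table: Counter and Histogram both accept an
		// explicit-bucket histogram aggregation.
		return sdkmetric.AggregationExplicitBucketHistogram{
			Boundaries: []float64{0, 5, 10, 25, 50, 100, 250, 500, 1000},
		}
	default:
		// LastValue for observable gauges, Sum for the remaining kinds.
		return sdkmetric.DefaultAggregationSelector(ik)
	}
}

func main() {
	reader := sdkmetric.NewManualReader(
		sdkmetric.WithAggregationSelector(histogramPreferringSelector),
	)
	_ = sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
}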
package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( "context" "fmt" "log" "os" "strings" "sync" "testing" "github.com/go-logr/logr" "github.com/go-logr/logr/funcr" "github.com/go-logr/stdr" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" "go.opentelemetry.io/otel/sdk/resource" ) func testSumAggregateOutput(dest *metricdata.Aggregation) int { *dest = metricdata.Sum[int64]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: false, DataPoints: []metricdata.DataPoint[int64]{{Value: 1}}, } return 1 } func TestNewPipeline(t *testing.T) { pipe := newPipeline(nil, nil, nil) output := metricdata.ResourceMetrics{} err := pipe.produce(context.Background(), &output) require.NoError(t, err) assert.Equal(t, resource.Empty(), output.Resource) assert.Len(t, output.ScopeMetrics, 0) iSync := instrumentSync{"name", "desc", "1", testSumAggregateOutput} assert.NotPanics(t, func() { pipe.addSync(instrumentation.Scope{}, iSync) }) require.NotPanics(t, func() { pipe.addMultiCallback(func(context.Context) error { return nil }) }) err = pipe.produce(context.Background(), &output) require.NoError(t, err) assert.Equal(t, resource.Empty(), output.Resource) require.Len(t, output.ScopeMetrics, 1) require.Len(t, output.ScopeMetrics[0].Metrics, 1) } func TestPipelineUsesResource(t *testing.T) { res := resource.NewWithAttributes("noSchema", attribute.String("test", "resource")) pipe := newPipeline(res, nil, nil) output := metricdata.ResourceMetrics{} err := pipe.produce(context.Background(), &output) assert.NoError(t, err) assert.Equal(t, res, output.Resource) } func TestPipelineConcurrentSafe(t *testing.T) { pipe := newPipeline(nil, nil, nil) ctx := context.Background() var output metricdata.ResourceMetrics var wg sync.WaitGroup const threads = 2 for i := 0; i < threads; i++ { wg.Add(1) go func() { defer wg.Done() _ = pipe.produce(ctx, &output) }() wg.Add(1) go func(n int) { defer wg.Done() name := fmt.Sprintf("name %d", n) sync := instrumentSync{name, "desc", "1", testSumAggregateOutput} pipe.addSync(instrumentation.Scope{}, sync) }(i) wg.Add(1) go func() { defer wg.Done() pipe.addMultiCallback(func(context.Context) error { return nil }) }() } wg.Wait() } func TestDefaultViewImplicit(t *testing.T) { t.Run("Int64", testDefaultViewImplicit[int64]()) t.Run("Float64", testDefaultViewImplicit[float64]()) } func testDefaultViewImplicit[N int64 | float64]() func(t *testing.T) { inst := Instrument{ Name: "requests", Description: "count of requests received", Kind: InstrumentKindCounter, Unit: "1", } return func(t *testing.T) { reader := NewManualReader() tests := []struct { name string pipe *pipeline }{ { name: "NoView", pipe: newPipeline(nil, reader, nil), }, { name: "NoMatchingView", pipe: newPipeline(nil, reader, []View{ NewView(Instrument{Name: "foo"}, Stream{Name: "bar"}), }), }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { var c cache[string, instID] i := newInserter[N](test.pipe, &c) readerAggregation := i.readerDefaultAggregation(inst.Kind) got, err := i.Instrument(inst, readerAggregation) require.NoError(t, err) assert.Len(t, got, 1, "default view not applied") for _, in := range got { in(context.Background(), 1, *attribute.EmptySet()) } out := metricdata.ResourceMetrics{} err = test.pipe.produce(context.Background(), 
&out) require.NoError(t, err) require.Len(t, out.ScopeMetrics, 1, "Aggregator not registered with pipeline") sm := out.ScopeMetrics[0] require.Len(t, sm.Metrics, 1, "metrics not produced from default view") metricdatatest.AssertEqual(t, metricdata.Metrics{ Name: inst.Name, Description: inst.Description, Unit: "1", Data: metricdata.Sum[N]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: true, DataPoints: []metricdata.DataPoint[N]{{Value: N(1)}}, }, }, sm.Metrics[0], metricdatatest.IgnoreTimestamp()) }) } } } func TestLogConflictName(t *testing.T) { testcases := []struct { existing, name string conflict bool }{ { existing: "requestCount", name: "requestCount", conflict: false, }, { existing: "requestCount", name: "requestDuration", conflict: false, }, { existing: "requestCount", name: "requestcount", conflict: true, }, { existing: "requestCount", name: "REQUESTCOUNT", conflict: true, }, { existing: "requestCount", name: "rEqUeStCoUnT", conflict: true, }, } var msg string t.Cleanup(func(orig logr.Logger) func() { otel.SetLogger(funcr.New(func(_, args string) { msg = args }, funcr.Options{Verbosity: 20})) return func() { otel.SetLogger(orig) } }(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))) for _, tc := range testcases { var vc cache[string, instID] name := strings.ToLower(tc.existing) _ = vc.Lookup(name, func() instID { return instID{Name: tc.existing} }) i := newInserter[int64](newPipeline(nil, nil, nil), &vc) i.logConflict(instID{Name: tc.name}) if tc.conflict { assert.Containsf( t, msg, "duplicate metric stream definitions", "warning not logged for conflicting names: %s, %s", tc.existing, tc.name, ) } else { assert.Equalf( t, msg, "", "warning logged for non-conflicting names: %s, %s", tc.existing, tc.name, ) } // Reset. msg = "" } } func TestLogConflictSuggestView(t *testing.T) { var msg string t.Cleanup(func(orig logr.Logger) func() { otel.SetLogger(funcr.New(func(_, args string) { msg = args }, funcr.Options{Verbosity: 20})) return func() { otel.SetLogger(orig) } }(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))) orig := instID{ Name: "requestCount", Description: "number of requests", Kind: InstrumentKindCounter, Unit: "1", Number: "int64", } var vc cache[string, instID] name := strings.ToLower(orig.Name) _ = vc.Lookup(name, func() instID { return orig }) i := newInserter[int64](newPipeline(nil, nil, nil), &vc) viewSuggestion := func(inst instID, stream string) string { return `"NewView(Instrument{` + `Name: \"` + inst.Name + `\", Description: \"` + inst.Description + `\", Kind: \"InstrumentKind` + inst.Kind.String() + `\", Unit: \"` + inst.Unit + `\"}, ` + stream + `)"` } t.Run("Name", func(t *testing.T) { inst := instID{ Name: "requestcount", Description: orig.Description, Kind: orig.Kind, Unit: orig.Unit, Number: orig.Number, } i.logConflict(inst) assert.Containsf(t, msg, viewSuggestion( inst, `Stream{Name: \"{{NEW_NAME}}\"}`, ), "no suggestion logged: %v", inst) // Reset. msg = "" }) t.Run("Description", func(t *testing.T) { inst := instID{ Name: orig.Name, Description: "alt", Kind: orig.Kind, Unit: orig.Unit, Number: orig.Number, } i.logConflict(inst) assert.Containsf(t, msg, viewSuggestion( inst, `Stream{Description: \"`+orig.Description+`\"}`, ), "no suggestion logged: %v", inst) // Reset. 
msg = "" }) t.Run("Kind", func(t *testing.T) { inst := instID{ Name: orig.Name, Description: orig.Description, Kind: InstrumentKindHistogram, Unit: orig.Unit, Number: orig.Number, } i.logConflict(inst) assert.Containsf(t, msg, viewSuggestion( inst, `Stream{Name: \"{{NEW_NAME}}\"}`, ), "no suggestion logged: %v", inst) // Reset. msg = "" }) t.Run("Unit", func(t *testing.T) { inst := instID{ Name: orig.Name, Description: orig.Description, Kind: orig.Kind, Unit: "ms", Number: orig.Number, } i.logConflict(inst) assert.NotContains(t, msg, "NewView", "suggestion logged: %v", inst) // Reset. msg = "" }) t.Run("Number", func(t *testing.T) { inst := instID{ Name: orig.Name, Description: orig.Description, Kind: orig.Kind, Unit: orig.Unit, Number: "float64", } i.logConflict(inst) assert.NotContains(t, msg, "NewView", "suggestion logged: %v", inst) // Reset. msg = "" }) } func TestInserterCachedAggregatorNameConflict(t *testing.T) { const name = "requestCount" scope := instrumentation.Scope{Name: "pipeline_test"} kind := InstrumentKindCounter stream := Stream{ Name: name, Aggregation: AggregationSum{}, } var vc cache[string, instID] pipe := newPipeline(nil, NewManualReader(), nil) i := newInserter[int64](pipe, &vc) readerAggregation := i.readerDefaultAggregation(kind) _, origID, err := i.cachedAggregator(scope, kind, stream, readerAggregation) require.NoError(t, err) require.Len(t, pipe.aggregations, 1) require.Contains(t, pipe.aggregations, scope) iSync := pipe.aggregations[scope] require.Len(t, iSync, 1) require.Equal(t, name, iSync[0].name) stream.Name = "RequestCount" _, id, err := i.cachedAggregator(scope, kind, stream, readerAggregation) require.NoError(t, err) assert.Equal(t, origID, id, "multiple aggregators for equivalent name") assert.Len(t, pipe.aggregations, 1, "additional scope added") require.Contains(t, pipe.aggregations, scope, "original scope removed") iSync = pipe.aggregations[scope] require.Len(t, iSync, 1, "registered instrumentSync changed") assert.Equal(t, name, iSync[0].name, "stream name changed") } opentelemetry-go-1.21.0/sdk/metric/provider.go000066400000000000000000000121631452547353200213340ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( "context" "sync/atomic" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/embedded" "go.opentelemetry.io/otel/metric/noop" "go.opentelemetry.io/otel/sdk/instrumentation" ) // MeterProvider handles the creation and coordination of Meters. All Meters // created by a MeterProvider will be associated with the same Resource, have // the same Views applied to them, and have their produced metric telemetry // passed to the configured Readers. 
type MeterProvider struct { embedded.MeterProvider pipes pipelines meters cache[instrumentation.Scope, *meter] forceFlush, shutdown func(context.Context) error stopped atomic.Bool } // Compile-time check MeterProvider implements metric.MeterProvider. var _ metric.MeterProvider = (*MeterProvider)(nil) // NewMeterProvider returns a new and configured MeterProvider. // // By default, the returned MeterProvider is configured with the default // Resource and no Readers. Readers cannot be added after a MeterProvider is // created. This means the returned MeterProvider, one created with no // Readers, will perform no operations. func NewMeterProvider(options ...Option) *MeterProvider { conf := newConfig(options) flush, sdown := conf.readerSignals() mp := &MeterProvider{ pipes: newPipelines(conf.res, conf.readers, conf.views), forceFlush: flush, shutdown: sdown, } // Log after creation so all readers correctly show they are registered. global.Info("MeterProvider created", "Resource", conf.res, "Readers", conf.readers, "Views", len(conf.views), ) return mp } // Meter returns a Meter with the given name and configured with options. // // The name should be the name of the instrumentation scope creating // telemetry. This name may be the same as the instrumented code only if that // code provides built-in instrumentation. // // Calls to the Meter method after Shutdown has been called will return Meters // that perform no operations. // // This method is safe to call concurrently. func (mp *MeterProvider) Meter(name string, options ...metric.MeterOption) metric.Meter { if name == "" { global.Warn("Invalid Meter name.", "name", name) } if mp.stopped.Load() { return noop.Meter{} } c := metric.NewMeterConfig(options...) s := instrumentation.Scope{ Name: name, Version: c.InstrumentationVersion(), SchemaURL: c.SchemaURL(), } global.Info("Meter created", "Name", s.Name, "Version", s.Version, "SchemaURL", s.SchemaURL, ) return mp.meters.Lookup(s, func() *meter { return newMeter(s, mp.pipes) }) } // ForceFlush flushes all pending telemetry. // // This method honors the deadline or cancellation of ctx. An appropriate // error will be returned in these situations. There is no guarantee that all // telemetry will be flushed or all resources will have been released in these // situations. // // ForceFlush calls ForceFlush(context.Context) error // on all Readers that implement this method. // // This method is safe to call concurrently. func (mp *MeterProvider) ForceFlush(ctx context.Context) error { if mp.forceFlush != nil { return mp.forceFlush(ctx) } return nil } // Shutdown shuts down the MeterProvider, flushing all pending telemetry and // releasing any held computational resources. // // This call is idempotent. The first call will perform all flush and // releasing operations. Subsequent calls will perform no action and will // return an error stating this. // // Measurements made by instruments from meters this MeterProvider created // will not be exported after Shutdown is called. // // This method honors the deadline or cancellation of ctx. An appropriate // error will be returned in these situations. There is no guarantee that all // telemetry will be flushed or all resources will have been released in these // situations. // // This method is safe to call concurrently.
func (mp *MeterProvider) Shutdown(ctx context.Context) error { // Even though it may seem like there is a synchronization issue between the // call to `Store` and checking `shutdown`, the Go concurrency model ensures // that is not the case, as all the atomic operations executed in a program // behave as though executed in some sequentially consistent order. This // definition provides the same semantics as C++'s sequentially consistent // atomics and Java's volatile variables. // See https://go.dev/ref/mem#atomic and https://pkg.go.dev/sync/atomic. mp.stopped.Store(true) if mp.shutdown != nil { return mp.shutdown(ctx) } return nil } opentelemetry-go-1.21.0/sdk/metric/provider_test.go000066400000000000000000000111151452547353200223670ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric import ( "context" "fmt" "strings" "testing" "github.com/go-logr/logr/funcr" "github.com/go-logr/logr/testr" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel" api "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/metric/noop" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) func TestMeterConcurrentSafe(t *testing.T) { const name = "TestMeterConcurrentSafe meter" mp := NewMeterProvider() done := make(chan struct{}) go func() { defer close(done) _ = mp.Meter(name) }() _ = mp.Meter(name) <-done } func TestForceFlushConcurrentSafe(t *testing.T) { mp := NewMeterProvider() done := make(chan struct{}) go func() { defer close(done) _ = mp.ForceFlush(context.Background()) }() _ = mp.ForceFlush(context.Background()) <-done } func TestShutdownConcurrentSafe(t *testing.T) { mp := NewMeterProvider() done := make(chan struct{}) go func() { defer close(done) _ = mp.Shutdown(context.Background()) }() _ = mp.Shutdown(context.Background()) <-done } func TestMeterAndShutdownConcurrentSafe(t *testing.T) { const name = "TestMeterAndShutdownConcurrentSafe meter" mp := NewMeterProvider() done := make(chan struct{}) go func() { defer close(done) _ = mp.Shutdown(context.Background()) }() _ = mp.Meter(name) <-done } func TestMeterDoesNotPanicForEmptyMeterProvider(t *testing.T) { mp := MeterProvider{} assert.NotPanics(t, func() { _ = mp.Meter("") }) } func TestForceFlushDoesNotPanicForEmptyMeterProvider(t *testing.T) { mp := MeterProvider{} assert.NotPanics(t, func() { _ = mp.ForceFlush(context.Background()) }) } func TestShutdownDoesNotPanicForEmptyMeterProvider(t *testing.T) { mp := MeterProvider{} assert.NotPanics(t, func() { _ = mp.Shutdown(context.Background()) }) } func TestMeterProviderReturnsSameMeter(t *testing.T) { mp := MeterProvider{} mtr := mp.Meter("") assert.Same(t, mtr, mp.Meter("")) assert.NotSame(t, mtr, mp.Meter("diff")) } func TestEmptyMeterName(t *testing.T) { var buf strings.Builder warnLevel := 1 l := funcr.New(func(prefix, args string) { _, _ = buf.WriteString(fmt.Sprint(prefix, args)) }, funcr.Options{Verbosity: warnLevel}) otel.SetLogger(l) mp := NewMeterProvider() 
mp.Meter("") assert.Contains(t, buf.String(), `"level"=1 "msg"="Invalid Meter name." "name"=""`) } func TestMeterProviderReturnsNoopMeterAfterShutdown(t *testing.T) { mp := NewMeterProvider() m := mp.Meter("") _, ok := m.(noop.Meter) assert.False(t, ok, "Meter from running MeterProvider is NoOp") require.NoError(t, mp.Shutdown(context.Background())) m = mp.Meter("") _, ok = m.(noop.Meter) assert.Truef(t, ok, "Meter from shutdown MeterProvider is not NoOp: %T", m) } func TestMeterProviderMixingOnRegisterErrors(t *testing.T) { otel.SetLogger(testr.New(t)) rdr0 := NewManualReader() mp0 := NewMeterProvider(WithReader(rdr0)) rdr1 := NewManualReader() mp1 := NewMeterProvider(WithReader(rdr1)) // Meters with the same scope but different MeterProviders. m0 := mp0.Meter("TestMeterProviderMixingOnRegisterErrors") m1 := mp1.Meter("TestMeterProviderMixingOnRegisterErrors") m0Gauge, err := m0.Float64ObservableGauge("float64Gauge") require.NoError(t, err) m1Gauge, err := m1.Int64ObservableGauge("int64Gauge") require.NoError(t, err) _, err = m0.RegisterCallback( func(_ context.Context, o api.Observer) error { o.ObserveFloat64(m0Gauge, 2) // Observe an instrument from a different MeterProvider. o.ObserveInt64(m1Gauge, 1) return nil }, m0Gauge, m1Gauge, ) assert.Error( t, err, "Instrument registered with Meter from different MeterProvider", ) var data metricdata.ResourceMetrics _ = rdr0.Collect(context.Background(), &data) // Only the metrics from mp0 should be produced. assert.Len(t, data.ScopeMetrics, 1) err = rdr1.Collect(context.Background(), &data) assert.NoError(t, err, "Errored when collect should be a noop") assert.Len( t, data.ScopeMetrics, 0, "Metrics produced for instrument collected by different MeterProvider", ) } opentelemetry-go-1.21.0/sdk/metric/reader.go000066400000000000000000000172641452547353200207530ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( "context" "fmt" "go.opentelemetry.io/otel/sdk/metric/metricdata" ) // errDuplicateRegister is logged by a Reader when an attempt to registered it // more than once occurs. var errDuplicateRegister = fmt.Errorf("duplicate reader registration") // ErrReaderNotRegistered is returned if Collect or Shutdown are called before // the reader is registered with a MeterProvider. var ErrReaderNotRegistered = fmt.Errorf("reader is not registered") // ErrReaderShutdown is returned if Collect or Shutdown are called after a // reader has been Shutdown once. var ErrReaderShutdown = fmt.Errorf("reader is shutdown") // errNonPositiveDuration is logged when an environmental variable // has non-positive value. var errNonPositiveDuration = fmt.Errorf("non-positive duration") // Reader is the interface used between the SDK and an // exporter. Control flow is bi-directional through the // Reader, since the SDK initiates ForceFlush and Shutdown // while the exporter initiates collection. 
The Register() method here // informs the Reader that it can begin reading, signaling the // start of bi-directional control flow. // // Typically, push-based exporters that are periodic will // implement PeriodicExporter themselves and construct a // PeriodicReader to satisfy this interface. // // Pull-based exporters will typically implement Register // themselves, since they read on demand. // // Warning: methods may be added to this interface in minor releases. type Reader interface { // register registers a Reader with a MeterProvider. // The producer argument allows the Reader to signal the SDK to collect // and send aggregated metric measurements. register(sdkProducer) // temporality reports the Temporality for the instrument kind provided. // // This method needs to be concurrent safe with itself and all the other // Reader methods. temporality(InstrumentKind) metricdata.Temporality // aggregation returns what Aggregation to use for an instrument kind. // // This method needs to be concurrent safe with itself and all the other // Reader methods. aggregation(InstrumentKind) Aggregation // nolint:revive // import-shadow for method scoped by type. // Collect gathers and returns all metric data related to the Reader from // the SDK and stores it in rm. An error is returned if this is called // after Shutdown or if rm is nil. // // This method needs to be concurrent safe, and the cancellation of the // passed context is expected to be honored. Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. // Shutdown flushes all metric measurements held in an export pipeline and releases any // held computational resources. // // The deadline or cancellation of the passed context is honored. An appropriate // error will be returned in these situations. There is no guarantee that all // telemetry will be flushed or all resources will have been released in these // situations. // // After Shutdown is called, calls to Collect will perform no operation and instead will return // an error indicating the shutdown state. // // This method needs to be concurrent safe. Shutdown(context.Context) error // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. } // sdkProducer produces metrics for a Reader. type sdkProducer interface { // produce returns aggregated metrics from a single collection. // // This method is safe to call concurrently. produce(context.Context, *metricdata.ResourceMetrics) error } // Producer produces metrics for a Reader from an external source. type Producer interface { // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. // Produce returns aggregated metrics from an external source. // // This method should be safe to call concurrently. Produce(context.Context) ([]metricdata.ScopeMetrics, error) // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. } // produceHolder is used as an atomic.Value to wrap the non-concrete producer // type. type produceHolder struct { produce func(context.Context, *metricdata.ResourceMetrics) error } // shutdownProducer always produces an ErrReaderShutdown error. type shutdownProducer struct{} // produce returns an ErrReaderShutdown error.
func (p shutdownProducer) produce(context.Context, *metricdata.ResourceMetrics) error { return ErrReaderShutdown } // TemporalitySelector selects the temporality to use based on the InstrumentKind. type TemporalitySelector func(InstrumentKind) metricdata.Temporality // DefaultTemporalitySelector is the default TemporalitySelector used if // WithTemporalitySelector is not provided. CumulativeTemporality will be used // for all instrument kinds if this TemporalitySelector is used. func DefaultTemporalitySelector(InstrumentKind) metricdata.Temporality { return metricdata.CumulativeTemporality } // AggregationSelector selects the aggregation and the parameters to use for // that aggregation based on the InstrumentKind. // // If the Aggregation returned is nil or DefaultAggregation, the selection from // DefaultAggregationSelector will be used. type AggregationSelector func(InstrumentKind) Aggregation // DefaultAggregationSelector returns the default aggregation and parameters // that will be used to summarize measurement made from an instrument of // InstrumentKind. This AggregationSelector using the following selection // mapping: Counter ⇨ Sum, Observable Counter ⇨ Sum, UpDownCounter ⇨ Sum, // Observable UpDownCounter ⇨ Sum, Observable Gauge ⇨ LastValue, // Histogram ⇨ ExplicitBucketHistogram. func DefaultAggregationSelector(ik InstrumentKind) Aggregation { switch ik { case InstrumentKindCounter, InstrumentKindUpDownCounter, InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter: return AggregationSum{} case InstrumentKindObservableGauge: return AggregationLastValue{} case InstrumentKindHistogram: return AggregationExplicitBucketHistogram{ Boundaries: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000}, NoMinMax: false, } } panic("unknown instrument kind") } // ReaderOption is an option which can be applied to manual or Periodic // readers. type ReaderOption interface { PeriodicReaderOption ManualReaderOption } // WithProducers registers producers as an external Producer of metric data // for this Reader. func WithProducer(p Producer) ReaderOption { return producerOption{p: p} } type producerOption struct { p Producer } // applyManual returns a manualReaderConfig with option applied. func (o producerOption) applyManual(c manualReaderConfig) manualReaderConfig { c.producers = append(c.producers, o.p) return c } // applyPeriodic returns a periodicReaderConfig with option applied. func (o producerOption) applyPeriodic(c periodicReaderConfig) periodicReaderConfig { c.producers = append(c.producers, o.p) return c } opentelemetry-go-1.21.0/sdk/metric/reader_test.go000066400000000000000000000235361452547353200220110ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
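// Illustrative sketch, not part of the upstream repository: a minimal external
// Producer, as described by the Producer interface in reader.go, registered on
// a reader with WithProducer so its metrics are included in collection. The
// scope name, metric name, and value are hypothetical.
package main

import (
	"context"
	"log"
	"time"

	"go.opentelemetry.io/otel/sdk/instrumentation"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

// bridgeProducer injects metrics gathered outside the SDK into collection.
type bridgeProducer struct{}

func (bridgeProducer) Produce(context.Context) ([]metricdata.ScopeMetrics, error) {
	now := time.Now()
	return []metricdata.ScopeMetrics{{
		Scope: instrumentation.Scope{Name: "example/bridge"},
		Metrics: []metricdata.Metrics{{
			Name: "external.queue.depth",
			Unit: "1",
			Data: metricdata.Gauge[int64]{
				DataPoints: []metricdata.DataPoint[int64]{{Time: now, Value: 7}},
			},
		}},
	}}, nil
}

func main() {
	ctx := context.Background()
	reader := sdkmetric.NewManualReader(sdkmetric.WithProducer(bridgeProducer{}))
	provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
	defer func() { _ = provider.Shutdown(ctx) }()

	var rm metricdata.ResourceMetrics
	if err := reader.Collect(ctx, &rm); err != nil {
		log.Fatal(err)
	}
	log.Printf("collected %d scope(s), including the bridged one", len(rm.ScopeMetrics))
}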
package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( "context" "sync" "testing" "time" "github.com/go-logr/logr/testr" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/metric/metricdata" "go.opentelemetry.io/otel/sdk/resource" ) type readerTestSuite struct { suite.Suite Factory func(...ReaderOption) Reader Reader Reader } func (ts *readerTestSuite) SetupSuite() { otel.SetLogger(testr.New(ts.T())) } func (ts *readerTestSuite) TearDownTest() { // Ensure Reader is allowed attempt to clean up. _ = ts.Reader.Shutdown(context.Background()) } func (ts *readerTestSuite) TestErrorForNotRegistered() { ts.Reader = ts.Factory() err := ts.Reader.Collect(context.Background(), &metricdata.ResourceMetrics{}) ts.ErrorIs(err, ErrReaderNotRegistered) } func (ts *readerTestSuite) TestSDKProducer() { ts.Reader = ts.Factory() ts.Reader.register(testSDKProducer{}) m := metricdata.ResourceMetrics{} err := ts.Reader.Collect(context.Background(), &m) ts.NoError(err) ts.Equal(testResourceMetricsA, m) } func (ts *readerTestSuite) TestExternalProducer() { ts.Reader = ts.Factory(WithProducer(testExternalProducer{})) ts.Reader.register(testSDKProducer{}) m := metricdata.ResourceMetrics{} err := ts.Reader.Collect(context.Background(), &m) ts.NoError(err) ts.Equal(testResourceMetricsAB, m) } func (ts *readerTestSuite) TestCollectAfterShutdown() { ts.Reader = ts.Factory(WithProducer(testExternalProducer{})) ctx := context.Background() ts.Reader.register(testSDKProducer{}) ts.Require().NoError(ts.Reader.Shutdown(ctx)) m := metricdata.ResourceMetrics{} err := ts.Reader.Collect(ctx, &m) ts.ErrorIs(err, ErrReaderShutdown) ts.Equal(metricdata.ResourceMetrics{}, m) } func (ts *readerTestSuite) TestShutdownTwice() { ts.Reader = ts.Factory(WithProducer(testExternalProducer{})) ctx := context.Background() ts.Reader.register(testSDKProducer{}) ts.Require().NoError(ts.Reader.Shutdown(ctx)) ts.ErrorIs(ts.Reader.Shutdown(ctx), ErrReaderShutdown) } func (ts *readerTestSuite) TestMultipleRegister() { ts.Reader = ts.Factory() p0 := testSDKProducer{ produceFunc: func(ctx context.Context, rm *metricdata.ResourceMetrics) error { // Differentiate this producer from the second by returning an // error. *rm = testResourceMetricsA return assert.AnError }, } p1 := testSDKProducer{} ts.Reader.register(p0) // This should be ignored. 
ts.Reader.register(p1) err := ts.Reader.Collect(context.Background(), &metricdata.ResourceMetrics{}) ts.Equal(assert.AnError, err) } func (ts *readerTestSuite) TestExternalProducerPartialSuccess() { ts.Reader = ts.Factory( WithProducer(testExternalProducer{ produceFunc: func(ctx context.Context) ([]metricdata.ScopeMetrics, error) { return []metricdata.ScopeMetrics{}, assert.AnError }, }), WithProducer(testExternalProducer{ produceFunc: func(ctx context.Context) ([]metricdata.ScopeMetrics, error) { return []metricdata.ScopeMetrics{testScopeMetricsB}, nil }, }), ) ts.Reader.register(testSDKProducer{}) m := metricdata.ResourceMetrics{} err := ts.Reader.Collect(context.Background(), &m) ts.Equal(assert.AnError, err) ts.Equal(testResourceMetricsAB, m) } func (ts *readerTestSuite) TestSDKFailureBlocksExternalProducer() { ts.Reader = ts.Factory(WithProducer(testExternalProducer{})) ts.Reader.register(testSDKProducer{ produceFunc: func(ctx context.Context, rm *metricdata.ResourceMetrics) error { *rm = metricdata.ResourceMetrics{} return assert.AnError }, }) m := metricdata.ResourceMetrics{} err := ts.Reader.Collect(context.Background(), &m) ts.Equal(assert.AnError, err) ts.Equal(metricdata.ResourceMetrics{}, m) } func (ts *readerTestSuite) TestMethodConcurrentSafe() { ts.Reader = ts.Factory(WithProducer(testExternalProducer{})) // Requires the race-detector (a default test option for the project). // All reader methods should be concurrent-safe. ts.Reader.register(testSDKProducer{}) ctx := context.Background() var wg sync.WaitGroup const threads = 2 for i := 0; i < threads; i++ { wg.Add(1) go func() { defer wg.Done() _ = ts.Reader.temporality(InstrumentKindCounter) }() wg.Add(1) go func() { defer wg.Done() _ = ts.Reader.aggregation(InstrumentKindCounter) }() wg.Add(1) go func() { defer wg.Done() _ = ts.Reader.Collect(ctx, &metricdata.ResourceMetrics{}) }() if f, ok := ts.Reader.(interface{ ForceFlush(context.Context) error }); ok { wg.Add(1) go func() { defer wg.Done() _ = f.ForceFlush(ctx) }() } wg.Add(1) go func() { defer wg.Done() _ = ts.Reader.Shutdown(ctx) }() } wg.Wait() } func (ts *readerTestSuite) TestShutdownBeforeRegister() { ts.Reader = ts.Factory(WithProducer(testExternalProducer{})) ctx := context.Background() ts.Require().NoError(ts.Reader.Shutdown(ctx)) // Registering after shutdown should not revert the shutdown. 
ts.Reader.register(testSDKProducer{}) m := metricdata.ResourceMetrics{} err := ts.Reader.Collect(ctx, &m) ts.ErrorIs(err, ErrReaderShutdown) ts.Equal(metricdata.ResourceMetrics{}, m) } func (ts *readerTestSuite) TestCollectNilResourceMetricError() { ts.Reader = ts.Factory() ctx := context.Background() ts.Assert().Error(ts.Reader.Collect(ctx, nil)) } var testScopeMetricsA = metricdata.ScopeMetrics{ Scope: instrumentation.Scope{Name: "sdk/metric/test/reader"}, Metrics: []metricdata.Metrics{{ Name: "fake data", Description: "Data used to test a reader", Unit: "1", Data: metricdata.Sum[int64]{ Temporality: metricdata.CumulativeTemporality, IsMonotonic: true, DataPoints: []metricdata.DataPoint[int64]{{ Attributes: attribute.NewSet(attribute.String("user", "alice")), StartTime: time.Now(), Time: time.Now().Add(time.Second), Value: -1, }}, }, }}, } var testScopeMetricsB = metricdata.ScopeMetrics{ Scope: instrumentation.Scope{Name: "sdk/metric/test/reader/external"}, Metrics: []metricdata.Metrics{{ Name: "fake scope data", Description: "Data used to test a Producer reader", Unit: "ms", Data: metricdata.Gauge[int64]{ DataPoints: []metricdata.DataPoint[int64]{{ Attributes: attribute.NewSet(attribute.String("user", "ben")), StartTime: time.Now(), Time: time.Now().Add(time.Second), Value: 10, }}, }, }}, } var testResourceMetricsA = metricdata.ResourceMetrics{ Resource: resource.NewSchemaless(attribute.String("test", "Reader")), ScopeMetrics: []metricdata.ScopeMetrics{testScopeMetricsA}, } var testResourceMetricsAB = metricdata.ResourceMetrics{ Resource: resource.NewSchemaless(attribute.String("test", "Reader")), ScopeMetrics: []metricdata.ScopeMetrics{testScopeMetricsA, testScopeMetricsB}, } type testSDKProducer struct { produceFunc func(context.Context, *metricdata.ResourceMetrics) error } func (p testSDKProducer) produce(ctx context.Context, rm *metricdata.ResourceMetrics) error { if p.produceFunc != nil { return p.produceFunc(ctx, rm) } *rm = testResourceMetricsA return nil } type testExternalProducer struct { produceFunc func(context.Context) ([]metricdata.ScopeMetrics, error) } func (p testExternalProducer) Produce(ctx context.Context) ([]metricdata.ScopeMetrics, error) { if p.produceFunc != nil { return p.produceFunc(ctx) } return []metricdata.ScopeMetrics{testScopeMetricsB}, nil } func benchReaderCollectFunc(r Reader) func(*testing.B) { ctx := context.Background() r.register(testSDKProducer{}) // Store benchmark results in a closure to prevent the compiler from // inlining and skipping the function. 
var ( collectedMetrics metricdata.ResourceMetrics err error ) return func(b *testing.B) { b.ReportAllocs() b.ResetTimer() for n := 0; n < b.N; n++ { err = r.Collect(ctx, &collectedMetrics) assert.Equalf(b, testResourceMetricsA, collectedMetrics, "unexpected Collect response: (%#v, %v)", collectedMetrics, err) } } } func TestDefaultAggregationSelector(t *testing.T) { var undefinedInstrument InstrumentKind assert.Panics(t, func() { DefaultAggregationSelector(undefinedInstrument) }) iKinds := []InstrumentKind{ InstrumentKindCounter, InstrumentKindUpDownCounter, InstrumentKindHistogram, InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge, } for _, ik := range iKinds { assert.NoError(t, DefaultAggregationSelector(ik).err(), ik) } } func TestDefaultTemporalitySelector(t *testing.T) { var undefinedInstrument InstrumentKind for _, ik := range []InstrumentKind{ undefinedInstrument, InstrumentKindCounter, InstrumentKindUpDownCounter, InstrumentKindHistogram, InstrumentKindObservableCounter, InstrumentKindObservableUpDownCounter, InstrumentKindObservableGauge, } { assert.Equal(t, metricdata.CumulativeTemporality, DefaultTemporalitySelector(ik)) } } type notComparable [0]func() // nolint:unused // non-comparable type itself is used. type noCompareReader struct { notComparable // nolint:unused // non-comparable type itself is used. Reader } func TestReadersNotRequiredToBeComparable(t *testing.T) { r := noCompareReader{Reader: NewManualReader()} assert.NotPanics(t, func() { _ = NewMeterProvider(WithReader(r)) }) } opentelemetry-go-1.21.0/sdk/metric/version.go000066400000000000000000000014051452547353200211640ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric // import "go.opentelemetry.io/otel/sdk/metric" // version is the current release version of the metric SDK in use. func version() string { return "1.21.0" } opentelemetry-go-1.21.0/sdk/metric/version_test.go000066400000000000000000000021001452547353200222140ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package metric import ( "regexp" "testing" "github.com/stretchr/testify/assert" ) // regex taken from https://github.com/Masterminds/semver/tree/v3.1.1 var versionRegex = regexp.MustCompile(`^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)` + `(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)` + `(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?` + `(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`) func TestVersionSemver(t *testing.T) { v := version() assert.Regexp(t, versionRegex, v) } opentelemetry-go-1.21.0/sdk/metric/view.go000066400000000000000000000100571452547353200204540ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( "errors" "regexp" "strings" "go.opentelemetry.io/otel/internal/global" ) var ( errMultiInst = errors.New("name replacement for multiple instruments") errEmptyView = errors.New("no criteria provided for view") emptyView = func(Instrument) (Stream, bool) { return Stream{}, false } ) // View is an override to the default behavior of the SDK. It defines how data // should be collected for certain instruments. It returns true and the exact // Stream to use for matching Instruments. Otherwise, if the view does not // match, false is returned. type View func(Instrument) (Stream, bool) // NewView returns a View that applies the Stream mask for all instruments that // match criteria. The returned View will only apply mask if all non-zero-value // fields of criteria match the corresponding Instrument passed to the view. If // no criteria are provided (all fields of criteria are their zero-values), a // view that matches no instruments is returned. If you need to match a // zero-value field, create a View directly. // // The Name field of criteria supports wildcard pattern matching. The "*" // wildcard is recognized as matching zero or more characters, and "?" is // recognized as matching exactly one character. For example, a pattern of "*" // matches all instrument names. // // The Stream mask only applies updates for non-zero-value fields. By default, // the Instrument the View matches against will be used for the Name, // Description, and Unit of the returned Stream and no Aggregation or // AttributeFilter are set. All non-zero-value fields of mask are used instead // of the default. If you need to zero out a Stream field returned from a // View, create a View directly. func NewView(criteria Instrument, mask Stream) View { if criteria.empty() { global.Error( errEmptyView, "dropping view", "mask", mask, ) return emptyView } var matchFunc func(Instrument) bool if strings.ContainsAny(criteria.Name, "*?") { if mask.Name != "" { global.Error( errMultiInst, "dropping view", "criteria", criteria, "mask", mask, ) return emptyView } // Handle branching here in NewView instead of criteria.matches so // criteria.matches remains inlinable for the simple case.
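// For example, the steps below translate a criteria.Name of "fo?" into the
// anchored regular expression ^fo.$ and a criteria.Name of "request.*" into
// ^request\..*$ (both names are illustrative).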
pattern := regexp.QuoteMeta(criteria.Name) pattern = "^" + pattern + "$" pattern = strings.ReplaceAll(pattern, `\?`, ".") pattern = strings.ReplaceAll(pattern, `\*`, ".*") re := regexp.MustCompile(pattern) matchFunc = func(i Instrument) bool { return re.MatchString(i.Name) && criteria.matchesDescription(i) && criteria.matchesKind(i) && criteria.matchesUnit(i) && criteria.matchesScope(i) } } else { matchFunc = criteria.matches } var agg Aggregation if mask.Aggregation != nil { agg = mask.Aggregation.copy() if err := agg.err(); err != nil { global.Error( err, "not using aggregation with view", "criteria", criteria, "mask", mask, ) agg = nil } } return func(i Instrument) (Stream, bool) { if matchFunc(i) { return Stream{ Name: nonZero(mask.Name, i.Name), Description: nonZero(mask.Description, i.Description), Unit: nonZero(mask.Unit, i.Unit), Aggregation: agg, AttributeFilter: mask.AttributeFilter, }, true } return Stream{}, false } } // nonZero returns v if it is non-zero-valued, otherwise alt. func nonZero[T comparable](v, alt T) T { var zero T if v != zero { return v } return alt } opentelemetry-go-1.21.0/sdk/metric/view_test.go000066400000000000000000000315401452547353200215130ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
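// Example: a minimal sketch of registering a wildcard View with a
// MeterProvider. The instrument name "request.*" and the bucket boundaries are
// illustrative assumptions, not values defined by this package.
//
//	view := NewView(
//		Instrument{Name: "request.*", Kind: InstrumentKindHistogram},
//		Stream{Aggregation: AggregationExplicitBucketHistogram{
//			Boundaries: []float64{0, 5, 10, 25, 50, 100, 250, 500, 1000},
//		}},
//	)
//	provider := NewMeterProvider(WithReader(NewManualReader()), WithView(view))
//
// Because the criteria name contains a wildcard, the Stream mask leaves Name
// empty; NewView rejects a rename that would apply to multiple instruments.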
package metric // import "go.opentelemetry.io/otel/sdk/metric" import ( "testing" "github.com/go-logr/logr" "github.com/go-logr/logr/funcr" "github.com/go-logr/logr/testr" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/instrumentation" ) var ( schemaURL = "https://opentelemetry.io/schemas/1.0.0" completeIP = Instrument{ Name: "foo", Description: "foo desc", Kind: InstrumentKindCounter, Unit: "By", Scope: instrumentation.Scope{ Name: "TestNewViewMatch", Version: "v0.1.0", SchemaURL: schemaURL, }, } ) func scope(name, ver, url string) instrumentation.Scope { return instrumentation.Scope{Name: name, Version: ver, SchemaURL: url} } func testNewViewMatchName() func(t *testing.T) { tests := []struct { name string criteria string match []string notMatch []string }{ { name: "Exact", criteria: "foo", match: []string{"foo"}, notMatch: []string{"", "bar", "foobar", "barfoo", "ffooo"}, }, { name: "Wildcard/*", criteria: "*", match: []string{"", "foo", "foobar", "barfoo", "barfoobaz"}, }, { name: "Wildcard/Front?", criteria: "?oo", match: []string{"foo", "1oo"}, notMatch: []string{"", "bar", "foobar", "barfoo", "barfoobaz"}, }, { name: "Wildcard/Back?", criteria: "fo?", match: []string{"foo", "fo1"}, notMatch: []string{"", "bar", "foobar", "barfoo", "barfoobaz"}, }, { name: "Wildcard/Front*", criteria: "*foo", match: []string{"foo", "123foo", "barfoo"}, notMatch: []string{"", "bar", "foobar", "barfoobaz"}, }, { name: "Wildcard/Back*", criteria: "foo*", match: []string{"foo", "foo1", "foobar"}, notMatch: []string{"", "bar", "barfoo", "barfoobaz"}, }, { name: "Wildcard/FrontBack*", criteria: "*foo*", match: []string{"foo", "foo1", "1foo", "1foo1", "foobar", "barfoobaz"}, notMatch: []string{"", "bar"}, }, { name: "Wildcard/Front**", criteria: "**foo", match: []string{"foo", "123foo", "barfoo", "afoo"}, notMatch: []string{"", "bar", "foobar", "barfoobaz"}, }, { name: "Wildcard/Back**", criteria: "foo**", match: []string{"foo", "foo1", "fooa", "foobar"}, notMatch: []string{"", "bar", "barfoo", "barfoobaz"}, }, { name: "Wildcard/Front*?", criteria: "*?oo", match: []string{"foo", "123foo", "barfoo", "afoo"}, notMatch: []string{"", "fo", "bar", "foobar", "barfoobaz"}, }, { name: "Wildcard/Back*?", criteria: "fo*?", match: []string{"foo", "foo1", "fooa", "foobar"}, notMatch: []string{"", "bar", "barfoo", "barfoobaz"}, }, { name: "Wildcard/Front?*", criteria: "?*oo", match: []string{"foo", "123foo", "barfoo", "afoo"}, notMatch: []string{"", "oo", "fo", "bar", "foobar", "barfoobaz"}, }, { name: "Wildcard/Back?*", criteria: "fo?*", match: []string{"foo", "foo1", "fooa", "foobar"}, notMatch: []string{"", "fo", "bar", "barfoo", "barfoobaz"}, }, { name: "Wildcard/Middle*", criteria: "f*o", match: []string{"fo", "foo", "fooo", "fo12baro"}, notMatch: []string{"", "bar", "barfoo", "barfoobaz"}, }, { name: "Wildcard/Middle?", criteria: "f?o", match: []string{"foo", "f1o"}, notMatch: []string{"", "fo", "fooo", "fo12baro", "bar"}, }, { name: "Wildcard/MetaCharacters", criteria: "*.+()|[]{}^$-_?", match: []string{"aa.+()|[]{}^$-_b", ".+()|[]{}^$-_b"}, notMatch: []string{"", "foo", ".+()|[]{}^$-_"}, }, } return func(t *testing.T) { for _, test := range tests { v := NewView(Instrument{Name: test.criteria}, Stream{}) t.Run(test.name, func(t *testing.T) { for _, n := range test.match { _, matches := v(Instrument{Name: n}) assert.Truef(t, matches, "%s does not match %s", test.criteria, n) } for _, n := range 
test.notMatch { _, matches := v(Instrument{Name: n}) assert.Falsef(t, matches, "%s matches %s", test.criteria, n) } }) } } } func TestNewViewMatch(t *testing.T) { // Avoid boilerplate for name match testing. t.Run("Name", testNewViewMatchName()) tests := []struct { name string criteria Instrument matches []Instrument notMatches []Instrument }{ { name: "Empty", notMatches: []Instrument{{}, {Name: "foo"}, completeIP}, }, { name: "Description", criteria: Instrument{Description: "foo desc"}, matches: []Instrument{{Description: "foo desc"}, completeIP}, notMatches: []Instrument{{}, {Description: "foo"}, {Description: "desc"}}, }, { name: "Kind", criteria: Instrument{Kind: InstrumentKindCounter}, matches: []Instrument{{Kind: InstrumentKindCounter}, completeIP}, notMatches: []Instrument{ {}, {Kind: InstrumentKindUpDownCounter}, {Kind: InstrumentKindHistogram}, {Kind: InstrumentKindObservableCounter}, {Kind: InstrumentKindObservableUpDownCounter}, {Kind: InstrumentKindObservableGauge}, }, }, { name: "Unit", criteria: Instrument{Unit: "By"}, matches: []Instrument{{Unit: "By"}, completeIP}, notMatches: []Instrument{ {}, {Unit: "1"}, {Unit: "K"}, }, }, { name: "ScopeName", criteria: Instrument{Scope: scope("TestNewViewMatch", "", "")}, matches: []Instrument{ {Scope: scope("TestNewViewMatch", "", "")}, completeIP, }, notMatches: []Instrument{ {}, {Scope: scope("PrefixTestNewViewMatch", "", "")}, {Scope: scope("TestNewViewMatchSuffix", "", "")}, {Scope: scope("alt", "", "")}, }, }, { name: "ScopeVersion", criteria: Instrument{Scope: scope("", "v0.1.0", "")}, matches: []Instrument{ {Scope: scope("", "v0.1.0", "")}, completeIP, }, notMatches: []Instrument{ {}, {Scope: scope("", "v0.1.0-RC1", "")}, {Scope: scope("", "v0.1.1", "")}, }, }, { name: "ScopeSchemaURL", criteria: Instrument{Scope: scope("", "", schemaURL)}, matches: []Instrument{ {Scope: scope("", "", schemaURL)}, completeIP, }, notMatches: []Instrument{ {}, {Scope: scope("", "", schemaURL+"/path")}, {Scope: scope("", "", "https://go.dev")}, }, }, { name: "Scope", criteria: Instrument{Scope: scope("TestNewViewMatch", "v0.1.0", schemaURL)}, matches: []Instrument{ {Scope: scope("TestNewViewMatch", "v0.1.0", schemaURL)}, completeIP, }, notMatches: []Instrument{ {}, {Scope: scope("CompleteMisMatch", "v0.2.0", "https://go.dev")}, {Scope: scope("NameMisMatch", "v0.1.0", schemaURL)}, }, }, { name: "Complete", criteria: completeIP, matches: []Instrument{completeIP}, notMatches: []Instrument{ {}, {Name: "foo"}, { Name: "Wrong Name", Description: "foo desc", Kind: InstrumentKindCounter, Unit: "By", Scope: scope("TestNewViewMatch", "v0.1.0", schemaURL), }, { Name: "foo", Description: "Wrong Description", Kind: InstrumentKindCounter, Unit: "By", Scope: scope("TestNewViewMatch", "v0.1.0", schemaURL), }, { Name: "foo", Description: "foo desc", Kind: InstrumentKindObservableUpDownCounter, Unit: "By", Scope: scope("TestNewViewMatch", "v0.1.0", schemaURL), }, { Name: "foo", Description: "foo desc", Kind: InstrumentKindCounter, Unit: "1", Scope: scope("TestNewViewMatch", "v0.1.0", schemaURL), }, { Name: "foo", Description: "foo desc", Kind: InstrumentKindCounter, Unit: "By", Scope: scope("Wrong Scope Name", "v0.1.0", schemaURL), }, { Name: "foo", Description: "foo desc", Kind: InstrumentKindCounter, Unit: "By", Scope: scope("TestNewViewMatch", "v1.4.3", schemaURL), }, { Name: "foo", Description: "foo desc", Kind: InstrumentKindCounter, Unit: "By", Scope: scope("TestNewViewMatch", "v0.1.0", "https://go.dev"), }, }, }, } for _, test := range tests { v := 
NewView(test.criteria, Stream{}) t.Run(test.name, func(t *testing.T) { for _, instrument := range test.matches { _, matches := v(instrument) assert.Truef(t, matches, "view does not match %#v", instrument) } for _, instrument := range test.notMatches { _, matches := v(instrument) assert.Falsef(t, matches, "view matches %#v", instrument) } }) } } func TestNewViewReplace(t *testing.T) { alt := "alternative value" tests := []struct { name string mask Stream want func(Instrument) Stream }{ { name: "Nothing", want: func(i Instrument) Stream { return Stream{ Name: i.Name, Description: i.Description, Unit: i.Unit, } }, }, { name: "Name", mask: Stream{Name: alt}, want: func(i Instrument) Stream { return Stream{ Name: alt, Description: i.Description, Unit: i.Unit, } }, }, { name: "Description", mask: Stream{Description: alt}, want: func(i Instrument) Stream { return Stream{ Name: i.Name, Description: alt, Unit: i.Unit, } }, }, { name: "Unit", mask: Stream{Unit: "1"}, want: func(i Instrument) Stream { return Stream{ Name: i.Name, Description: i.Description, Unit: "1", } }, }, { name: "Aggregation", mask: Stream{Aggregation: AggregationLastValue{}}, want: func(i Instrument) Stream { return Stream{ Name: i.Name, Description: i.Description, Unit: i.Unit, Aggregation: AggregationLastValue{}, } }, }, { name: "Complete", mask: Stream{ Name: alt, Description: alt, Unit: "1", Aggregation: AggregationLastValue{}, }, want: func(i Instrument) Stream { return Stream{ Name: alt, Description: alt, Unit: "1", Aggregation: AggregationLastValue{}, } }, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { got, match := NewView(completeIP, test.mask)(completeIP) require.True(t, match, "view did not match exact criteria") assert.Equal(t, test.want(completeIP), got) }) } // Go does not allow for the comparison of function values, even their // addresses. Therefore, the AttributeFilter field needs an alternative // testing strategy. 
t.Run("AttributeFilter", func(t *testing.T) { allowed := attribute.String("key", "val") filter := func(kv attribute.KeyValue) bool { return kv == allowed } mask := Stream{AttributeFilter: filter} got, match := NewView(completeIP, mask)(completeIP) require.True(t, match, "view did not match exact criteria") require.NotNil(t, got.AttributeFilter, "AttributeFilter not set") assert.True(t, got.AttributeFilter(allowed), "wrong AttributeFilter") other := attribute.String("key", "other val") assert.False(t, got.AttributeFilter(other), "wrong AttributeFilter") }) } type badAgg struct { e error } func (a badAgg) copy() Aggregation { return a } func (a badAgg) err() error { return a.e } func TestNewViewAggregationErrorLogged(t *testing.T) { tLog := testr.NewWithOptions(t, testr.Options{Verbosity: 6}) l := &logCounter{LogSink: tLog.GetSink()} otel.SetLogger(logr.New(l)) agg := badAgg{e: assert.AnError} mask := Stream{Aggregation: agg} got, match := NewView(completeIP, mask)(completeIP) require.True(t, match, "view did not match exact criteria") assert.Nil(t, got.Aggregation, "erroring aggregation used") assert.Equal(t, 1, l.ErrorN()) } func TestNewViewEmptyViewErrorLogged(t *testing.T) { var got string otel.SetLogger(funcr.New(func(_, args string) { got = args }, funcr.Options{Verbosity: 6})) _ = NewView(Instrument{}, Stream{}) assert.Contains(t, got, errEmptyView.Error()) } func TestNewViewMultiInstMatchErrorLogged(t *testing.T) { var got string otel.SetLogger(funcr.New(func(_, args string) { got = args }, funcr.Options{Verbosity: 6})) _ = NewView(Instrument{ Name: "*", // Wildcard match name (multiple instruments). }, Stream{ Name: "non-empty", }) assert.Contains(t, got, errMultiInst.Error()) } opentelemetry-go-1.21.0/sdk/resource/000077500000000000000000000000001452547353200175145ustar00rootroot00000000000000opentelemetry-go-1.21.0/sdk/resource/auto.go000066400000000000000000000060501452547353200210140ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package resource // import "go.opentelemetry.io/otel/sdk/resource" import ( "context" "errors" "fmt" "strings" ) // ErrPartialResource is returned by a detector when complete source // information for a Resource is unavailable or the source information // contains invalid values that are omitted from the returned Resource. var ErrPartialResource = errors.New("partial resource") // Detector detects OpenTelemetry resource information. type Detector interface { // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. // Detect returns an initialized Resource based on gathered information. // If the source information to construct a Resource contains invalid // values, a Resource is returned with the valid parts of the source // information used for initialization along with an appropriately // wrapped ErrPartialResource error. 
Detect(ctx context.Context) (*Resource, error) // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. } // Detect calls all input detectors sequentially and merges each result with the previous one. // It returns the merged error too. func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) { r := new(Resource) return r, detect(ctx, r, detectors) } // detect runs all detectors using ctx and merges the result into res. This // assumes res is allocated and not nil, it will panic otherwise. func detect(ctx context.Context, res *Resource, detectors []Detector) error { var ( r *Resource errs detectErrs err error ) for _, detector := range detectors { if detector == nil { continue } r, err = detector.Detect(ctx) if err != nil { errs = append(errs, err) if !errors.Is(err, ErrPartialResource) { continue } } r, err = Merge(res, r) if err != nil { errs = append(errs, err) } *res = *r } if len(errs) == 0 { return nil } return errs } type detectErrs []error func (e detectErrs) Error() string { errStr := make([]string, len(e)) for i, err := range e { errStr[i] = fmt.Sprintf("* %s", err) } format := "%d errors occurred detecting resource:\n\t%s" return fmt.Sprintf(format, len(e), strings.Join(errStr, "\n\t")) } func (e detectErrs) Unwrap() error { switch len(e) { case 0: return nil case 1: return e[0] } return e[1:] } func (e detectErrs) Is(target error) bool { return len(e) != 0 && errors.Is(e[0], target) } opentelemetry-go-1.21.0/sdk/resource/auto_test.go000066400000000000000000000032421452547353200220530ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package resource_test import ( "context" "fmt" "os" "testing" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/sdk/resource" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" ) func TestDetect(t *testing.T) { cases := []struct { name string schema1, schema2 string isErr bool }{ { name: "different schema urls", schema1: "https://opentelemetry.io/schemas/1.3.0", schema2: "https://opentelemetry.io/schemas/1.4.0", isErr: true, }, { name: "same schema url", schema1: "https://opentelemetry.io/schemas/1.4.0", schema2: "https://opentelemetry.io/schemas/1.4.0", isErr: false, }, } for _, c := range cases { t.Run(fmt.Sprintf("case-%s", c.name), func(t *testing.T) { d1 := resource.StringDetector(c.schema1, semconv.HostNameKey, os.Hostname) d2 := resource.StringDetector(c.schema2, semconv.HostNameKey, os.Hostname) r, err := resource.Detect(context.Background(), d1, d2) assert.NotNil(t, r) if c.isErr { assert.Error(t, err) } else { assert.NoError(t, err) } }) } } opentelemetry-go-1.21.0/sdk/resource/benchmark_test.go000066400000000000000000000040461452547353200230400ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package resource_test import ( "fmt" "math/rand" "testing" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/resource" ) const conflict = 0.5 func makeAttrs(n int) (_, _ *resource.Resource) { used := map[string]bool{} l1 := make([]attribute.KeyValue, n) l2 := make([]attribute.KeyValue, n) for i := 0; i < n; i++ { var k string for { k = fmt.Sprint("k", rand.Intn(1000000000)) if !used[k] { used[k] = true break } } l1[i] = attribute.String(k, fmt.Sprint("v", rand.Intn(1000000000))) if rand.Float64() < conflict { l2[i] = l1[i] } else { l2[i] = attribute.String(k, fmt.Sprint("v", rand.Intn(1000000000))) } } return resource.NewSchemaless(l1...), resource.NewSchemaless(l2...) } func benchmarkMergeResource(b *testing.B, size int) { r1, r2 := makeAttrs(size) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { _, _ = resource.Merge(r1, r2) } } func BenchmarkMergeResource_1(b *testing.B) { benchmarkMergeResource(b, 1) } func BenchmarkMergeResource_2(b *testing.B) { benchmarkMergeResource(b, 2) } func BenchmarkMergeResource_3(b *testing.B) { benchmarkMergeResource(b, 3) } func BenchmarkMergeResource_4(b *testing.B) { benchmarkMergeResource(b, 4) } func BenchmarkMergeResource_6(b *testing.B) { benchmarkMergeResource(b, 6) } func BenchmarkMergeResource_8(b *testing.B) { benchmarkMergeResource(b, 8) } func BenchmarkMergeResource_16(b *testing.B) { benchmarkMergeResource(b, 16) } opentelemetry-go-1.21.0/sdk/resource/builtin.go000066400000000000000000000066261452547353200215230ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package resource // import "go.opentelemetry.io/otel/sdk/resource" import ( "context" "fmt" "os" "path/filepath" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" ) type ( // telemetrySDK is a Detector that provides information about // the OpenTelemetry SDK used. This Detector is included as a // builtin. If these resource attributes are not wanted, use // the WithTelemetrySDK(nil) or WithoutBuiltin() options to // explicitly disable them. telemetrySDK struct{} // host is a Detector that provides information about the host // being run on. This Detector is included as a builtin. If // these resource attributes are not wanted, use the // WithHost(nil) or WithoutBuiltin() options to explicitly // disable them. 
host struct{} stringDetector struct { schemaURL string K attribute.Key F func() (string, error) } defaultServiceNameDetector struct{} ) var ( _ Detector = telemetrySDK{} _ Detector = host{} _ Detector = stringDetector{} _ Detector = defaultServiceNameDetector{} ) // Detect returns a *Resource that describes the OpenTelemetry SDK used. func (telemetrySDK) Detect(context.Context) (*Resource, error) { return NewWithAttributes( semconv.SchemaURL, semconv.TelemetrySDKName("opentelemetry"), semconv.TelemetrySDKLanguageGo, semconv.TelemetrySDKVersion(sdk.Version()), ), nil } // Detect returns a *Resource that describes the host being run on. func (host) Detect(ctx context.Context) (*Resource, error) { return StringDetector(semconv.SchemaURL, semconv.HostNameKey, os.Hostname).Detect(ctx) } // StringDetector returns a Detector that will produce a *Resource // containing the string as a value corresponding to k. The resulting Resource // will have the specified schemaURL. func StringDetector(schemaURL string, k attribute.Key, f func() (string, error)) Detector { return stringDetector{schemaURL: schemaURL, K: k, F: f} } // Detect returns a *Resource that describes the string as a value // corresponding to attribute.Key as well as the specific schemaURL. func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) { value, err := sd.F() if err != nil { return nil, fmt.Errorf("%s: %w", string(sd.K), err) } a := sd.K.String(value) if !a.Valid() { return nil, fmt.Errorf("invalid attribute: %q -> %q", a.Key, a.Value.Emit()) } return NewWithAttributes(sd.schemaURL, sd.K.String(value)), nil } // Detect implements Detector. func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error) { return StringDetector( semconv.SchemaURL, semconv.ServiceNameKey, func() (string, error) { executable, err := os.Executable() if err != nil { return "unknown_service:go", nil } return "unknown_service:" + filepath.Base(executable), nil }, ).Detect(ctx) } opentelemetry-go-1.21.0/sdk/resource/builtin_test.go000066400000000000000000000042141452547353200225510ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
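// Example: a minimal sketch of a user-defined Detector that complements the
// builtin detectors above. The buildDetector type and the "service.build_id"
// attribute are illustrative assumptions, not part of this package.
//
//	type buildDetector struct{}
//
//	func (buildDetector) Detect(context.Context) (*resource.Resource, error) {
//		return resource.NewSchemaless(attribute.String("service.build_id", "abc123")), nil
//	}
//
//	// Wired in with: resource.New(ctx, resource.WithDetectors(buildDetector{}))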
package resource_test import ( "context" "errors" "fmt" "testing" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/resource" ) func TestBuiltinStringDetector(t *testing.T) { E := fmt.Errorf("no K") res, err := resource.StringDetector("", attribute.Key("K"), func() (string, error) { return "", E }).Detect(context.Background()) require.True(t, errors.Is(err, E)) require.NotEqual(t, E, err) require.Nil(t, res) } func TestStringDetectorErrors(t *testing.T) { tests := []struct { desc string s resource.Detector errContains string }{ { desc: "explicit error from func should be returned", s: resource.StringDetector("", attribute.Key("K"), func() (string, error) { return "", fmt.Errorf("k-is-missing") }), errContains: "k-is-missing", }, { desc: "empty key is an invalid", s: resource.StringDetector("", attribute.Key(""), func() (string, error) { return "not-empty", nil }), errContains: "invalid attribute: \"\" -> \"not-empty\"", }, } for _, test := range tests { res, err := resource.New( context.Background(), resource.WithAttributes(attribute.String("A", "B")), resource.WithDetectors(test.s), ) require.Error(t, err, test.desc) require.Contains(t, err.Error(), test.errContains) require.NotNil(t, res, "resource contains remaining valid entries") m := map[string]string{} for _, kv := range res.Attributes() { m[string(kv.Key)] = kv.Value.Emit() } require.EqualValues(t, map[string]string{"A": "B"}, m) } } opentelemetry-go-1.21.0/sdk/resource/config.go000066400000000000000000000152111452547353200213100ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package resource // import "go.opentelemetry.io/otel/sdk/resource" import ( "context" "go.opentelemetry.io/otel/attribute" ) // config contains configuration for Resource creation. type config struct { // detectors that will be evaluated. detectors []Detector // SchemaURL to associate with the Resource. schemaURL string } // Option is the interface that applies a configuration option. type Option interface { // apply sets the Option value of a config. apply(config) config } // WithAttributes adds attributes to the configured Resource. func WithAttributes(attributes ...attribute.KeyValue) Option { return WithDetectors(detectAttributes{attributes}) } type detectAttributes struct { attributes []attribute.KeyValue } func (d detectAttributes) Detect(context.Context) (*Resource, error) { return NewSchemaless(d.attributes...), nil } // WithDetectors adds detectors to be evaluated for the configured resource. func WithDetectors(detectors ...Detector) Option { return detectorsOption{detectors: detectors} } type detectorsOption struct { detectors []Detector } func (o detectorsOption) apply(cfg config) config { cfg.detectors = append(cfg.detectors, o.detectors...) return cfg } // WithFromEnv adds attributes from environment variables to the configured resource. 
func WithFromEnv() Option { return WithDetectors(fromEnv{}) } // WithHost adds attributes from the host to the configured resource. func WithHost() Option { return WithDetectors(host{}) } // WithHostID adds host ID information to the configured resource. func WithHostID() Option { return WithDetectors(hostIDDetector{}) } // WithTelemetrySDK adds TelemetrySDK version info to the configured resource. func WithTelemetrySDK() Option { return WithDetectors(telemetrySDK{}) } // WithSchemaURL sets the schema URL for the configured resource. func WithSchemaURL(schemaURL string) Option { return schemaURLOption(schemaURL) } type schemaURLOption string func (o schemaURLOption) apply(cfg config) config { cfg.schemaURL = string(o) return cfg } // WithOS adds all the OS attributes to the configured Resource. // See individual WithOS* functions to configure specific attributes. func WithOS() Option { return WithDetectors( osTypeDetector{}, osDescriptionDetector{}, ) } // WithOSType adds an attribute with the operating system type to the configured Resource. func WithOSType() Option { return WithDetectors(osTypeDetector{}) } // WithOSDescription adds an attribute with the operating system description to the // configured Resource. The formatted string is equivalent to the output of the // `uname -snrvm` command. func WithOSDescription() Option { return WithDetectors(osDescriptionDetector{}) } // WithProcess adds all the Process attributes to the configured Resource. // // Warning! This option will include process command line arguments. If these // contain sensitive information it will be included in the exported resource. // // This option is equivalent to calling WithProcessPID, // WithProcessExecutableName, WithProcessExecutablePath, // WithProcessCommandArgs, WithProcessOwner, WithProcessRuntimeName, // WithProcessRuntimeVersion, and WithProcessRuntimeDescription. See each // option function for information about what resource attributes each // includes. func WithProcess() Option { return WithDetectors( processPIDDetector{}, processExecutableNameDetector{}, processExecutablePathDetector{}, processCommandArgsDetector{}, processOwnerDetector{}, processRuntimeNameDetector{}, processRuntimeVersionDetector{}, processRuntimeDescriptionDetector{}, ) } // WithProcessPID adds an attribute with the process identifier (PID) to the // configured Resource. func WithProcessPID() Option { return WithDetectors(processPIDDetector{}) } // WithProcessExecutableName adds an attribute with the name of the process // executable to the configured Resource. func WithProcessExecutableName() Option { return WithDetectors(processExecutableNameDetector{}) } // WithProcessExecutablePath adds an attribute with the full path to the process // executable to the configured Resource. func WithProcessExecutablePath() Option { return WithDetectors(processExecutablePathDetector{}) } // WithProcessCommandArgs adds an attribute with all the command arguments (including // the command/executable itself) as received by the process to the configured // Resource. // // Warning! This option will include process command line arguments. If these // contain sensitive information it will be included in the exported resource. func WithProcessCommandArgs() Option { return WithDetectors(processCommandArgsDetector{}) } // WithProcessOwner adds an attribute with the username of the user that owns the process // to the configured Resource. 
func WithProcessOwner() Option { return WithDetectors(processOwnerDetector{}) } // WithProcessRuntimeName adds an attribute with the name of the runtime of this // process to the configured Resource. func WithProcessRuntimeName() Option { return WithDetectors(processRuntimeNameDetector{}) } // WithProcessRuntimeVersion adds an attribute with the version of the runtime of // this process to the configured Resource. func WithProcessRuntimeVersion() Option { return WithDetectors(processRuntimeVersionDetector{}) } // WithProcessRuntimeDescription adds an attribute with an additional description // about the runtime of the process to the configured Resource. func WithProcessRuntimeDescription() Option { return WithDetectors(processRuntimeDescriptionDetector{}) } // WithContainer adds all the Container attributes to the configured Resource. // See individual WithContainer* functions to configure specific attributes. func WithContainer() Option { return WithDetectors( cgroupContainerIDDetector{}, ) } // WithContainerID adds an attribute with the id of the container to the configured Resource. // Note: WithContainerID will not extract the correct container ID in an ECS environment. // Please use the ECS resource detector instead (https://pkg.go.dev/go.opentelemetry.io/contrib/detectors/aws/ecs). func WithContainerID() Option { return WithDetectors(cgroupContainerIDDetector{}) } opentelemetry-go-1.21.0/sdk/resource/container.go000066400000000000000000000052051452547353200220270ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package resource // import "go.opentelemetry.io/otel/sdk/resource" import ( "bufio" "context" "errors" "io" "os" "regexp" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" ) type containerIDProvider func() (string, error) var ( containerID containerIDProvider = getContainerIDFromCGroup cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*-)?([0-9a-f]+)(?:\.|\s*$)`) ) type cgroupContainerIDDetector struct{} const cgroupPath = "/proc/self/cgroup" // Detect returns a *Resource that describes the id of the container. // If no container id found, an empty resource will be returned. func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) { containerID, err := containerID() if err != nil { return nil, err } if containerID == "" { return Empty(), nil } return NewWithAttributes(semconv.SchemaURL, semconv.ContainerID(containerID)), nil } var ( defaultOSStat = os.Stat osStat = defaultOSStat defaultOSOpen = func(name string) (io.ReadCloser, error) { return os.Open(name) } osOpen = defaultOSOpen ) // getContainerIDFromCGroup returns the id of the container from the cgroup file. // If no container id found, an empty string will be returned. 
func getContainerIDFromCGroup() (string, error) { if _, err := osStat(cgroupPath); errors.Is(err, os.ErrNotExist) { // File does not exist, skip return "", nil } file, err := osOpen(cgroupPath) if err != nil { return "", err } defer file.Close() return getContainerIDFromReader(file), nil } // getContainerIDFromReader returns the id of the container from reader. func getContainerIDFromReader(reader io.Reader) string { scanner := bufio.NewScanner(reader) for scanner.Scan() { line := scanner.Text() if id := getContainerIDFromLine(line); id != "" { return id } } return "" } // getContainerIDFromLine returns the id of the container from one string line. func getContainerIDFromLine(line string) string { matches := cgroupContainerIDRe.FindStringSubmatch(line) if len(matches) <= 1 { return "" } return matches[1] } opentelemetry-go-1.21.0/sdk/resource/container_test.go000066400000000000000000000110461452547353200230660ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package resource import ( "errors" "io" "os" "strings" "testing" "github.com/stretchr/testify/assert" ) func setDefaultContainerProviders() { setContainerProviders( getContainerIDFromCGroup, ) } func setContainerProviders( idProvider containerIDProvider, ) { containerID = idProvider } func TestGetContainerIDFromLine(t *testing.T) { testCases := []struct { name string line string expectedContainerID string }{ { name: "with suffix", line: "13:name=systemd:/podruntime/docker/kubepods/ac679f8a8319c8cf7d38e1adf263bc08d23.aaaa", expectedContainerID: "ac679f8a8319c8cf7d38e1adf263bc08d23", }, { name: "with prefix and suffix", line: "13:name=systemd:/podruntime/docker/kubepods/crio-dc679f8a8319c8cf7d38e1adf263bc08d23.stuff", expectedContainerID: "dc679f8a8319c8cf7d38e1adf263bc08d23", }, { name: "no prefix and suffix", line: "13:name=systemd:/pod/d86d75589bf6cc254f3e2cc29debdf85dde404998aa128997a819ff991827356", expectedContainerID: "d86d75589bf6cc254f3e2cc29debdf85dde404998aa128997a819ff991827356", }, { name: "with space", line: " 13:name=systemd:/pod/d86d75589bf6cc254f3e2cc29debdf85dde404998aa128997a819ff991827356 ", expectedContainerID: "d86d75589bf6cc254f3e2cc29debdf85dde404998aa128997a819ff991827356", }, { name: "invalid hex string", line: "13:name=systemd:/podruntime/docker/kubepods/ac679f8a8319c8cf7d38e1adf263bc08d23zzzz", }, { name: "no container id - 1", line: "pids: /", }, { name: "no container id - 2", line: "pids: ", }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { containerID := getContainerIDFromLine(tc.line) assert.Equal(t, tc.expectedContainerID, containerID) }) } } func TestGetContainerIDFromReader(t *testing.T) { testCases := []struct { name string reader io.Reader expectedContainerID string }{ { name: "multiple lines", reader: strings.NewReader(`// 1:name=systemd:/podruntime/docker/kubepods/docker-dc579f8a8319c8cf7d38e1adf263bc08d23 1:name=systemd:/podruntime/docker/kubepods/docker-dc579f8a8319c8cf7d38e1adf263bc08d24 `), expectedContainerID: 
"dc579f8a8319c8cf7d38e1adf263bc08d23", }, { name: "no container id", reader: strings.NewReader(`// 1:name=systemd:/podruntime/docker `), expectedContainerID: "", }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { containerID := getContainerIDFromReader(tc.reader) assert.Equal(t, tc.expectedContainerID, containerID) }) } } func TestGetContainerIDFromCGroup(t *testing.T) { t.Cleanup(func() { osStat = defaultOSStat osOpen = defaultOSOpen }) testCases := []struct { name string cgroupFileNotExist bool openFileError error content string expectedContainerID string expectedError bool }{ { name: "the cgroup file does not exist", cgroupFileNotExist: true, }, { name: "error when opening cgroup file", openFileError: errors.New("test"), expectedError: true, }, { name: "cgroup file", content: "1:name=systemd:/podruntime/docker/kubepods/docker-dc579f8a8319c8cf7d38e1adf263bc08d23", expectedContainerID: "dc579f8a8319c8cf7d38e1adf263bc08d23", }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { osStat = func(name string) (os.FileInfo, error) { if tc.cgroupFileNotExist { return nil, os.ErrNotExist } return nil, nil } osOpen = func(name string) (io.ReadCloser, error) { if tc.openFileError != nil { return nil, tc.openFileError } return io.NopCloser(strings.NewReader(tc.content)), nil } containerID, err := getContainerIDFromCGroup() assert.Equal(t, tc.expectedError, err != nil) assert.Equal(t, tc.expectedContainerID, containerID) }) } } opentelemetry-go-1.21.0/sdk/resource/doc.go000066400000000000000000000026541452547353200206170ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package resource provides detecting and representing resources. // // The fundamental struct is a Resource which holds identifying information // about the entities for which telemetry is exported. // // To automatically construct Resources from an environment a Detector // interface is defined. Implementations of this interface can be passed to // the Detect function to generate a Resource from the merged information. // // To load a user defined Resource from the environment variable // OTEL_RESOURCE_ATTRIBUTES the FromEnv Detector can be used. It will interpret // the value as a list of comma delimited key/value pairs // (e.g. `=,=,...`). // // While this package provides a stable API, // the attributes added by resource detectors may change. package resource // import "go.opentelemetry.io/otel/sdk/resource" opentelemetry-go-1.21.0/sdk/resource/env.go000066400000000000000000000057641452547353200206470ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package resource // import "go.opentelemetry.io/otel/sdk/resource" import ( "context" "fmt" "net/url" "os" "strings" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" ) const ( // resourceAttrKey is the environment variable name OpenTelemetry Resource information will be read from. resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES" //nolint:gosec // False positive G101: Potential hardcoded credentials // svcNameKey is the environment variable name that Service Name information will be read from. svcNameKey = "OTEL_SERVICE_NAME" ) // errMissingValue is returned when a resource value is missing. var errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource) // fromEnv is a Detector that implements the Detector and collects // resources from environment. This Detector is included as a // builtin. type fromEnv struct{} // compile time assertion that FromEnv implements Detector interface. var _ Detector = fromEnv{} // Detect collects resources from environment. func (fromEnv) Detect(context.Context) (*Resource, error) { attrs := strings.TrimSpace(os.Getenv(resourceAttrKey)) svcName := strings.TrimSpace(os.Getenv(svcNameKey)) if attrs == "" && svcName == "" { return Empty(), nil } var res *Resource if svcName != "" { res = NewSchemaless(semconv.ServiceName(svcName)) } r2, err := constructOTResources(attrs) // Ensure that the resource with the service name from OTEL_SERVICE_NAME // takes precedence, if it was defined. res, err2 := Merge(r2, res) if err == nil { err = err2 } else if err2 != nil { err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()}) } return res, err } func constructOTResources(s string) (*Resource, error) { if s == "" { return Empty(), nil } pairs := strings.Split(s, ",") var attrs []attribute.KeyValue var invalid []string for _, p := range pairs { k, v, found := strings.Cut(p, "=") if !found { invalid = append(invalid, p) continue } key := strings.TrimSpace(k) val, err := url.PathUnescape(strings.TrimSpace(v)) if err != nil { // Retain original value if decoding fails, otherwise it will be // an empty string. val = v otel.Handle(err) } attrs = append(attrs, attribute.String(key, val)) } var err error if len(invalid) > 0 { err = fmt.Errorf("%w: %v", errMissingValue, invalid) } return NewSchemaless(attrs...), err } opentelemetry-go-1.21.0/sdk/resource/env_test.go000066400000000000000000000113631452547353200216760ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
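// Example: a minimal sketch of how the fromEnv detector defined above is
// typically driven. The environment values and attribute names are
// illustrative assumptions.
//
//	// OTEL_SERVICE_NAME=checkout
//	// OTEL_RESOURCE_ATTRIBUTES=deployment.environment=staging,host.type=vm
//	res, err := resource.New(context.Background(), resource.WithFromEnv())
//
// The resulting Resource carries service.name=checkout plus the two attributes
// above; when service.name is also present in OTEL_RESOURCE_ATTRIBUTES, the
// value from OTEL_SERVICE_NAME takes precedence.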
package resource import ( "context" "fmt" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/attribute" ottest "go.opentelemetry.io/otel/sdk/internal/internaltest" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" ) func TestDetectOnePair(t *testing.T) { store, err := ottest.SetEnvVariables(map[string]string{ resourceAttrKey: "key=value", }) require.NoError(t, err) defer func() { require.NoError(t, store.Restore()) }() detector := &fromEnv{} res, err := detector.Detect(context.Background()) require.NoError(t, err) assert.Equal(t, NewSchemaless(attribute.String("key", "value")), res) } func TestDetectURIEncodingOnePair(t *testing.T) { store, err := ottest.SetEnvVariables(map[string]string{ resourceAttrKey: "key=x+y+z?q=123", }) require.NoError(t, err) defer func() { require.NoError(t, store.Restore()) }() detector := &fromEnv{} res, err := detector.Detect(context.Background()) require.NoError(t, err) assert.Equal(t, NewSchemaless(attribute.String("key", "x+y+z?q=123")), res) } func TestDetectMultiPairs(t *testing.T) { store, err := ottest.SetEnvVariables(map[string]string{ "x": "1", resourceAttrKey: "key=value, k = v , a= x, a=z, b=c%2Fd", }) require.NoError(t, err) defer func() { require.NoError(t, store.Restore()) }() detector := &fromEnv{} res, err := detector.Detect(context.Background()) require.NoError(t, err) assert.Equal(t, NewSchemaless( attribute.String("key", "value"), attribute.String("k", "v"), attribute.String("a", "x"), attribute.String("a", "z"), attribute.String("b", "c/d"), ), res) } func TestDetectURIEncodingMultiPairs(t *testing.T) { store, err := ottest.SetEnvVariables(map[string]string{ "x": "1", resourceAttrKey: "key=x+y+z,namespace=localhost/test&verify", }) require.NoError(t, err) defer func() { require.NoError(t, store.Restore()) }() detector := &fromEnv{} res, err := detector.Detect(context.Background()) require.NoError(t, err) assert.Equal(t, NewSchemaless( attribute.String("key", "x+y+z"), attribute.String("namespace", "localhost/test&verify"), ), res) } func TestEmpty(t *testing.T) { store, err := ottest.SetEnvVariables(map[string]string{ resourceAttrKey: " ", }) require.NoError(t, err) defer func() { require.NoError(t, store.Restore()) }() detector := &fromEnv{} res, err := detector.Detect(context.Background()) require.NoError(t, err) assert.Equal(t, Empty(), res) } func TestNoResourceAttributesSet(t *testing.T) { store, err := ottest.SetEnvVariables(map[string]string{ svcNameKey: "bar", }) require.NoError(t, err) defer func() { require.NoError(t, store.Restore()) }() detector := &fromEnv{} res, err := detector.Detect(context.Background()) require.NoError(t, err) assert.Equal(t, res, NewSchemaless( semconv.ServiceName("bar"), )) } func TestMissingKeyError(t *testing.T) { store, err := ottest.SetEnvVariables(map[string]string{ resourceAttrKey: "key=value,key", }) require.NoError(t, err) defer func() { require.NoError(t, store.Restore()) }() detector := &fromEnv{} res, err := detector.Detect(context.Background()) assert.Error(t, err) assert.Equal(t, err, fmt.Errorf("%w: %v", errMissingValue, "[key]")) assert.Equal(t, res, NewSchemaless( attribute.String("key", "value"), )) } func TestInvalidPercentDecoding(t *testing.T) { store, err := ottest.SetEnvVariables(map[string]string{ resourceAttrKey: "key=%invalid", }) require.NoError(t, err) defer func() { require.NoError(t, store.Restore()) }() detector := &fromEnv{} res, err := detector.Detect(context.Background()) assert.NoError(t, err) assert.Equal(t, 
NewSchemaless( attribute.String("key", "%invalid"), ), res) } func TestDetectServiceNameFromEnv(t *testing.T) { store, err := ottest.SetEnvVariables(map[string]string{ resourceAttrKey: "key=value,service.name=foo", svcNameKey: "bar", }) require.NoError(t, err) defer func() { require.NoError(t, store.Restore()) }() detector := &fromEnv{} res, err := detector.Detect(context.Background()) require.NoError(t, err) assert.Equal(t, res, NewSchemaless( attribute.String("key", "value"), semconv.ServiceName("bar"), )) } opentelemetry-go-1.21.0/sdk/resource/export_common_unix_test.go000066400000000000000000000020021452547353200250300ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package resource // import "go.opentelemetry.io/otel/sdk/resource" var ( Uname = uname GetFirstAvailableFile = getFirstAvailableFile ) var ( SetUnameProvider = setUnameProvider SetDefaultUnameProvider = setDefaultUnameProvider ) opentelemetry-go-1.21.0/sdk/resource/export_os_release_darwin_test.go000066400000000000000000000013441452547353200261720ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package resource // import "go.opentelemetry.io/otel/sdk/resource" var ( ParsePlistFile = parsePlistFile BuildOSRelease = buildOSRelease ) opentelemetry-go-1.21.0/sdk/resource/export_test.go000066400000000000000000000026331452547353200224270ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package resource // import "go.opentelemetry.io/otel/sdk/resource" var ( SetDefaultOSProviders = setDefaultOSProviders SetOSProviders = setOSProviders SetDefaultRuntimeProviders = setDefaultRuntimeProviders SetRuntimeProviders = setRuntimeProviders SetDefaultUserProviders = setDefaultUserProviders SetUserProviders = setUserProviders SetDefaultOSDescriptionProvider = setDefaultOSDescriptionProvider SetOSDescriptionProvider = setOSDescriptionProvider SetDefaultContainerProviders = setDefaultContainerProviders SetContainerProviders = setContainerProviders ) var ( CommandArgs = commandArgs RuntimeName = runtimeName RuntimeOS = runtimeOS RuntimeArch = runtimeArch ) var MapRuntimeOSToSemconvOSType = mapRuntimeOSToSemconvOSType opentelemetry-go-1.21.0/sdk/resource/export_unix_test.go000066400000000000000000000017751452547353200235000ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos // +build aix dragonfly freebsd linux netbsd openbsd solaris zos package resource // import "go.opentelemetry.io/otel/sdk/resource" var ( ParseOSReleaseFile = parseOSReleaseFile Skip = skip Parse = parse Unquote = unquote Unescape = unescape BuildOSRelease = buildOSRelease ) opentelemetry-go-1.21.0/sdk/resource/export_windows_test.go000066400000000000000000000021141452547353200241730ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package resource // import "go.opentelemetry.io/otel/sdk/resource" var ( PlatformOSDescription = platformOSDescription ReadProductName = readProductName ReadDisplayVersion = readDisplayVersion ReadReleaseID = readReleaseID ReadCurrentMajorVersionNumber = readCurrentMajorVersionNumber ReadCurrentMinorVersionNumber = readCurrentMinorVersionNumber ReadCurrentBuildNumber = readCurrentBuildNumber ReadUBR = readUBR ) opentelemetry-go-1.21.0/sdk/resource/host_id.go000066400000000000000000000065471452547353200215100ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package resource // import "go.opentelemetry.io/otel/sdk/resource" import ( "context" "errors" "strings" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" ) type hostIDProvider func() (string, error) var defaultHostIDProvider hostIDProvider = platformHostIDReader.read var hostID = defaultHostIDProvider type hostIDReader interface { read() (string, error) } type fileReader func(string) (string, error) type commandExecutor func(string, ...string) (string, error) // hostIDReaderBSD implements hostIDReader. type hostIDReaderBSD struct { execCommand commandExecutor readFile fileReader } // read attempts to read the machine-id from /etc/hostid. If not found it will // execute `kenv -q smbios.system.uuid`. If neither location yields an id an // error will be returned. func (r *hostIDReaderBSD) read() (string, error) { if result, err := r.readFile("/etc/hostid"); err == nil { return strings.TrimSpace(result), nil } if result, err := r.execCommand("kenv", "-q", "smbios.system.uuid"); err == nil { return strings.TrimSpace(result), nil } return "", errors.New("host id not found in: /etc/hostid or kenv") } // hostIDReaderDarwin implements hostIDReader. type hostIDReaderDarwin struct { execCommand commandExecutor } // read executes `ioreg -rd1 -c "IOPlatformExpertDevice"` and parses host id // from the IOPlatformUUID line. If the command fails or the uuid cannot be // parsed an error will be returned. func (r *hostIDReaderDarwin) read() (string, error) { result, err := r.execCommand("ioreg", "-rd1", "-c", "IOPlatformExpertDevice") if err != nil { return "", err } lines := strings.Split(result, "\n") for _, line := range lines { if strings.Contains(line, "IOPlatformUUID") { parts := strings.Split(line, " = ") if len(parts) == 2 { return strings.Trim(parts[1], "\""), nil } break } } return "", errors.New("could not parse IOPlatformUUID") } type hostIDReaderLinux struct { readFile fileReader } // read attempts to read the machine-id from /etc/machine-id followed by // /var/lib/dbus/machine-id. If neither location yields an ID an error will // be returned. func (r *hostIDReaderLinux) read() (string, error) { if result, err := r.readFile("/etc/machine-id"); err == nil { return strings.TrimSpace(result), nil } if result, err := r.readFile("/var/lib/dbus/machine-id"); err == nil { return strings.TrimSpace(result), nil } return "", errors.New("host id not found in: /etc/machine-id or /var/lib/dbus/machine-id") } type hostIDDetector struct{} // Detect returns a *Resource containing the platform specific host id. func (hostIDDetector) Detect(ctx context.Context) (*Resource, error) { hostID, err := hostID() if err != nil { return nil, err } return NewWithAttributes( semconv.SchemaURL, semconv.HostID(hostID), ), nil } opentelemetry-go-1.21.0/sdk/resource/host_id_bsd.go000066400000000000000000000015751452547353200223340ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build dragonfly || freebsd || netbsd || openbsd || solaris // +build dragonfly freebsd netbsd openbsd solaris package resource // import "go.opentelemetry.io/otel/sdk/resource" var platformHostIDReader hostIDReader = &hostIDReaderBSD{ execCommand: execCommand, readFile: readFile, } opentelemetry-go-1.21.0/sdk/resource/host_id_darwin.go000066400000000000000000000013641452547353200230440ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package resource // import "go.opentelemetry.io/otel/sdk/resource" var platformHostIDReader hostIDReader = &hostIDReaderDarwin{ execCommand: execCommand, } opentelemetry-go-1.21.0/sdk/resource/host_id_exec.go000066400000000000000000000016601452547353200225030ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build darwin || dragonfly || freebsd || netbsd || openbsd || solaris package resource // import "go.opentelemetry.io/otel/sdk/resource" import "os/exec" func execCommand(name string, arg ...string) (string, error) { cmd := exec.Command(name, arg...) b, err := cmd.Output() if err != nil { return "", err } return string(b), nil } opentelemetry-go-1.21.0/sdk/resource/host_id_export_test.go000066400000000000000000000020331452547353200241320ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
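// Example (illustrative only): a standalone sketch of the IOPlatformUUID
// parsing performed by hostIDReaderDarwin.read above — split the matching
// ioreg output line on " = " and trim the surrounding quotes. The sample line
// mirrors the fixture used in host_id_test.go below.
package main

import (
	"fmt"
	"strings"
)

func main() {
	line := `    "IOPlatformUUID" = "81895B8D-9EF9-4EBB-B5DE-B00069CF53F0"`
	if strings.Contains(line, "IOPlatformUUID") {
		parts := strings.Split(line, " = ")
		if len(parts) == 2 {
			fmt.Println(strings.Trim(parts[1], "\"")) // 81895B8D-9EF9-4EBB-B5DE-B00069CF53F0
		}
	}
}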
package resource_test import ( "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/sdk/resource" ) func mockHostIDProvider() { resource.SetHostIDProvider( func() (string, error) { return "f2c668b579780554f70f72a063dc0864", nil }, ) } func mockHostIDProviderWithError() { resource.SetHostIDProvider( func() (string, error) { return "", assert.AnError }, ) } func restoreHostIDProvider() { resource.SetDefaultHostIDProvider() } opentelemetry-go-1.21.0/sdk/resource/host_id_linux.go000066400000000000000000000014171452547353200227160ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build linux // +build linux package resource // import "go.opentelemetry.io/otel/sdk/resource" var platformHostIDReader hostIDReader = &hostIDReaderLinux{ readFile: readFile, } opentelemetry-go-1.21.0/sdk/resource/host_id_readfile.go000066400000000000000000000016021452547353200233260ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build linux || dragonfly || freebsd || netbsd || openbsd || solaris package resource // import "go.opentelemetry.io/otel/sdk/resource" import "os" func readFile(filename string) (string, error) { b, err := os.ReadFile(filename) if err != nil { return "", err } return string(b), nil } opentelemetry-go-1.21.0/sdk/resource/host_id_readfile_test.go000066400000000000000000000025701452547353200243720ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
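// Example (illustrative only): a standalone sketch of the fallback order used
// by hostIDReaderLinux.read above — try /etc/machine-id first, then
// /var/lib/dbus/machine-id, trimming trailing whitespace from whichever file
// is readable.
package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	for _, p := range []string{"/etc/machine-id", "/var/lib/dbus/machine-id"} {
		if b, err := os.ReadFile(p); err == nil {
			fmt.Println(strings.TrimSpace(string(b)))
			return
		}
	}
	fmt.Println("host id not found")
}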
//go:build linux || dragonfly || freebsd || netbsd || openbsd || solaris package resource import ( "os" "testing" "github.com/stretchr/testify/require" ) func TestReadFileExistent(t *testing.T) { fileContents := "foo" f, err := os.CreateTemp("", "readfile_") require.NoError(t, err) defer os.Remove(f.Name()) _, err = f.WriteString(fileContents) require.NoError(t, err) require.NoError(t, f.Close()) result, err := readFile(f.Name()) require.NoError(t, err) require.Equal(t, result, fileContents) } func TestReadFileNonExistent(t *testing.T) { // create unique filename f, err := os.CreateTemp("", "readfile_") require.NoError(t, err) // make file non-existent require.NoError(t, os.Remove(f.Name())) _, err = readFile(f.Name()) require.ErrorIs(t, err, os.ErrNotExist) } opentelemetry-go-1.21.0/sdk/resource/host_id_test.go000066400000000000000000000142501452547353200225350ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package resource import ( "errors" "testing" "github.com/stretchr/testify/require" ) var ( expectedHostID = "f2c668b579780554f70f72a063dc0864" readFileNoError = func(filename string) (string, error) { return expectedHostID + "\n", nil } readFileError = func(filename string) (string, error) { return "", errors.New("not found") } execCommandNoError = func(string, ...string) (string, error) { return expectedHostID + "\n", nil } execCommandError = func(string, ...string) (string, error) { return "", errors.New("not found") } ) func SetDefaultHostIDProvider() { SetHostIDProvider(defaultHostIDProvider) } func SetHostIDProvider(hostIDProvider hostIDProvider) { hostID = hostIDProvider } func TestHostIDReaderBSD(t *testing.T) { tt := []struct { name string fileReader fileReader commandExecutor commandExecutor expectedHostID string expectError bool }{ { name: "hostIDReaderBSD valid primary", fileReader: readFileNoError, commandExecutor: execCommandError, expectedHostID: expectedHostID, expectError: false, }, { name: "hostIDReaderBSD invalid primary", fileReader: readFileError, commandExecutor: execCommandNoError, expectedHostID: expectedHostID, expectError: false, }, { name: "hostIDReaderBSD invalid primary and secondary", fileReader: readFileError, commandExecutor: execCommandError, expectedHostID: "", expectError: true, }, } for _, tc := range tt { tc := tc t.Run(tc.name, func(t *testing.T) { reader := hostIDReaderBSD{ readFile: tc.fileReader, execCommand: tc.commandExecutor, } hostID, err := reader.read() require.Equal(t, tc.expectError, err != nil) require.Equal(t, tc.expectedHostID, hostID) }) } } func TestHostIDReaderLinux(t *testing.T) { readFilePrimaryError := func(filename string) (string, error) { if filename == "/var/lib/dbus/machine-id" { return readFileNoError(filename) } return readFileError(filename) } tt := []struct { name string fileReader fileReader expectedHostID string expectError bool }{ { name: "hostIDReaderLinux valid primary", fileReader: readFileNoError, expectedHostID: expectedHostID, expectError: false, }, 
{ name: "hostIDReaderLinux invalid primary", fileReader: readFilePrimaryError, expectedHostID: expectedHostID, expectError: false, }, { name: "hostIDReaderLinux invalid primary and secondary", fileReader: readFileError, expectedHostID: "", expectError: true, }, } for _, tc := range tt { tc := tc t.Run(tc.name, func(t *testing.T) { reader := hostIDReaderLinux{ readFile: tc.fileReader, } hostID, err := reader.read() require.Equal(t, tc.expectError, err != nil) require.Equal(t, tc.expectedHostID, hostID) }) } } func TestHostIDReaderDarwin(t *testing.T) { validOutput := `+-o J316sAP { "IOPolledInterface" = "AppleARMWatchdogTimerHibernateHandler is not serializable" "#address-cells" = <02000000> "AAPL,phandle" = <01000000> "serial-number" = <94e1c79ec04cd3f153f600000000000000000000000000000000000000000000> "IOBusyInterest" = "IOCommand is not serializable" "target-type" = <"J316s"> "platform-name" = <7436303030000000000000000000000000000000000000000000000000000000> "secure-root-prefix" = <"md"> "name" = <"device-tree"> "region-info" = <4c4c2f4100000000000000000000000000000000000000000000000000000000> "manufacturer" = <"Apple Inc."> "compatible" = <"J316sAP","MacBookPro18,1","AppleARM"> "config-number" = <00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000> "IOPlatformSerialNumber" = "HDWLIF2LM7" "regulatory-model-number" = <4132343835000000000000000000000000000000000000000000000000000000> "time-stamp" = <"Fri Aug 5 20:25:38 PDT 2022"> "clock-frequency" = <00366e01> "model" = <"MacBookPro18,1"> "mlb-serial-number" = <5c92d268d6cd789e475ffafc0d363fc950000000000000000000000000000000> "model-number" = <5a31345930303136430000000000000000000000000000000000000000000000> "IONWInterrupts" = "IONWInterrupts" "model-config" = <"ICT;MoPED=0x03D053A605C84ED11C455A18D6C643140B41A239"> "device_type" = <"bootrom"> "#size-cells" = <02000000> "IOPlatformUUID" = "81895B8D-9EF9-4EBB-B5DE-B00069CF53F0" } ` execCommandValid := func(string, ...string) (string, error) { return validOutput, nil } execCommandInvalid := func(string, ...string) (string, error) { return "wasn't expecting this", nil } tt := []struct { name string fileReader fileReader commandExecutor commandExecutor expectedHostID string expectError bool }{ { name: "hostIDReaderDarwin valid output", commandExecutor: execCommandValid, expectedHostID: "81895B8D-9EF9-4EBB-B5DE-B00069CF53F0", expectError: false, }, { name: "hostIDReaderDarwin invalid output", commandExecutor: execCommandInvalid, expectedHostID: "", expectError: true, }, { name: "hostIDReaderDarwin error", commandExecutor: execCommandError, expectedHostID: "", expectError: true, }, } for _, tc := range tt { tc := tc t.Run(tc.name, func(t *testing.T) { reader := hostIDReaderDarwin{ execCommand: tc.commandExecutor, } hostID, err := reader.read() require.Equal(t, tc.expectError, err != nil) require.Equal(t, tc.expectedHostID, hostID) }) } } opentelemetry-go-1.21.0/sdk/resource/host_id_unsupported.go000066400000000000000000000023461452547353200241510ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // +build !darwin // +build !dragonfly // +build !freebsd // +build !linux // +build !netbsd // +build !openbsd // +build !solaris // +build !windows package resource // import "go.opentelemetry.io/otel/sdk/resource" // hostIDReaderUnsupported is a placeholder implementation for operating systems // for which this project currently doesn't support host.id // attribute detection. See build tags declaration early on this file // for a list of unsupported OSes. type hostIDReaderUnsupported struct{} func (*hostIDReaderUnsupported) read() (string, error) { return "", nil } var platformHostIDReader hostIDReader = &hostIDReaderUnsupported{} opentelemetry-go-1.21.0/sdk/resource/host_id_windows.go000066400000000000000000000024551452547353200232540ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build windows // +build windows package resource // import "go.opentelemetry.io/otel/sdk/resource" import ( "golang.org/x/sys/windows/registry" ) // implements hostIDReader type hostIDReaderWindows struct{} // read reads MachineGuid from the windows registry key: // SOFTWARE\Microsoft\Cryptography func (*hostIDReaderWindows) read() (string, error) { k, err := registry.OpenKey( registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`, registry.QUERY_VALUE|registry.WOW64_64KEY, ) if err != nil { return "", err } defer k.Close() guid, _, err := k.GetStringValue("MachineGuid") if err != nil { return "", err } return guid, nil } var platformHostIDReader hostIDReader = &hostIDReaderWindows{} opentelemetry-go-1.21.0/sdk/resource/host_id_windows_test.go000066400000000000000000000016301452547353200243050ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build windows // +build windows package resource // import "go.opentelemetry.io/otel/sdk/resource" import ( "testing" "github.com/stretchr/testify/require" ) func TestReader(t *testing.T) { reader := &hostIDReaderWindows{} result, err := reader.read() require.NoError(t, err) require.NotEmpty(t, result) } opentelemetry-go-1.21.0/sdk/resource/os.go000066400000000000000000000060311452547353200204640ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package resource // import "go.opentelemetry.io/otel/sdk/resource" import ( "context" "strings" "go.opentelemetry.io/otel/attribute" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" ) type osDescriptionProvider func() (string, error) var defaultOSDescriptionProvider osDescriptionProvider = platformOSDescription var osDescription = defaultOSDescriptionProvider func setDefaultOSDescriptionProvider() { setOSDescriptionProvider(defaultOSDescriptionProvider) } func setOSDescriptionProvider(osDescriptionProvider osDescriptionProvider) { osDescription = osDescriptionProvider } type ( osTypeDetector struct{} osDescriptionDetector struct{} ) // Detect returns a *Resource that describes the operating system type the // service is running on. func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) { osType := runtimeOS() osTypeAttribute := mapRuntimeOSToSemconvOSType(osType) return NewWithAttributes( semconv.SchemaURL, osTypeAttribute, ), nil } // Detect returns a *Resource that describes the operating system the // service is running on. func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { description, err := osDescription() if err != nil { return nil, err } return NewWithAttributes( semconv.SchemaURL, semconv.OSDescription(description), ), nil } // mapRuntimeOSToSemconvOSType translates the OS name as provided by the Go runtime // into an OS type attribute with the corresponding value defined by the semantic // conventions. In case the provided OS name isn't mapped, it's transformed to lowercase // and used as the value for the returned OS type attribute. 
func mapRuntimeOSToSemconvOSType(osType string) attribute.KeyValue { // the elements in this map are the intersection between // available GOOS values and defined semconv OS types osTypeAttributeMap := map[string]attribute.KeyValue{ "aix": semconv.OSTypeAIX, "darwin": semconv.OSTypeDarwin, "dragonfly": semconv.OSTypeDragonflyBSD, "freebsd": semconv.OSTypeFreeBSD, "linux": semconv.OSTypeLinux, "netbsd": semconv.OSTypeNetBSD, "openbsd": semconv.OSTypeOpenBSD, "solaris": semconv.OSTypeSolaris, "windows": semconv.OSTypeWindows, "zos": semconv.OSTypeZOS, } var osTypeAttribute attribute.KeyValue if attr, ok := osTypeAttributeMap[osType]; ok { osTypeAttribute = attr } else { osTypeAttribute = semconv.OSTypeKey.String(strings.ToLower(osType)) } return osTypeAttribute } opentelemetry-go-1.21.0/sdk/resource/os_release_darwin.go000066400000000000000000000062731452547353200235400ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package resource // import "go.opentelemetry.io/otel/sdk/resource" import ( "encoding/xml" "fmt" "io" "os" ) type plist struct { XMLName xml.Name `xml:"plist"` Dict dict `xml:"dict"` } type dict struct { Key []string `xml:"key"` String []string `xml:"string"` } // osRelease builds a string describing the operating system release based on the // contents of the property list (.plist) system files. If no .plist files are found, // or if the required properties to build the release description string are missing, // an empty string is returned instead. The generated string resembles the output of // the `sw_vers` commandline program, but in a single-line string. For more information // about the `sw_vers` program, see: https://www.unix.com/man-page/osx/1/SW_VERS. func osRelease() string { file, err := getPlistFile() if err != nil { return "" } defer file.Close() values, err := parsePlistFile(file) if err != nil { return "" } return buildOSRelease(values) } // getPlistFile returns a *os.File pointing to one of the well-known .plist files // available on macOS. If no file can be opened, it returns an error. func getPlistFile() (*os.File, error) { return getFirstAvailableFile([]string{ "/System/Library/CoreServices/SystemVersion.plist", "/System/Library/CoreServices/ServerVersion.plist", }) } // parsePlistFile process the file pointed by `file` as a .plist file and returns // a map with the key-values for each pair of correlated and elements // contained in it. func parsePlistFile(file io.Reader) (map[string]string, error) { var v plist err := xml.NewDecoder(file).Decode(&v) if err != nil { return nil, err } if len(v.Dict.Key) != len(v.Dict.String) { return nil, fmt.Errorf("the number of and elements doesn't match") } properties := make(map[string]string, len(v.Dict.Key)) for i, key := range v.Dict.Key { properties[key] = v.Dict.String[i] } return properties, nil } // buildOSRelease builds a string describing the OS release based on the properties // available on the provided map. 
It tries to find the `ProductName`, `ProductVersion` // and `ProductBuildVersion` properties. If some of these properties are not found, // it returns an empty string. func buildOSRelease(properties map[string]string) string { productName := properties["ProductName"] productVersion := properties["ProductVersion"] productBuildVersion := properties["ProductBuildVersion"] if productName == "" || productVersion == "" || productBuildVersion == "" { return "" } return fmt.Sprintf("%s %s (%s)", productName, productVersion, productBuildVersion) } opentelemetry-go-1.21.0/sdk/resource/os_release_darwin_test.go000066400000000000000000000113731452547353200245740ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package resource_test import ( "bytes" "io" "testing" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/sdk/resource" ) func TestParsePlistFile(t *testing.T) { standardPlist := bytes.NewBufferString(` ProductBuildVersion 20E232 ProductCopyright 1983-2021 Apple Inc. ProductName macOS ProductUserVisibleVersion 11.3 ProductVersion 11.3 iOSSupportVersion 14.5 `) parsedPlist := map[string]string{ "ProductBuildVersion": "20E232", "ProductCopyright": "1983-2021 Apple Inc.", "ProductName": "macOS", "ProductUserVisibleVersion": "11.3", "ProductVersion": "11.3", "iOSSupportVersion": "14.5", } emptyPlist := bytes.NewBufferString(` `) missingDictPlist := bytes.NewBufferString(` `) unknownElementsPlist := bytes.NewBufferString(` 123 ProductBuildVersion Value 20E232 `) parsedUnknownElementsPlist := map[string]string{ "ProductBuildVersion": "20E232", } tt := []struct { Name string Plist io.Reader Parsed map[string]string }{ {"Standard", standardPlist, parsedPlist}, {"Empty", emptyPlist, map[string]string{}}, {"Missing dict", missingDictPlist, map[string]string{}}, {"Unknown elements", unknownElementsPlist, parsedUnknownElementsPlist}, } for _, tc := range tt { tc := tc t.Run(tc.Name, func(t *testing.T) { result, err := resource.ParsePlistFile(tc.Plist) require.Equal(t, tc.Parsed, result) require.NoError(t, err) }) } } func TestParsePlistFileUnevenKeys(t *testing.T) { plist := bytes.NewBufferString(` ProductBuildVersion 20E232 ProductCopyright `) result, err := resource.ParsePlistFile(plist) require.Nil(t, result) require.Error(t, err) } func TestParsePlistFileMalformed(t *testing.T) { plist := bytes.NewBufferString(` Product `) result, err := resource.ParsePlistFile(plist) require.Nil(t, result) require.Error(t, err) } func TestBuildOSRelease(t *testing.T) { tt := []struct { Name string Properties map[string]string OSRelease string }{ {"Empty properties", map[string]string{}, ""}, {"Empty properties (nil)", nil, ""}, {"Missing product name", map[string]string{ "ProductVersion": "11.3", "ProductBuildVersion": "20E232", }, ""}, {"Missing product version", map[string]string{ "ProductName": "macOS", "ProductBuildVersion": "20E232", }, ""}, {"Missing product build version", map[string]string{ "ProductName": "macOS", "ProductVersion": 
"11.3", }, ""}, {"All properties available", map[string]string{ "ProductName": "macOS", "ProductVersion": "11.3", "ProductBuildVersion": "20E232", }, "macOS 11.3 (20E232)"}, } for _, tc := range tt { tc := tc t.Run(tc.Name, func(t *testing.T) { result := resource.BuildOSRelease(tc.Properties) require.Equal(t, tc.OSRelease, result) }) } } opentelemetry-go-1.21.0/sdk/resource/os_release_unix.go000066400000000000000000000111751452547353200232340ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos // +build aix dragonfly freebsd linux netbsd openbsd solaris zos package resource // import "go.opentelemetry.io/otel/sdk/resource" import ( "bufio" "fmt" "io" "os" "strings" ) // osRelease builds a string describing the operating system release based on the // properties of the os-release file. If no os-release file is found, or if the // required properties to build the release description string are missing, an empty // string is returned instead. For more information about os-release files, see: // https://www.freedesktop.org/software/systemd/man/os-release.html func osRelease() string { file, err := getOSReleaseFile() if err != nil { return "" } defer file.Close() values := parseOSReleaseFile(file) return buildOSRelease(values) } // getOSReleaseFile returns a *os.File pointing to one of the well-known os-release // files, according to their order of preference. If no file can be opened, it // returns an error. func getOSReleaseFile() (*os.File, error) { return getFirstAvailableFile([]string{"/etc/os-release", "/usr/lib/os-release"}) } // parseOSReleaseFile process the file pointed by `file` as an os-release file and // returns a map with the key-values contained in it. Empty lines or lines starting // with a '#' character are ignored, as well as lines with the missing key=value // separator. Values are unquoted and unescaped. func parseOSReleaseFile(file io.Reader) map[string]string { values := make(map[string]string) scanner := bufio.NewScanner(file) for scanner.Scan() { line := scanner.Text() if skip(line) { continue } key, value, ok := parse(line) if ok { values[key] = value } } return values } // skip returns true if the line is blank or starts with a '#' character, and // therefore should be skipped from processing. func skip(line string) bool { line = strings.TrimSpace(line) return len(line) == 0 || strings.HasPrefix(line, "#") } // parse attempts to split the provided line on the first '=' character, and then // sanitize each side of the split before returning them as a key-value pair. 
func parse(line string) (string, string, bool) { k, v, found := strings.Cut(line, "=") if !found || len(k) == 0 { return "", "", false } key := strings.TrimSpace(k) value := unescape(unquote(strings.TrimSpace(v))) return key, value, true } // unquote checks whether the string `s` is quoted with double or single quotes // and, if so, returns a version of the string without them. Otherwise it returns // the provided string unchanged. func unquote(s string) string { if len(s) < 2 { return s } if (s[0] == '"' || s[0] == '\'') && s[0] == s[len(s)-1] { return s[1 : len(s)-1] } return s } // unescape removes the `\` prefix from some characters that are expected // to have it added in front of them for escaping purposes. func unescape(s string) string { return strings.NewReplacer( `\$`, `$`, `\"`, `"`, `\'`, `'`, `\\`, `\`, "\\`", "`", ).Replace(s) } // buildOSRelease builds a string describing the OS release based on the properties // available on the provided map. It favors a combination of the `NAME` and `VERSION` // properties as first option (falling back to `VERSION_ID` if `VERSION` isn't // found), and using `PRETTY_NAME` alone if some of the previous are not present. If // none of these properties are found, it returns an empty string. // // The rationale behind not using `PRETTY_NAME` as first choice was that, for some // Linux distributions, it doesn't include the same detail that can be found on the // individual `NAME` and `VERSION` properties, and combining `PRETTY_NAME` with // other properties can produce "pretty" redundant strings in some cases. func buildOSRelease(values map[string]string) string { var osRelease string name := values["NAME"] version := values["VERSION"] if version == "" { version = values["VERSION_ID"] } if name != "" && version != "" { osRelease = fmt.Sprintf("%s %s", name, version) } else { osRelease = values["PRETTY_NAME"] } return osRelease } opentelemetry-go-1.21.0/sdk/resource/os_release_unix_test.go000066400000000000000000000214331452547353200242710ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
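// Example (illustrative only): a standalone sketch of the preference order
// implemented by buildOSRelease above — NAME plus VERSION (or VERSION_ID as a
// fallback), with PRETTY_NAME used only when one of those is missing. The
// sample values mirror the Ubuntu fixture in the tests below.
package main

import "fmt"

func main() {
	values := map[string]string{
		"NAME":        "Ubuntu",
		"VERSION":     "20.04.2 LTS (Focal Fossa)",
		"PRETTY_NAME": "Ubuntu 20.04.2 LTS",
	}

	version := values["VERSION"]
	if version == "" {
		version = values["VERSION_ID"]
	}
	release := values["PRETTY_NAME"]
	if values["NAME"] != "" && version != "" {
		release = fmt.Sprintf("%s %s", values["NAME"], version)
	}
	fmt.Println(release) // Ubuntu 20.04.2 LTS (Focal Fossa)
}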
//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos // +build aix dragonfly freebsd linux netbsd openbsd solaris zos package resource_test import ( "bytes" "io" "testing" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/sdk/resource" ) func TestParseOSReleaseFile(t *testing.T) { osReleaseUbuntu := bytes.NewBufferString(`NAME="Ubuntu" VERSION="20.04.2 LTS (Focal Fossa)" ID=ubuntu ID_LIKE=debian PRETTY_NAME="Ubuntu 20.04.2 LTS" VERSION_ID="20.04" HOME_URL="https://www.ubuntu.com/" SUPPORT_URL="https://help.ubuntu.com/" BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/" PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy" VERSION_CODENAME=focal UBUNTU_CODENAME=focal`) parsedUbuntu := map[string]string{ "NAME": "Ubuntu", "VERSION": "20.04.2 LTS (Focal Fossa)", "ID": "ubuntu", "ID_LIKE": "debian", "PRETTY_NAME": "Ubuntu 20.04.2 LTS", "VERSION_ID": "20.04", "HOME_URL": "https://www.ubuntu.com/", "SUPPORT_URL": "https://help.ubuntu.com/", "BUG_REPORT_URL": "https://bugs.launchpad.net/ubuntu/", "PRIVACY_POLICY_URL": "https://www.ubuntu.com/legal/terms-and-policies/privacy-policy", "VERSION_CODENAME": "focal", "UBUNTU_CODENAME": "focal", } osReleaseDebian := bytes.NewBufferString(`PRETTY_NAME="Debian GNU/Linux 10 (buster)" NAME="Debian GNU/Linux" VERSION_ID="10" VERSION="10 (buster)" VERSION_CODENAME=buster ID=debian HOME_URL="https://www.debian.org/" SUPPORT_URL="https://www.debian.org/support" BUG_REPORT_URL="https://bugs.debian.org/"`) parsedDebian := map[string]string{ "PRETTY_NAME": "Debian GNU/Linux 10 (buster)", "NAME": "Debian GNU/Linux", "VERSION_ID": "10", "VERSION": "10 (buster)", "VERSION_CODENAME": "buster", "ID": "debian", "HOME_URL": "https://www.debian.org/", "SUPPORT_URL": "https://www.debian.org/support", "BUG_REPORT_URL": "https://bugs.debian.org/", } osReleaseAlpine := bytes.NewBufferString(`NAME="Alpine Linux" ID=alpine VERSION_ID=3.13.4 PRETTY_NAME="Alpine Linux v3.13" HOME_URL="https://alpinelinux.org/" BUG_REPORT_URL="https://bugs.alpinelinux.org/"`) parsedAlpine := map[string]string{ "NAME": "Alpine Linux", "ID": "alpine", "VERSION_ID": "3.13.4", "PRETTY_NAME": "Alpine Linux v3.13", "HOME_URL": "https://alpinelinux.org/", "BUG_REPORT_URL": "https://bugs.alpinelinux.org/", } osReleaseMock := bytes.NewBufferString(` # This line should be skipped QUOTED1="Quoted value 1" QUOTED2='Quoted value 2' ESCAPED1="\$HOME" ESCAPED2="\"release\"" ESCAPED3="rock\'n\'roll" ESCAPED4="\\var" =line with missing key should be skipped PROP1=name=john PROP2 = Value PROP3='This value will be overwritten by the next one' PROP3='Final value'`) parsedMock := map[string]string{ "QUOTED1": "Quoted value 1", "QUOTED2": "Quoted value 2", "ESCAPED1": "$HOME", "ESCAPED2": `"release"`, "ESCAPED3": "rock'n'roll", "ESCAPED4": `\var`, "PROP1": "name=john", "PROP2": "Value", "PROP3": "Final value", } tt := []struct { Name string OSRelease io.Reader Parsed map[string]string }{ {"Ubuntu", osReleaseUbuntu, parsedUbuntu}, {"Debian", osReleaseDebian, parsedDebian}, {"Alpine", osReleaseAlpine, parsedAlpine}, {"Mock", osReleaseMock, parsedMock}, } for _, tc := range tt { tc := tc t.Run(tc.Name, func(t *testing.T) { result := resource.ParseOSReleaseFile(tc.OSRelease) require.EqualValues(t, tc.Parsed, result) }) } } func TestSkip(t *testing.T) { tt := []struct { Name string Line string Expected bool }{ {"Empty string", "", true}, {"Only whitespace", " ", true}, {"Hashtag prefix 1", "# Sample text", true}, {"Hashtag prefix 2", " # Sample 
text", true}, {"Hashtag and whitespace 1", "# ", true}, {"Hashtag and whitespace 2", " #", true}, {"Hashtag and whitespace 3", " # ", true}, {"Nonempty string", "Sample text", false}, {"Nonempty string with whitespace around", " Sample text ", false}, {"Nonempty string with middle hashtag", "Sample #text", false}, {"Nonempty string with ending hashtag", "Sample text #", false}, } for _, tc := range tt { tc := tc t.Run(tc.Name, func(t *testing.T) { result := resource.Skip(tc.Line) require.EqualValues(t, tc.Expected, result) }) } } func TestParse(t *testing.T) { tt := []struct { Name string Line string ExpectedKey string ExpectedValue string OK bool }{ {"Empty string", "", "", "", false}, {"No separator", "wrong", "", "", false}, {"Empty key", "=john", "", "", false}, {"Empty key value", "=", "", "", false}, {"Empty value", "name=", "name", "", true}, {"Key value 1", "name=john", "name", "john", true}, {"Key value 2", "name=john=dev", "name", "john=dev", true}, } for _, tc := range tt { tc := tc t.Run(tc.Name, func(t *testing.T) { key, value, ok := resource.Parse(tc.Line) require.EqualValues(t, tc.ExpectedKey, key) require.EqualValues(t, tc.ExpectedValue, value) require.EqualValues(t, tc.OK, ok) }) } } func TestUnquote(t *testing.T) { tt := []struct { Name string Text string Expected string }{ {"Empty string", ``, ``}, {"Single double quote", `"`, `"`}, {"Single single quote", `'`, `'`}, {"Empty double quotes", `""`, ``}, {"Empty single quotes", `''`, ``}, {"Empty mixed quotes 1", `"'`, `"'`}, {"Empty mixed quotes 2", `'"`, `'"`}, {"Double quotes", `"Sample text"`, `Sample text`}, {"Single quotes", `'Sample text'`, `Sample text`}, {"Half-open starting double quote", `"Sample text`, `"Sample text`}, {"Half-open ending double quote", `Sample text"`, `Sample text"`}, {"Half-open starting single quote", `'Sample text`, `'Sample text`}, {"Half-open ending single quote", `Sample text'`, `Sample text'`}, {"Double double quotes", `""Sample text""`, `"Sample text"`}, {"Double single quotes", `''Sample text''`, `'Sample text'`}, {"Mismatch quotes 1", `"Sample text'`, `"Sample text'`}, {"Mismatch quotes 2", `'Sample text"`, `'Sample text"`}, {"No quotes", `Sample text`, `Sample text`}, {"Internal double quote", `Sample "text"`, `Sample "text"`}, {"Internal single quote", `Sample 'text'`, `Sample 'text'`}, } for _, tc := range tt { tc := tc t.Run(tc.Name, func(t *testing.T) { result := resource.Unquote(tc.Text) require.EqualValues(t, tc.Expected, result) }) } } func TestUnescape(t *testing.T) { tt := []struct { Name string Text string Expected string }{ {"Empty string", ``, ``}, {"Escaped dollar sign", `\$var`, `$var`}, {"Escaped double quote", `\"var`, `"var`}, {"Escaped single quote", `\'var`, `'var`}, {"Escaped backslash", `\\var`, `\var`}, {"Escaped backtick", "\\`var", "`var"}, } for _, tc := range tt { tc := tc t.Run(tc.Name, func(t *testing.T) { result := resource.Unescape(tc.Text) require.EqualValues(t, tc.Expected, result) }) } } func TestBuildOSRelease(t *testing.T) { tt := []struct { Name string Values map[string]string Expected string }{ {"Nil values", nil, ""}, {"Empty values", map[string]string{}, ""}, {"Name and version only", map[string]string{ "NAME": "Ubuntu", "VERSION": "20.04.2 LTS (Focal Fossa)", }, "Ubuntu 20.04.2 LTS (Focal Fossa)"}, {"Name and version preferred", map[string]string{ "NAME": "Ubuntu", "VERSION": "20.04.2 LTS (Focal Fossa)", "VERSION_ID": "20.04", "PRETTY_NAME": "Ubuntu 20.04.2 LTS", }, "Ubuntu 20.04.2 LTS (Focal Fossa)"}, {"Version ID fallback", map[string]string{ 
"NAME": "Ubuntu", "VERSION_ID": "20.04", }, "Ubuntu 20.04"}, {"Pretty name fallback due to missing name", map[string]string{ "VERSION": "20.04.2 LTS (Focal Fossa)", "PRETTY_NAME": "Ubuntu 20.04.2 LTS", }, "Ubuntu 20.04.2 LTS"}, {"Pretty name fallback due to missing version", map[string]string{ "NAME": "Ubuntu", "PRETTY_NAME": "Ubuntu 20.04.2 LTS", }, "Ubuntu 20.04.2 LTS"}, } for _, tc := range tt { tc := tc t.Run(tc.Name, func(t *testing.T) { result := resource.BuildOSRelease(tc.Values) require.EqualValues(t, tc.Expected, result) }) } } opentelemetry-go-1.21.0/sdk/resource/os_test.go000066400000000000000000000036741452547353200215350ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package resource_test import ( "testing" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/resource" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" ) func mockRuntimeProviders() { resource.SetRuntimeProviders( fakeRuntimeNameProvider, fakeRuntimeVersionProvider, func() string { return "LINUX" }, fakeRuntimeArchProvider, ) resource.SetOSDescriptionProvider( func() (string, error) { return "Test", nil }, ) } func TestMapRuntimeOSToSemconvOSType(t *testing.T) { tt := []struct { Name string Goos string OSType attribute.KeyValue }{ {"Apple Darwin", "darwin", semconv.OSTypeDarwin}, {"DragonFly BSD", "dragonfly", semconv.OSTypeDragonflyBSD}, {"FreeBSD", "freebsd", semconv.OSTypeFreeBSD}, {"Linux", "linux", semconv.OSTypeLinux}, {"NetBSD", "netbsd", semconv.OSTypeNetBSD}, {"OpenBSD", "openbsd", semconv.OSTypeOpenBSD}, {"Oracle Solaris", "solaris", semconv.OSTypeSolaris}, {"Microsoft Windows", "windows", semconv.OSTypeWindows}, {"Unknown", "unknown", semconv.OSTypeKey.String("unknown")}, {"UNKNOWN", "UNKNOWN", semconv.OSTypeKey.String("unknown")}, } for _, tc := range tt { tc := tc t.Run(tc.Name, func(t *testing.T) { osTypeAttribute := resource.MapRuntimeOSToSemconvOSType(tc.Goos) require.EqualValues(t, osTypeAttribute, tc.OSType) }) } } opentelemetry-go-1.21.0/sdk/resource/os_unix.go000066400000000000000000000052101452547353200215250ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package resource // import "go.opentelemetry.io/otel/sdk/resource" import ( "fmt" "os" "golang.org/x/sys/unix" ) type unameProvider func(buf *unix.Utsname) (err error) var defaultUnameProvider unameProvider = unix.Uname var currentUnameProvider = defaultUnameProvider func setDefaultUnameProvider() { setUnameProvider(defaultUnameProvider) } func setUnameProvider(unameProvider unameProvider) { currentUnameProvider = unameProvider } // platformOSDescription returns a human readable OS version information string. // The final string combines OS release information (where available) and the // result of the `uname` system call. func platformOSDescription() (string, error) { uname, err := uname() if err != nil { return "", err } osRelease := osRelease() if osRelease != "" { return fmt.Sprintf("%s (%s)", osRelease, uname), nil } return uname, nil } // uname issues a uname(2) system call (or equivalent on systems which doesn't // have one) and formats the output in a single string, similar to the output // of the `uname` commandline program. The final string resembles the one // obtained with a call to `uname -snrvm`. func uname() (string, error) { var utsName unix.Utsname err := currentUnameProvider(&utsName) if err != nil { return "", err } return fmt.Sprintf("%s %s %s %s %s", unix.ByteSliceToString(utsName.Sysname[:]), unix.ByteSliceToString(utsName.Nodename[:]), unix.ByteSliceToString(utsName.Release[:]), unix.ByteSliceToString(utsName.Version[:]), unix.ByteSliceToString(utsName.Machine[:]), ), nil } // getFirstAvailableFile returns an *os.File of the first available // file from a list of candidate file paths. func getFirstAvailableFile(candidates []string) (*os.File, error) { for _, c := range candidates { file, err := os.Open(c) if err == nil { return file, nil } } return nil, fmt.Errorf("no candidate file available: %v", candidates) } opentelemetry-go-1.21.0/sdk/resource/os_unix_test.go000066400000000000000000000063071452547353200225740ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
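// Example (illustrative only, Unix-like systems): a standalone sketch of the
// `uname -snrvm`-style formatting produced by uname above, using the same
// golang.org/x/sys/unix calls.
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var uts unix.Utsname
	if err := unix.Uname(&uts); err != nil {
		fmt.Println("uname failed:", err)
		return
	}
	fmt.Printf("%s %s %s %s %s\n",
		unix.ByteSliceToString(uts.Sysname[:]),
		unix.ByteSliceToString(uts.Nodename[:]),
		unix.ByteSliceToString(uts.Release[:]),
		unix.ByteSliceToString(uts.Version[:]),
		unix.ByteSliceToString(uts.Machine[:]),
	)
}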
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package resource_test import ( "fmt" "os" "testing" "github.com/stretchr/testify/require" "golang.org/x/sys/unix" "go.opentelemetry.io/otel/sdk/resource" ) func fakeUnameProvider(buf *unix.Utsname) error { copy(buf.Sysname[:], "Mock OS") copy(buf.Nodename[:], "DESKTOP-PC") copy(buf.Release[:], "5.0.0") copy(buf.Version[:], "#1 SMP Thu May 6 12:34:56 UTC 2021") copy(buf.Machine[:], "x86_64") return nil } func fakeUnameProviderWithError(buf *unix.Utsname) error { return fmt.Errorf("error invoking uname(2)") } func TestUname(t *testing.T) { resource.SetUnameProvider(fakeUnameProvider) uname, err := resource.Uname() require.Equal(t, uname, "Mock OS DESKTOP-PC 5.0.0 #1 SMP Thu May 6 12:34:56 UTC 2021 x86_64") require.NoError(t, err) resource.SetDefaultUnameProvider() } func TestUnameError(t *testing.T) { resource.SetUnameProvider(fakeUnameProviderWithError) uname, err := resource.Uname() require.Empty(t, uname) require.Error(t, err) resource.SetDefaultUnameProvider() } func TestGetFirstAvailableFile(t *testing.T) { tempDir := t.TempDir() file1, _ := os.CreateTemp(tempDir, "candidate_") file2, _ := os.CreateTemp(tempDir, "candidate_") filename1, filename2 := file1.Name(), file2.Name() tt := []struct { Name string Candidates []string ExpectedFileName string ExpectedErr string }{ {"Gets first, skip second candidate", []string{filename1, filename2}, filename1, ""}, {"Skips first, gets second candidate", []string{"does_not_exists", filename2}, filename2, ""}, {"Skips first, gets second, ignores third candidate", []string{"does_not_exists", filename2, filename1}, filename2, ""}, {"No candidates (empty slice)", []string{}, "", "no candidate file available: []"}, {"No candidates (nil slice)", nil, "", "no candidate file available: []"}, {"Single nonexisting candidate", []string{"does_not_exists"}, "", "no candidate file available: [does_not_exists]"}, {"Multiple nonexisting candidates", []string{"does_not_exists", "this_either"}, "", "no candidate file available: [does_not_exists this_either]"}, } for _, tc := range tt { tc := tc t.Run(tc.Name, func(t *testing.T) { file, err := resource.GetFirstAvailableFile(tc.Candidates) filename := "" if file != nil { filename = file.Name() } errString := "" if err != nil { errString = err.Error() } require.Equal(t, tc.ExpectedFileName, filename) require.Equal(t, tc.ExpectedErr, errString) }) } } opentelemetry-go-1.21.0/sdk/resource/os_unsupported.go000066400000000000000000000022071452547353200231350ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
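// Example (not part of this package): a minimal sketch of obtaining the
// os.type and os.description attributes produced by the detectors in os.go
// above via the SDK's public entry points. It assumes the exported
// resource.New constructor and resource.WithOS option from this package.
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	res, err := resource.New(context.Background(), resource.WithOS())
	if err != nil {
		fmt.Println("detect error:", err)
	}
	// On Linux this typically prints os.type=linux plus an os.description
	// line combining the os-release and uname information shown above.
	for _, kv := range res.Attributes() {
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.AsString())
	}
}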
// +build !aix // +build !darwin // +build !dragonfly // +build !freebsd // +build !linux // +build !netbsd // +build !openbsd // +build !solaris // +build !windows // +build !zos package resource // import "go.opentelemetry.io/otel/sdk/resource" // platformOSDescription is a placeholder implementation for OSes // for which this project currently doesn't support os.description // attribute detection. See build tags declaration early on this file // for a list of unsupported OSes. func platformOSDescription() (string, error) { return "", nil } opentelemetry-go-1.21.0/sdk/resource/os_windows.go000066400000000000000000000054701452547353200222440ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package resource // import "go.opentelemetry.io/otel/sdk/resource" import ( "fmt" "strconv" "golang.org/x/sys/windows/registry" ) // platformOSDescription returns a human readable OS version information string. // It does so by querying registry values under the // `SOFTWARE\Microsoft\Windows NT\CurrentVersion` key. The final string // resembles the one displayed by the Version Reporter Applet (winver.exe). func platformOSDescription() (string, error) { k, err := registry.OpenKey( registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) if err != nil { return "", err } defer k.Close() var ( productName = readProductName(k) displayVersion = readDisplayVersion(k) releaseID = readReleaseID(k) currentMajorVersionNumber = readCurrentMajorVersionNumber(k) currentMinorVersionNumber = readCurrentMinorVersionNumber(k) currentBuildNumber = readCurrentBuildNumber(k) ubr = readUBR(k) ) if displayVersion != "" { displayVersion += " " } return fmt.Sprintf("%s %s(%s) [Version %s.%s.%s.%s]", productName, displayVersion, releaseID, currentMajorVersionNumber, currentMinorVersionNumber, currentBuildNumber, ubr, ), nil } func getStringValue(name string, k registry.Key) string { value, _, _ := k.GetStringValue(name) return value } func getIntegerValue(name string, k registry.Key) uint64 { value, _, _ := k.GetIntegerValue(name) return value } func readProductName(k registry.Key) string { return getStringValue("ProductName", k) } func readDisplayVersion(k registry.Key) string { return getStringValue("DisplayVersion", k) } func readReleaseID(k registry.Key) string { return getStringValue("ReleaseID", k) } func readCurrentMajorVersionNumber(k registry.Key) string { return strconv.FormatUint(getIntegerValue("CurrentMajorVersionNumber", k), 10) } func readCurrentMinorVersionNumber(k registry.Key) string { return strconv.FormatUint(getIntegerValue("CurrentMinorVersionNumber", k), 10) } func readCurrentBuildNumber(k registry.Key) string { return getStringValue("CurrentBuildNumber", k) } func readUBR(k registry.Key) string { return strconv.FormatUint(getIntegerValue("UBR", k), 10) } opentelemetry-go-1.21.0/sdk/resource/os_windows_test.go000066400000000000000000000035551452547353200233050ustar00rootroot00000000000000// Copyright The 
OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package resource_test import ( "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/sys/windows/registry" "go.opentelemetry.io/otel/sdk/resource" ) func TestPlatformOSDescription(t *testing.T) { osDescription, err := resource.PlatformOSDescription() require.NoError(t, err) require.Regexp(t, `^(\w+\s)+\(\d+\)\s\[Version\s\d+(\.\d+){3}\]$`, osDescription) } func TestReadRegistryValues(t *testing.T) { k, err := registry.OpenKey( registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) require.NoError(t, err, "should open Windows CurrentVersion registry key") defer k.Close() assert.NotEmpty(t, resource.ReadProductName(k), "should read ProductName") assert.NotEmpty(t, resource.ReadReleaseID(k), "should read ReleaseID") assert.NotEmpty(t, resource.ReadCurrentMajorVersionNumber(k), "should read CurrentMajorVersionNumber") assert.NotEmpty(t, resource.ReadCurrentMinorVersionNumber(k), "should read CurrentMinorVersionNumber") assert.NotEmpty(t, resource.ReadCurrentBuildNumber(k), "should read CurrentBuildNumber") assert.NotEmpty(t, resource.ReadUBR(k), "should read UBR") assert.NotPanics(t, func() { resource.ReadDisplayVersion(k) }, "should not panic when reading DisplayVersion") } opentelemetry-go-1.21.0/sdk/resource/process.go000066400000000000000000000137531452547353200215320ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
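// Illustrative note (not part of the upstream file): on Windows the registry-based
// platformOSDescription defined earlier yields a winver-style string of the form
// "<ProductName> <DisplayVersion> (<ReleaseID>) [Version <Major>.<Minor>.<Build>.<UBR>]",
// for example (values hypothetical):
//
//	Windows 10 Pro 21H2 (2009) [Version 10.0.19044.1586]
//
// which is the shape checked by the regular expression in TestPlatformOSDescription above.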
package resource // import "go.opentelemetry.io/otel/sdk/resource" import ( "context" "fmt" "os" "os/user" "path/filepath" "runtime" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" ) type ( pidProvider func() int executablePathProvider func() (string, error) commandArgsProvider func() []string ownerProvider func() (*user.User, error) runtimeNameProvider func() string runtimeVersionProvider func() string runtimeOSProvider func() string runtimeArchProvider func() string ) var ( defaultPidProvider pidProvider = os.Getpid defaultExecutablePathProvider executablePathProvider = os.Executable defaultCommandArgsProvider commandArgsProvider = func() []string { return os.Args } defaultOwnerProvider ownerProvider = user.Current defaultRuntimeNameProvider runtimeNameProvider = func() string { if runtime.Compiler == "gc" { return "go" } return runtime.Compiler } defaultRuntimeVersionProvider runtimeVersionProvider = runtime.Version defaultRuntimeOSProvider runtimeOSProvider = func() string { return runtime.GOOS } defaultRuntimeArchProvider runtimeArchProvider = func() string { return runtime.GOARCH } ) var ( pid = defaultPidProvider executablePath = defaultExecutablePathProvider commandArgs = defaultCommandArgsProvider owner = defaultOwnerProvider runtimeName = defaultRuntimeNameProvider runtimeVersion = defaultRuntimeVersionProvider runtimeOS = defaultRuntimeOSProvider runtimeArch = defaultRuntimeArchProvider ) func setDefaultOSProviders() { setOSProviders( defaultPidProvider, defaultExecutablePathProvider, defaultCommandArgsProvider, ) } func setOSProviders( pidProvider pidProvider, executablePathProvider executablePathProvider, commandArgsProvider commandArgsProvider, ) { pid = pidProvider executablePath = executablePathProvider commandArgs = commandArgsProvider } func setDefaultRuntimeProviders() { setRuntimeProviders( defaultRuntimeNameProvider, defaultRuntimeVersionProvider, defaultRuntimeOSProvider, defaultRuntimeArchProvider, ) } func setRuntimeProviders( runtimeNameProvider runtimeNameProvider, runtimeVersionProvider runtimeVersionProvider, runtimeOSProvider runtimeOSProvider, runtimeArchProvider runtimeArchProvider, ) { runtimeName = runtimeNameProvider runtimeVersion = runtimeVersionProvider runtimeOS = runtimeOSProvider runtimeArch = runtimeArchProvider } func setDefaultUserProviders() { setUserProviders(defaultOwnerProvider) } func setUserProviders(ownerProvider ownerProvider) { owner = ownerProvider } type ( processPIDDetector struct{} processExecutableNameDetector struct{} processExecutablePathDetector struct{} processCommandArgsDetector struct{} processOwnerDetector struct{} processRuntimeNameDetector struct{} processRuntimeVersionDetector struct{} processRuntimeDescriptionDetector struct{} ) // Detect returns a *Resource that describes the process identifier (PID) of the // executing process. func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) { return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil } // Detect returns a *Resource that describes the name of the process executable. func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) { executableName := filepath.Base(commandArgs()[0]) return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil } // Detect returns a *Resource that describes the full path of the process executable. 
func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) { executablePath, err := executablePath() if err != nil { return nil, err } return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePath(executablePath)), nil } // Detect returns a *Resource that describes all the command arguments as received // by the process. func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) { return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil } // Detect returns a *Resource that describes the username of the user that owns the // process. func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) { owner, err := owner() if err != nil { return nil, err } return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwner(owner.Username)), nil } // Detect returns a *Resource that describes the name of the compiler used to compile // this process image. func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) { return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil } // Detect returns a *Resource that describes the version of the runtime of this process. func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) { return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil } // Detect returns a *Resource that describes the runtime of this process. func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { runtimeDescription := fmt.Sprintf( "go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch()) return NewWithAttributes( semconv.SchemaURL, semconv.ProcessRuntimeDescription(runtimeDescription), ), nil } opentelemetry-go-1.21.0/sdk/resource/process_test.go000066400000000000000000000101321452547353200225550ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
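// Illustrative sketch (not part of the upstream file): the process detectors above
// are normally enabled through the public resource options, e.g.
//
//	res, err := resource.New(context.Background(), resource.WithProcess())
//
// which populates attributes such as process.pid, process.executable.name,
// process.owner and process.runtime.description, the latter formatted as
// "go version <version> <GOOS>/<GOARCH>" (for example "go version go1.21.0 linux/amd64").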
package resource_test import ( "context" "fmt" "os" "os/user" "runtime" "testing" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/sdk/resource" ) var ( fakePID = 123 fakeExecutablePath = "/fake/path/mock" fakeCommandArgs = []string{"mock", "-t", "30"} fakeOwner = "gopher" fakeRuntimeName = "gcmock" fakeRuntimeVersion = "go1.2.3" fakeRuntimeOS = "linux" fakeRuntimeArch = "amd64" ) var ( fakeExecutableName = "mock" fakeRuntimeDescription = "go version go1.2.3 linux/amd64" ) var ( fakePidProvider = func() int { return fakePID } fakeExecutablePathProvider = func() (string, error) { return fakeExecutablePath, nil } fakeCommandArgsProvider = func() []string { return fakeCommandArgs } fakeOwnerProvider = func() (*user.User, error) { return &user.User{Username: fakeOwner}, nil } fakeRuntimeNameProvider = func() string { return fakeRuntimeName } fakeRuntimeVersionProvider = func() string { return fakeRuntimeVersion } fakeRuntimeOSProvider = func() string { return fakeRuntimeOS } fakeRuntimeArchProvider = func() string { return fakeRuntimeArch } ) var ( fakeExecutablePathProviderWithError = func() (string, error) { return "", fmt.Errorf("unable to get process executable") } fakeOwnerProviderWithError = func() (*user.User, error) { return nil, fmt.Errorf("unable to get process user") } ) func mockProcessAttributesProviders() { resource.SetOSProviders( fakePidProvider, fakeExecutablePathProvider, fakeCommandArgsProvider, ) resource.SetRuntimeProviders( fakeRuntimeNameProvider, fakeRuntimeVersionProvider, fakeRuntimeOSProvider, fakeRuntimeArchProvider, ) resource.SetUserProviders( fakeOwnerProvider, ) } func mockProcessAttributesProvidersWithErrors() { resource.SetOSProviders( fakePidProvider, fakeExecutablePathProviderWithError, fakeCommandArgsProvider, ) resource.SetRuntimeProviders( fakeRuntimeNameProvider, fakeRuntimeVersionProvider, fakeRuntimeOSProvider, fakeRuntimeArchProvider, ) resource.SetUserProviders( fakeOwnerProviderWithError, ) } func restoreAttributesProviders() { resource.SetDefaultOSProviders() resource.SetDefaultRuntimeProviders() resource.SetDefaultUserProviders() resource.SetDefaultOSDescriptionProvider() resource.SetDefaultContainerProviders() } func TestWithProcessFuncsErrors(t *testing.T) { mockProcessAttributesProvidersWithErrors() t.Run("WithExecutablePath", testWithProcessExecutablePathError) t.Run("WithOwner", testWithProcessOwnerError) restoreAttributesProviders() } func TestCommandArgs(t *testing.T) { require.EqualValues(t, os.Args, resource.CommandArgs()) } func TestRuntimeName(t *testing.T) { if runtime.Compiler == "gc" { require.EqualValues(t, "go", resource.RuntimeName()) } else { require.EqualValues(t, runtime.Compiler, resource.RuntimeName()) } } func TestRuntimeOS(t *testing.T) { require.EqualValues(t, runtime.GOOS, resource.RuntimeOS()) } func TestRuntimeArch(t *testing.T) { require.EqualValues(t, runtime.GOARCH, resource.RuntimeArch()) } func testWithProcessExecutablePathError(t *testing.T) { ctx := context.Background() res, err := resource.New(ctx, resource.WithProcessExecutablePath(), ) require.Error(t, err) require.EqualValues(t, map[string]string{}, toMap(res)) } func testWithProcessOwnerError(t *testing.T) { ctx := context.Background() res, err := resource.New(ctx, resource.WithProcessOwner(), ) require.Error(t, err) require.EqualValues(t, map[string]string{}, toMap(res)) } opentelemetry-go-1.21.0/sdk/resource/resource.go000066400000000000000000000171301452547353200216740ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // 
Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package resource // import "go.opentelemetry.io/otel/sdk/resource" import ( "context" "errors" "sync" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" ) // Resource describes an entity about which identifying information // and metadata is exposed. Resource is an immutable object, // equivalent to a map from key to unique value. // // Resources should be passed and stored as pointers // (`*resource.Resource`). The `nil` value is equivalent to an empty // Resource. type Resource struct { attrs attribute.Set schemaURL string } var ( defaultResource *Resource defaultResourceOnce sync.Once ) var errMergeConflictSchemaURL = errors.New("cannot merge resource due to conflicting Schema URL") // New returns a Resource combined from the user-provided detectors. func New(ctx context.Context, opts ...Option) (*Resource, error) { cfg := config{} for _, opt := range opts { cfg = opt.apply(cfg) } r := &Resource{schemaURL: cfg.schemaURL} return r, detect(ctx, r, cfg.detectors) } // NewWithAttributes creates a resource from attrs and associates the resource with a // schema URL. If attrs contains duplicate keys, the last value will be used. If attrs // contains any invalid items those items will be dropped. The attrs are assumed to be // in a schema identified by schemaURL. func NewWithAttributes(schemaURL string, attrs ...attribute.KeyValue) *Resource { resource := NewSchemaless(attrs...) resource.schemaURL = schemaURL return resource } // NewSchemaless creates a resource from attrs. If attrs contains duplicate keys, // the last value will be used. If attrs contains any invalid items those items will // be dropped. The resource will not be associated with a schema URL. If the schema // of the attrs is known use NewWithAttributes instead. func NewSchemaless(attrs ...attribute.KeyValue) *Resource { if len(attrs) == 0 { return &Resource{} } // Ensure attributes comply with the specification: // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/common/README.md#attribute s, _ := attribute.NewSetWithFiltered(attrs, func(kv attribute.KeyValue) bool { return kv.Valid() }) // If attrs only contains invalid entries do not allocate a new resource. if s.Len() == 0 { return &Resource{} } return &Resource{attrs: s} //nolint } // String implements the Stringer interface and provides a // human-readable form of the resource. // // Avoid using this representation as the key in a map of resources, // use Equivalent() as the key instead. func (r *Resource) String() string { if r == nil { return "" } return r.attrs.Encoded(attribute.DefaultEncoder()) } // MarshalLog is the marshaling function used by the logging system to represent this exporter. func (r *Resource) MarshalLog() interface{} { return struct { Attributes attribute.Set SchemaURL string }{ Attributes: r.attrs, SchemaURL: r.schemaURL, } } // Attributes returns a copy of attributes from the resource in a sorted order. // To avoid allocating a new slice, use an iterator. 
func (r *Resource) Attributes() []attribute.KeyValue { if r == nil { r = Empty() } return r.attrs.ToSlice() } // SchemaURL returns the schema URL associated with Resource r. func (r *Resource) SchemaURL() string { if r == nil { return "" } return r.schemaURL } // Iter returns an iterator of the Resource attributes. // This is ideal to use if you do not want a copy of the attributes. func (r *Resource) Iter() attribute.Iterator { if r == nil { r = Empty() } return r.attrs.Iter() } // Equal returns true when a Resource is equivalent to this Resource. func (r *Resource) Equal(eq *Resource) bool { if r == nil { r = Empty() } if eq == nil { eq = Empty() } return r.Equivalent() == eq.Equivalent() } // Merge creates a new resource by combining resource a and b. // // If there are common keys between resource a and b, then the value // from resource b will overwrite the value from resource a, even // if resource b's value is empty. // // The SchemaURL of the resources will be merged according to the spec rules: // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/resource/sdk.md#merge // If the resources have different non-empty schemaURL an empty resource and an error // will be returned. func Merge(a, b *Resource) (*Resource, error) { if a == nil && b == nil { return Empty(), nil } if a == nil { return b, nil } if b == nil { return a, nil } // Merge the schema URL. var schemaURL string switch true { case a.schemaURL == "": schemaURL = b.schemaURL case b.schemaURL == "": schemaURL = a.schemaURL case a.schemaURL == b.schemaURL: schemaURL = a.schemaURL default: return Empty(), errMergeConflictSchemaURL } // Note: 'b' attributes will overwrite 'a' with last-value-wins in attribute.Key() // Meaning this is equivalent to: append(a.Attributes(), b.Attributes()...) mi := attribute.NewMergeIterator(b.Set(), a.Set()) combine := make([]attribute.KeyValue, 0, a.Len()+b.Len()) for mi.Next() { combine = append(combine, mi.Attribute()) } merged := NewWithAttributes(schemaURL, combine...) return merged, nil } // Empty returns an instance of Resource with no attributes. It is // equivalent to a `nil` Resource. func Empty() *Resource { return &Resource{} } // Default returns an instance of Resource with a default // "service.name" and OpenTelemetrySDK attributes. func Default() *Resource { defaultResourceOnce.Do(func() { var err error defaultResource, err = Detect( context.Background(), defaultServiceNameDetector{}, fromEnv{}, telemetrySDK{}, ) if err != nil { otel.Handle(err) } // If Detect did not return a valid resource, fall back to emptyResource. if defaultResource == nil { defaultResource = &Resource{} } }) return defaultResource } // Environment returns an instance of Resource with attributes // extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable. func Environment() *Resource { detector := &fromEnv{} resource, err := detector.Detect(context.Background()) if err != nil { otel.Handle(err) } return resource } // Equivalent returns an object that can be compared for equality // between two resources. This value is suitable for use as a key in // a map. func (r *Resource) Equivalent() attribute.Distinct { return r.Set().Equivalent() } // Set returns the equivalent *attribute.Set of this resource's attributes. func (r *Resource) Set() *attribute.Set { if r == nil { r = Empty() } return &r.attrs } // MarshalJSON encodes the resource attributes as a JSON list of { "Key": // "...", "Value": ... } pairs in order sorted by key. 
func (r *Resource) MarshalJSON() ([]byte, error) { if r == nil { r = Empty() } return r.attrs.MarshalJSON() } // Len returns the number of unique key-values in this Resource. func (r *Resource) Len() int { if r == nil { return 0 } return r.attrs.Len() } // Encoded returns an encoded representation of the resource. func (r *Resource) Encoded(enc attribute.Encoder) string { if r == nil { return "" } return r.attrs.Encoded(enc) } opentelemetry-go-1.21.0/sdk/resource/resource_test.go000066400000000000000000000472531452547353200227440ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package resource_test import ( "context" "encoding/json" "errors" "fmt" "os" "strings" "sync" "testing" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk" ottest "go.opentelemetry.io/otel/sdk/internal/internaltest" "go.opentelemetry.io/otel/sdk/resource" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" ) var ( kv11 = attribute.String("k1", "v11") kv12 = attribute.String("k1", "v12") kv21 = attribute.String("k2", "v21") kv31 = attribute.String("k3", "v31") kv41 = attribute.String("k4", "v41") kv42 = attribute.String("k4", "") ) func TestNewWithAttributes(t *testing.T) { cases := []struct { name string in []attribute.KeyValue want []attribute.KeyValue }{ { name: "Key with common key order1", in: []attribute.KeyValue{kv12, kv11, kv21}, want: []attribute.KeyValue{kv11, kv21}, }, { name: "Key with common key order2", in: []attribute.KeyValue{kv11, kv12, kv21}, want: []attribute.KeyValue{kv12, kv21}, }, { name: "Key with nil", in: nil, want: nil, }, } for _, c := range cases { t.Run(fmt.Sprintf("case-%s", c.name), func(t *testing.T) { res := resource.NewSchemaless(c.in...) 
if diff := cmp.Diff( res.Attributes(), c.want, cmp.AllowUnexported(attribute.Value{})); diff != "" { t.Fatalf("unwanted result: diff %+v,", diff) } }) } } func TestMerge(t *testing.T) { cases := []struct { name string a, b *resource.Resource want []attribute.KeyValue isErr bool schemaURL string }{ { name: "Merge 2 nils", a: nil, b: nil, want: nil, }, { name: "Merge with no overlap, no nil", a: resource.NewSchemaless(kv11, kv31), b: resource.NewSchemaless(kv21, kv41), want: []attribute.KeyValue{kv11, kv21, kv31, kv41}, }, { name: "Merge with no overlap, no nil, not interleaved", a: resource.NewSchemaless(kv11, kv21), b: resource.NewSchemaless(kv31, kv41), want: []attribute.KeyValue{kv11, kv21, kv31, kv41}, }, { name: "Merge with common key order1", a: resource.NewSchemaless(kv11), b: resource.NewSchemaless(kv12, kv21), want: []attribute.KeyValue{kv12, kv21}, }, { name: "Merge with common key order2", a: resource.NewSchemaless(kv12, kv21), b: resource.NewSchemaless(kv11), want: []attribute.KeyValue{kv11, kv21}, }, { name: "Merge with common key order4", a: resource.NewSchemaless(kv11, kv21, kv41), b: resource.NewSchemaless(kv31, kv41), want: []attribute.KeyValue{kv11, kv21, kv31, kv41}, }, { name: "Merge with no keys", a: resource.NewSchemaless(), b: resource.NewSchemaless(), want: nil, }, { name: "Merge with first resource no keys", a: resource.NewSchemaless(), b: resource.NewSchemaless(kv21), want: []attribute.KeyValue{kv21}, }, { name: "Merge with second resource no keys", a: resource.NewSchemaless(kv11), b: resource.NewSchemaless(), want: []attribute.KeyValue{kv11}, }, { name: "Merge with first resource nil", a: nil, b: resource.NewSchemaless(kv21), want: []attribute.KeyValue{kv21}, }, { name: "Merge with second resource nil", a: resource.NewSchemaless(kv11), b: nil, want: []attribute.KeyValue{kv11}, }, { name: "Merge with first resource value empty string", a: resource.NewSchemaless(kv42), b: resource.NewSchemaless(kv41), want: []attribute.KeyValue{kv41}, }, { name: "Merge with second resource value empty string", a: resource.NewSchemaless(kv41), b: resource.NewSchemaless(kv42), want: []attribute.KeyValue{kv42}, }, { name: "Merge with first resource with schema", a: resource.NewWithAttributes("https://opentelemetry.io/schemas/1.4.0", kv41), b: resource.NewSchemaless(kv42), want: []attribute.KeyValue{kv42}, schemaURL: "https://opentelemetry.io/schemas/1.4.0", }, { name: "Merge with second resource with schema", a: resource.NewSchemaless(kv41), b: resource.NewWithAttributes("https://opentelemetry.io/schemas/1.4.0", kv42), want: []attribute.KeyValue{kv42}, schemaURL: "https://opentelemetry.io/schemas/1.4.0", }, { name: "Merge with different schemas", a: resource.NewWithAttributes("https://opentelemetry.io/schemas/1.4.0", kv41), b: resource.NewWithAttributes("https://opentelemetry.io/schemas/1.3.0", kv42), want: nil, isErr: true, }, } for _, c := range cases { t.Run(fmt.Sprintf("case-%s", c.name), func(t *testing.T) { res, err := resource.Merge(c.a, c.b) if c.isErr { assert.Error(t, err) } else { assert.NoError(t, err) } assert.EqualValues(t, c.schemaURL, res.SchemaURL()) if diff := cmp.Diff( res.Attributes(), c.want, cmp.AllowUnexported(attribute.Value{})); diff != "" { t.Fatalf("unwanted result: diff %+v,", diff) } }) } } func TestEmpty(t *testing.T) { var res *resource.Resource assert.Equal(t, "", res.SchemaURL()) assert.Equal(t, "", res.String()) assert.Equal(t, []attribute.KeyValue(nil), res.Attributes()) it := res.Iter() assert.Equal(t, 0, it.Len()) assert.True(t, res.Equal(res)) } 
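// exampleMergeSketch is an illustrative sketch (not part of the upstream test
// suite) of the Merge semantics exercised in TestMerge above: on key conflicts
// the attributes of the second resource win, and two different non-empty schema
// URLs cause Merge to return an error.
func exampleMergeSketch() error {
	a := resource.NewWithAttributes("https://opentelemetry.io/schemas/1.4.0", attribute.String("k", "a"))
	b := resource.NewSchemaless(attribute.String("k", "b"))
	merged, err := resource.Merge(a, b)
	if err != nil {
		return err
	}
	// merged carries k=b and keeps the schema URL of the non-schemaless input.
	_ = merged
	return nil
}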
func TestDefault(t *testing.T) { res := resource.Default() require.False(t, res.Equal(resource.Empty())) require.True(t, res.Set().HasValue(semconv.ServiceNameKey)) serviceName, _ := res.Set().Value(semconv.ServiceNameKey) require.True(t, strings.HasPrefix(serviceName.AsString(), "unknown_service:")) require.Greaterf(t, len(serviceName.AsString()), len("unknown_service:"), "default service.name should include executable name") require.Contains(t, res.Attributes(), semconv.TelemetrySDKLanguageGo) require.Contains(t, res.Attributes(), semconv.TelemetrySDKVersion(sdk.Version())) require.Contains(t, res.Attributes(), semconv.TelemetrySDKName("opentelemetry")) } func TestString(t *testing.T) { for _, test := range []struct { kvs []attribute.KeyValue want string }{ { kvs: nil, want: "", }, { kvs: []attribute.KeyValue{}, want: "", }, { kvs: []attribute.KeyValue{kv11}, want: "k1=v11", }, { kvs: []attribute.KeyValue{kv11, kv12}, want: "k1=v12", }, { kvs: []attribute.KeyValue{kv11, kv21}, want: "k1=v11,k2=v21", }, { kvs: []attribute.KeyValue{kv21, kv11}, want: "k1=v11,k2=v21", }, { kvs: []attribute.KeyValue{kv11, kv21, kv31}, want: "k1=v11,k2=v21,k3=v31", }, { kvs: []attribute.KeyValue{kv31, kv11, kv21}, want: "k1=v11,k2=v21,k3=v31", }, { kvs: []attribute.KeyValue{attribute.String("A", "a"), attribute.String("B", "b")}, want: "A=a,B=b", }, { kvs: []attribute.KeyValue{attribute.String("A", "a,B=b")}, want: `A=a\,B\=b`, }, { kvs: []attribute.KeyValue{attribute.String("A", `a,B\=b`)}, want: `A=a\,B\\\=b`, }, { kvs: []attribute.KeyValue{attribute.String("A=a,B", `b`)}, want: `A\=a\,B=b`, }, { kvs: []attribute.KeyValue{attribute.String(`A=a\,B`, `b`)}, want: `A\=a\\\,B=b`, }, { kvs: []attribute.KeyValue{attribute.String("", "invalid")}, want: "", }, { kvs: []attribute.KeyValue{attribute.String("", "invalid"), attribute.String("B", "b")}, want: "B=b", }, } { if got := resource.NewSchemaless(test.kvs...).String(); got != test.want { t.Errorf("Resource(%v).String() = %q, want %q", test.kvs, got, test.want) } } } const envVar = "OTEL_RESOURCE_ATTRIBUTES" func TestMarshalJSON(t *testing.T) { r := resource.NewSchemaless(attribute.Int64("A", 1), attribute.String("C", "D")) data, err := json.Marshal(r) require.NoError(t, err) require.Equal(t, `[{"Key":"A","Value":{"Type":"INT64","Value":1}},{"Key":"C","Value":{"Type":"STRING","Value":"D"}}]`, string(data)) } func TestNew(t *testing.T) { tc := []struct { name string envars string detectors []resource.Detector options []resource.Option resourceValues map[string]string schemaURL string isErr bool }{ { name: "No Options returns empty resource", envars: "key=value,other=attr", options: nil, resourceValues: map[string]string{}, }, { name: "Nil Detectors works", envars: "key=value,other=attr", options: []resource.Option{ resource.WithDetectors(), }, resourceValues: map[string]string{}, }, { name: "Only Host", envars: "from=here", options: []resource.Option{ resource.WithHost(), }, resourceValues: map[string]string{ "host.name": hostname(), }, schemaURL: semconv.SchemaURL, }, { name: "Only Env", envars: "key=value,other=attr", options: []resource.Option{ resource.WithFromEnv(), }, resourceValues: map[string]string{ "key": "value", "other": "attr", }, }, { name: "Only TelemetrySDK", envars: "", options: []resource.Option{ resource.WithTelemetrySDK(), }, resourceValues: map[string]string{ "telemetry.sdk.name": "opentelemetry", "telemetry.sdk.language": "go", "telemetry.sdk.version": sdk.Version(), }, schemaURL: semconv.SchemaURL, }, { name: "WithAttributes", envars: 
"key=value,other=attr", options: []resource.Option{ resource.WithAttributes(attribute.String("A", "B")), }, resourceValues: map[string]string{ "A": "B", }, }, { name: "With schema url", envars: "", options: []resource.Option{ resource.WithAttributes(attribute.String("A", "B")), resource.WithSchemaURL("https://opentelemetry.io/schemas/1.0.0"), }, resourceValues: map[string]string{ "A": "B", }, schemaURL: "https://opentelemetry.io/schemas/1.0.0", }, { name: "With conflicting schema urls", envars: "", options: []resource.Option{ resource.WithDetectors( resource.StringDetector("https://opentelemetry.io/schemas/1.0.0", semconv.HostNameKey, os.Hostname), ), resource.WithSchemaURL("https://opentelemetry.io/schemas/1.1.0"), }, resourceValues: map[string]string{}, schemaURL: "", isErr: true, }, { name: "With conflicting detector schema urls", envars: "", options: []resource.Option{ resource.WithDetectors( resource.StringDetector("https://opentelemetry.io/schemas/1.0.0", semconv.HostNameKey, os.Hostname), resource.StringDetector("https://opentelemetry.io/schemas/1.1.0", semconv.HostNameKey, func() (string, error) { return "", errors.New("fail") }), ), resource.WithSchemaURL("https://opentelemetry.io/schemas/1.2.0"), }, resourceValues: map[string]string{}, schemaURL: "", isErr: true, }, } for _, tt := range tc { t.Run(tt.name, func(t *testing.T) { store, err := ottest.SetEnvVariables(map[string]string{ envVar: tt.envars, }) require.NoError(t, err) defer func() { require.NoError(t, store.Restore()) }() ctx := context.Background() res, err := resource.New(ctx, tt.options...) if tt.isErr { require.Error(t, err) } else { require.NoError(t, err) } require.EqualValues(t, tt.resourceValues, toMap(res)) // TODO: do we need to ensure that resource is never nil and eliminate the // following if? 
if res != nil { assert.EqualValues(t, tt.schemaURL, res.SchemaURL()) } }) } } func TestNewWrapedError(t *testing.T) { localErr := errors.New("local error") _, err := resource.New( context.Background(), resource.WithDetectors( resource.StringDetector("", "", func() (string, error) { return "", localErr }), resource.StringDetector("", "", func() (string, error) { return "", assert.AnError }), ), ) assert.ErrorIs(t, err, localErr) assert.ErrorIs(t, err, assert.AnError) assert.NotErrorIs(t, err, errors.New("false positive error")) } func TestWithHostID(t *testing.T) { mockHostIDProvider() t.Cleanup(restoreHostIDProvider) ctx := context.Background() res, err := resource.New(ctx, resource.WithHostID(), ) require.NoError(t, err) require.EqualValues(t, map[string]string{ "host.id": "f2c668b579780554f70f72a063dc0864", }, toMap(res)) } func TestWithHostIDError(t *testing.T) { mockHostIDProviderWithError() t.Cleanup(restoreHostIDProvider) ctx := context.Background() res, err := resource.New(ctx, resource.WithHostID(), ) assert.ErrorIs(t, err, assert.AnError) require.EqualValues(t, map[string]string{}, toMap(res)) } func TestWithOSType(t *testing.T) { mockRuntimeProviders() t.Cleanup(restoreAttributesProviders) ctx := context.Background() res, err := resource.New(ctx, resource.WithOSType(), ) require.NoError(t, err) require.EqualValues(t, map[string]string{ "os.type": "linux", }, toMap(res)) } func TestWithOSDescription(t *testing.T) { mockRuntimeProviders() t.Cleanup(restoreAttributesProviders) ctx := context.Background() res, err := resource.New(ctx, resource.WithOSDescription(), ) require.NoError(t, err) require.EqualValues(t, map[string]string{ "os.description": "Test", }, toMap(res)) } func TestWithOS(t *testing.T) { mockRuntimeProviders() t.Cleanup(restoreAttributesProviders) ctx := context.Background() res, err := resource.New(ctx, resource.WithOS(), ) require.NoError(t, err) require.EqualValues(t, map[string]string{ "os.type": "linux", "os.description": "Test", }, toMap(res)) } func TestWithProcessPID(t *testing.T) { mockProcessAttributesProvidersWithErrors() ctx := context.Background() res, err := resource.New(ctx, resource.WithProcessPID(), ) require.NoError(t, err) require.EqualValues(t, map[string]string{ "process.pid": fmt.Sprint(fakePID), }, toMap(res)) } func TestWithProcessExecutableName(t *testing.T) { mockProcessAttributesProvidersWithErrors() ctx := context.Background() res, err := resource.New(ctx, resource.WithProcessExecutableName(), ) require.NoError(t, err) require.EqualValues(t, map[string]string{ "process.executable.name": fakeExecutableName, }, toMap(res)) } func TestWithProcessExecutablePath(t *testing.T) { mockProcessAttributesProviders() ctx := context.Background() res, err := resource.New(ctx, resource.WithProcessExecutablePath(), ) require.NoError(t, err) require.EqualValues(t, map[string]string{ "process.executable.path": fakeExecutablePath, }, toMap(res)) } func TestWithProcessCommandArgs(t *testing.T) { mockProcessAttributesProvidersWithErrors() ctx := context.Background() res, err := resource.New(ctx, resource.WithProcessCommandArgs(), ) require.NoError(t, err) require.EqualValues(t, map[string]string{ "process.command_args": fmt.Sprint(fakeCommandArgs), }, toMap(res)) } func TestWithProcessOwner(t *testing.T) { mockProcessAttributesProviders() ctx := context.Background() res, err := resource.New(ctx, resource.WithProcessOwner(), ) require.NoError(t, err) require.EqualValues(t, map[string]string{ "process.owner": fakeOwner, }, toMap(res)) } func 
TestWithProcessRuntimeName(t *testing.T) { mockProcessAttributesProvidersWithErrors() ctx := context.Background() res, err := resource.New(ctx, resource.WithProcessRuntimeName(), ) require.NoError(t, err) require.EqualValues(t, map[string]string{ "process.runtime.name": fakeRuntimeName, }, toMap(res)) } func TestWithProcessRuntimeVersion(t *testing.T) { mockProcessAttributesProvidersWithErrors() ctx := context.Background() res, err := resource.New(ctx, resource.WithProcessRuntimeVersion(), ) require.NoError(t, err) require.EqualValues(t, map[string]string{ "process.runtime.version": fakeRuntimeVersion, }, toMap(res)) } func TestWithProcessRuntimeDescription(t *testing.T) { mockProcessAttributesProvidersWithErrors() ctx := context.Background() res, err := resource.New(ctx, resource.WithProcessRuntimeDescription(), ) require.NoError(t, err) require.EqualValues(t, map[string]string{ "process.runtime.description": fakeRuntimeDescription, }, toMap(res)) } func TestWithProcess(t *testing.T) { mockProcessAttributesProviders() ctx := context.Background() res, err := resource.New(ctx, resource.WithProcess(), ) require.NoError(t, err) require.EqualValues(t, map[string]string{ "process.pid": fmt.Sprint(fakePID), "process.executable.name": fakeExecutableName, "process.executable.path": fakeExecutablePath, "process.command_args": fmt.Sprint(fakeCommandArgs), "process.owner": fakeOwner, "process.runtime.name": fakeRuntimeName, "process.runtime.version": fakeRuntimeVersion, "process.runtime.description": fakeRuntimeDescription, }, toMap(res)) } func toMap(res *resource.Resource) map[string]string { m := map[string]string{} for _, attr := range res.Attributes() { m[string(attr.Key)] = attr.Value.Emit() } return m } func hostname() string { hn, err := os.Hostname() if err != nil { return fmt.Sprintf("hostname(%s)", err) } return hn } func TestWithContainerID(t *testing.T) { t.Cleanup(restoreAttributesProviders) fakeContainerID := "fake-container-id" testCases := []struct { name string containerIDProvider func() (string, error) expectedResource map[string]string expectedErr bool }{ { name: "get container id", containerIDProvider: func() (string, error) { return fakeContainerID, nil }, expectedResource: map[string]string{ string(semconv.ContainerIDKey): fakeContainerID, }, }, { name: "no container id found", containerIDProvider: func() (string, error) { return "", nil }, expectedResource: map[string]string{}, }, { name: "error", containerIDProvider: func() (string, error) { return "", fmt.Errorf("unable to get container id") }, expectedResource: map[string]string{}, expectedErr: true, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { resource.SetContainerProviders(tc.containerIDProvider) res, err := resource.New(context.Background(), resource.WithContainerID(), ) if tc.expectedErr { assert.Error(t, err) } assert.Equal(t, tc.expectedResource, toMap(res)) }) } } func TestWithContainer(t *testing.T) { t.Cleanup(restoreAttributesProviders) fakeContainerID := "fake-container-id" resource.SetContainerProviders(func() (string, error) { return fakeContainerID, nil }) res, err := resource.New(context.Background(), resource.WithContainer(), ) assert.NoError(t, err) assert.Equal(t, map[string]string{ string(semconv.ContainerIDKey): fakeContainerID, }, toMap(res)) } func TestResourceConcurrentSafe(t *testing.T) { // Creating Resources should also be free of any data races, // because Resources are immutable. 
var wg sync.WaitGroup for i := 0; i < 2; i++ { wg.Add(1) go func() { defer wg.Done() d := &fakeDetector{} _, err := resource.Detect(context.Background(), d) assert.NoError(t, err) }() } wg.Wait() } type fakeDetector struct{} func (f fakeDetector) Detect(_ context.Context) (*resource.Resource, error) { // A bit pedantic, but resource.NewWithAttributes returns an empty Resource when // no attributes specified. We want to make sure that this is concurrent-safe. return resource.NewWithAttributes("https://opentelemetry.io/schemas/1.3.0"), nil } var _ resource.Detector = &fakeDetector{} opentelemetry-go-1.21.0/sdk/trace/000077500000000000000000000000001452547353200167635ustar00rootroot00000000000000opentelemetry-go-1.21.0/sdk/trace/batch_span_processor.go000066400000000000000000000272311452547353200235200ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" "sync" "sync/atomic" "time" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/internal/env" "go.opentelemetry.io/otel/trace" ) // Defaults for BatchSpanProcessorOptions. const ( DefaultMaxQueueSize = 2048 DefaultScheduleDelay = 5000 DefaultExportTimeout = 30000 DefaultMaxExportBatchSize = 512 ) // BatchSpanProcessorOption configures a BatchSpanProcessor. type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions) // BatchSpanProcessorOptions is configuration settings for a // BatchSpanProcessor. type BatchSpanProcessorOptions struct { // MaxQueueSize is the maximum queue size to buffer spans for delayed processing. If the // queue gets full it drops the spans. Use BlockOnQueueFull to change this behavior. // The default value of MaxQueueSize is 2048. MaxQueueSize int // BatchTimeout is the maximum duration for constructing a batch. Processor // forcefully sends available spans when timeout is reached. // The default value of BatchTimeout is 5000 msec. BatchTimeout time.Duration // ExportTimeout specifies the maximum duration for exporting spans. If the timeout // is reached, the export will be cancelled. // The default value of ExportTimeout is 30000 msec. ExportTimeout time.Duration // MaxExportBatchSize is the maximum number of spans to process in a single batch. // If there are more than one batch worth of spans then it processes multiple batches // of spans one batch after the other without any delay. // The default value of MaxExportBatchSize is 512. MaxExportBatchSize int // BlockOnQueueFull blocks onEnd() and onStart() method if the queue is full // AND if BlockOnQueueFull is set to true. // Blocking option should be used carefully as it can severely affect the performance of an // application. BlockOnQueueFull bool } // batchSpanProcessor is a SpanProcessor that batches asynchronously-received // spans and sends them to a trace.Exporter when complete. 
type batchSpanProcessor struct { e SpanExporter o BatchSpanProcessorOptions queue chan ReadOnlySpan dropped uint32 batch []ReadOnlySpan batchMutex sync.Mutex timer *time.Timer stopWait sync.WaitGroup stopOnce sync.Once stopCh chan struct{} stopped atomic.Bool } var _ SpanProcessor = (*batchSpanProcessor)(nil) // NewBatchSpanProcessor creates a new SpanProcessor that will send completed // span batches to the exporter with the supplied options. // // If the exporter is nil, the span processor will perform no action. func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorOption) SpanProcessor { maxQueueSize := env.BatchSpanProcessorMaxQueueSize(DefaultMaxQueueSize) maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize) if maxExportBatchSize > maxQueueSize { if DefaultMaxExportBatchSize > maxQueueSize { maxExportBatchSize = maxQueueSize } else { maxExportBatchSize = DefaultMaxExportBatchSize } } o := BatchSpanProcessorOptions{ BatchTimeout: time.Duration(env.BatchSpanProcessorScheduleDelay(DefaultScheduleDelay)) * time.Millisecond, ExportTimeout: time.Duration(env.BatchSpanProcessorExportTimeout(DefaultExportTimeout)) * time.Millisecond, MaxQueueSize: maxQueueSize, MaxExportBatchSize: maxExportBatchSize, } for _, opt := range options { opt(&o) } bsp := &batchSpanProcessor{ e: exporter, o: o, batch: make([]ReadOnlySpan, 0, o.MaxExportBatchSize), timer: time.NewTimer(o.BatchTimeout), queue: make(chan ReadOnlySpan, o.MaxQueueSize), stopCh: make(chan struct{}), } bsp.stopWait.Add(1) go func() { defer bsp.stopWait.Done() bsp.processQueue() bsp.drainQueue() }() return bsp } // OnStart method does nothing. func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) {} // OnEnd method enqueues a ReadOnlySpan for later processing. func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) { // Do not enqueue spans after Shutdown. if bsp.stopped.Load() { return } // Do not enqueue spans if we are just going to drop them. if bsp.e == nil { return } bsp.enqueue(s) } // Shutdown flushes the queue and waits until all spans are processed. // It only executes once. Subsequent call does nothing. func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error { var err error bsp.stopOnce.Do(func() { bsp.stopped.Store(true) wait := make(chan struct{}) go func() { close(bsp.stopCh) bsp.stopWait.Wait() if bsp.e != nil { if err := bsp.e.Shutdown(ctx); err != nil { otel.Handle(err) } } close(wait) }() // Wait until the wait group is done or the context is cancelled select { case <-wait: case <-ctx.Done(): err = ctx.Err() } }) return err } type forceFlushSpan struct { ReadOnlySpan flushed chan struct{} } func (f forceFlushSpan) SpanContext() trace.SpanContext { return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled}) } // ForceFlush exports all ended spans that have not yet been exported. func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error { // Interrupt if context is already canceled. if err := ctx.Err(); err != nil { return err } // Do nothing after Shutdown. if bsp.stopped.Load() { return nil } var err error if bsp.e != nil { flushCh := make(chan struct{}) if bsp.enqueueBlockOnQueueFull(ctx, forceFlushSpan{flushed: flushCh}) { select { case <-bsp.stopCh: // The batchSpanProcessor is Shutdown. 
return nil case <-flushCh: // Processed any items in queue prior to ForceFlush being called case <-ctx.Done(): return ctx.Err() } } wait := make(chan error) go func() { wait <- bsp.exportSpans(ctx) close(wait) }() // Wait until the export is finished or the context is cancelled/timed out select { case err = <-wait: case <-ctx.Done(): err = ctx.Err() } } return err } // WithMaxQueueSize returns a BatchSpanProcessorOption that configures the // maximum queue size allowed for a BatchSpanProcessor. func WithMaxQueueSize(size int) BatchSpanProcessorOption { return func(o *BatchSpanProcessorOptions) { o.MaxQueueSize = size } } // WithMaxExportBatchSize returns a BatchSpanProcessorOption that configures // the maximum export batch size allowed for a BatchSpanProcessor. func WithMaxExportBatchSize(size int) BatchSpanProcessorOption { return func(o *BatchSpanProcessorOptions) { o.MaxExportBatchSize = size } } // WithBatchTimeout returns a BatchSpanProcessorOption that configures the // maximum delay allowed for a BatchSpanProcessor before it will export any // held span (whether the queue is full or not). func WithBatchTimeout(delay time.Duration) BatchSpanProcessorOption { return func(o *BatchSpanProcessorOptions) { o.BatchTimeout = delay } } // WithExportTimeout returns a BatchSpanProcessorOption that configures the // amount of time a BatchSpanProcessor waits for an exporter to export before // abandoning the export. func WithExportTimeout(timeout time.Duration) BatchSpanProcessorOption { return func(o *BatchSpanProcessorOptions) { o.ExportTimeout = timeout } } // WithBlocking returns a BatchSpanProcessorOption that configures a // BatchSpanProcessor to wait for enqueue operations to succeed instead of // dropping data when the queue is full. func WithBlocking() BatchSpanProcessorOption { return func(o *BatchSpanProcessorOptions) { o.BlockOnQueueFull = true } } // exportSpans is a subroutine of processing and draining the queue. func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { bsp.timer.Reset(bsp.o.BatchTimeout) bsp.batchMutex.Lock() defer bsp.batchMutex.Unlock() if bsp.o.ExportTimeout > 0 { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(ctx, bsp.o.ExportTimeout) defer cancel() } if l := len(bsp.batch); l > 0 { global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped)) err := bsp.e.ExportSpans(ctx, bsp.batch) // A new batch is always created after exporting, even if the batch failed to be exported. // // It is up to the exporter to implement any type of retry logic if a batch is failing // to be exported, since it is specific to the protocol and backend being sent to. bsp.batch = bsp.batch[:0] if err != nil { return err } } return nil } // processQueue removes spans from the `queue` channel until processor // is shut down. It calls the exporter in batches of up to MaxExportBatchSize // waiting up to BatchTimeout to form a batch. 
func (bsp *batchSpanProcessor) processQueue() { defer bsp.timer.Stop() ctx, cancel := context.WithCancel(context.Background()) defer cancel() for { select { case <-bsp.stopCh: return case <-bsp.timer.C: if err := bsp.exportSpans(ctx); err != nil { otel.Handle(err) } case sd := <-bsp.queue: if ffs, ok := sd.(forceFlushSpan); ok { close(ffs.flushed) continue } bsp.batchMutex.Lock() bsp.batch = append(bsp.batch, sd) shouldExport := len(bsp.batch) >= bsp.o.MaxExportBatchSize bsp.batchMutex.Unlock() if shouldExport { if !bsp.timer.Stop() { <-bsp.timer.C } if err := bsp.exportSpans(ctx); err != nil { otel.Handle(err) } } } } } // drainQueue awaits any caller that had added to bsp.stopWait // to finish the enqueue, then exports the final batch. func (bsp *batchSpanProcessor) drainQueue() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() for { select { case sd := <-bsp.queue: if _, ok := sd.(forceFlushSpan); ok { // Ignore flush requests as they are not valid spans. continue } bsp.batchMutex.Lock() bsp.batch = append(bsp.batch, sd) shouldExport := len(bsp.batch) == bsp.o.MaxExportBatchSize bsp.batchMutex.Unlock() if shouldExport { if err := bsp.exportSpans(ctx); err != nil { otel.Handle(err) } } default: // There are no more enqueued spans. Make final export. if err := bsp.exportSpans(ctx); err != nil { otel.Handle(err) } return } } } func (bsp *batchSpanProcessor) enqueue(sd ReadOnlySpan) { ctx := context.TODO() if bsp.o.BlockOnQueueFull { bsp.enqueueBlockOnQueueFull(ctx, sd) } else { bsp.enqueueDrop(ctx, sd) } } func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd ReadOnlySpan) bool { if !sd.SpanContext().IsSampled() { return false } select { case bsp.queue <- sd: return true case <-ctx.Done(): return false } } func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) bool { if !sd.SpanContext().IsSampled() { return false } select { case bsp.queue <- sd: return true default: atomic.AddUint32(&bsp.dropped, 1) } return false } // MarshalLog is the marshaling function used by the logging system to represent this Span Processor. func (bsp *batchSpanProcessor) MarshalLog() interface{} { return struct { Type string SpanExporter SpanExporter Config BatchSpanProcessorOptions }{ Type: "BatchSpanProcessor", SpanExporter: bsp.e, Config: bsp.o, } } opentelemetry-go-1.21.0/sdk/trace/batch_span_processor_test.go000066400000000000000000000427311452547353200245610ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License.
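// Illustrative sketch (not part of the upstream file): a BatchSpanProcessor is
// typically wired into a TracerProvider roughly like this (exporter construction
// elided; the option values shown mirror the defaults defined above):
//
//	bsp := sdktrace.NewBatchSpanProcessor(exporter,
//		sdktrace.WithMaxQueueSize(2048),
//		sdktrace.WithMaxExportBatchSize(512),
//		sdktrace.WithBatchTimeout(5*time.Second),
//	)
//	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(bsp))
//	defer func() { _ = tp.Shutdown(context.Background()) }()
//
// NewTracerProvider and WithSpanProcessor are part of this same sdk/trace package.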
package trace_test import ( "context" "encoding/binary" "errors" "fmt" "os" "sync" "testing" "time" ottest "go.opentelemetry.io/otel/sdk/internal/internaltest" "github.com/go-logr/logr/funcr" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/internal/env" sdktrace "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/sdk/trace/tracetest" "go.opentelemetry.io/otel/trace" ) type testBatchExporter struct { mu sync.Mutex spans []sdktrace.ReadOnlySpan sizes []int batchCount int shutdownCount int errors []error droppedCount int idx int err error } func (t *testBatchExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error { t.mu.Lock() defer t.mu.Unlock() if t.idx < len(t.errors) { t.droppedCount += len(spans) err := t.errors[t.idx] t.idx++ return err } select { case <-ctx.Done(): t.err = ctx.Err() return ctx.Err() default: } t.spans = append(t.spans, spans...) t.sizes = append(t.sizes, len(spans)) t.batchCount++ return nil } func (t *testBatchExporter) Shutdown(context.Context) error { t.shutdownCount++ return nil } func (t *testBatchExporter) len() int { t.mu.Lock() defer t.mu.Unlock() return len(t.spans) } func (t *testBatchExporter) getBatchCount() int { t.mu.Lock() defer t.mu.Unlock() return t.batchCount } var _ sdktrace.SpanExporter = (*testBatchExporter)(nil) func TestNewBatchSpanProcessorWithNilExporter(t *testing.T) { tp := basicTracerProvider(t) bsp := sdktrace.NewBatchSpanProcessor(nil) tp.RegisterSpanProcessor(bsp) tr := tp.Tracer("NilExporter") _, span := tr.Start(context.Background(), "foo") span.End() // These should not panic. bsp.OnStart(context.Background(), span.(sdktrace.ReadWriteSpan)) bsp.OnEnd(span.(sdktrace.ReadOnlySpan)) if err := bsp.ForceFlush(context.Background()); err != nil { t.Errorf("failed to ForceFlush the BatchSpanProcessor: %v", err) } if err := bsp.Shutdown(context.Background()); err != nil { t.Errorf("failed to Shutdown the BatchSpanProcessor: %v", err) } } type testOption struct { name string o []sdktrace.BatchSpanProcessorOption wantNumSpans int wantBatchCount int genNumSpans int parallel bool envs map[string]string } func TestNewBatchSpanProcessorWithOptions(t *testing.T) { schDelay := 200 * time.Millisecond options := []testOption{ { name: "default BatchSpanProcessorOptions", wantNumSpans: 2053, wantBatchCount: 4, genNumSpans: 2053, }, { name: "non-default BatchTimeout", o: []sdktrace.BatchSpanProcessorOption{ sdktrace.WithBatchTimeout(schDelay), }, wantNumSpans: 2053, wantBatchCount: 4, genNumSpans: 2053, }, { name: "non-default MaxQueueSize and BatchTimeout", o: []sdktrace.BatchSpanProcessorOption{ sdktrace.WithBatchTimeout(schDelay), sdktrace.WithMaxQueueSize(200), }, wantNumSpans: 205, wantBatchCount: 1, genNumSpans: 205, }, { name: "non-default MaxQueueSize, BatchTimeout and MaxExportBatchSize", o: []sdktrace.BatchSpanProcessorOption{ sdktrace.WithBatchTimeout(schDelay), sdktrace.WithMaxQueueSize(205), sdktrace.WithMaxExportBatchSize(20), }, wantNumSpans: 210, wantBatchCount: 11, genNumSpans: 210, }, { name: "blocking option", o: []sdktrace.BatchSpanProcessorOption{ sdktrace.WithBatchTimeout(schDelay), sdktrace.WithMaxQueueSize(200), sdktrace.WithMaxExportBatchSize(20), }, wantNumSpans: 205, wantBatchCount: 11, genNumSpans: 205, }, { name: "parallel span generation", o: []sdktrace.BatchSpanProcessorOption{ sdktrace.WithBatchTimeout(schDelay), sdktrace.WithMaxQueueSize(200), }, wantNumSpans: 205, wantBatchCount: 1, 
genNumSpans: 205, parallel: true, }, { name: "parallel span blocking", o: []sdktrace.BatchSpanProcessorOption{ sdktrace.WithBatchTimeout(schDelay), sdktrace.WithMaxExportBatchSize(200), }, wantNumSpans: 2000, wantBatchCount: 10, genNumSpans: 2000, parallel: true, }, } for _, option := range options { t.Run(option.name, func(t *testing.T) { te := testBatchExporter{} tp := basicTracerProvider(t) ssp := createAndRegisterBatchSP(option, &te) if ssp == nil { t.Fatalf("%s: Error creating new instance of BatchSpanProcessor\n", option.name) } tp.RegisterSpanProcessor(ssp) tr := tp.Tracer("BatchSpanProcessorWithOptions") if option.parallel { generateSpanParallel(t, tr, option) } else { generateSpan(t, tr, option) } tp.UnregisterSpanProcessor(ssp) gotNumOfSpans := te.len() if option.wantNumSpans > 0 && option.wantNumSpans != gotNumOfSpans { t.Errorf("number of exported span: got %+v, want %+v\n", gotNumOfSpans, option.wantNumSpans) } gotBatchCount := te.getBatchCount() if option.wantBatchCount > 0 && gotBatchCount < option.wantBatchCount { t.Errorf("number batches: got %+v, want >= %+v\n", gotBatchCount, option.wantBatchCount) t.Errorf("Batches %v\n", te.sizes) } }) } } func TestNewBatchSpanProcessorWithEnvOptions(t *testing.T) { options := []testOption{ { name: "BatchSpanProcessorEnvOptions - Basic", wantNumSpans: 2053, wantBatchCount: 1, genNumSpans: 2053, envs: map[string]string{ env.BatchSpanProcessorMaxQueueSizeKey: "5000", env.BatchSpanProcessorMaxExportBatchSizeKey: "5000", }, }, { name: "BatchSpanProcessorEnvOptions - A larger max export batch size than queue size", wantNumSpans: 2053, wantBatchCount: 4, genNumSpans: 2053, envs: map[string]string{ env.BatchSpanProcessorMaxQueueSizeKey: "5000", env.BatchSpanProcessorMaxExportBatchSizeKey: "10000", }, }, { name: "BatchSpanProcessorEnvOptions - A large max export batch size with a small queue size", wantNumSpans: 2053, wantBatchCount: 42, genNumSpans: 2053, envs: map[string]string{ env.BatchSpanProcessorMaxQueueSizeKey: "50", env.BatchSpanProcessorMaxExportBatchSizeKey: "10000", }, }, } envStore := ottest.NewEnvStore() envStore.Record(env.BatchSpanProcessorScheduleDelayKey) envStore.Record(env.BatchSpanProcessorExportTimeoutKey) envStore.Record(env.BatchSpanProcessorMaxQueueSizeKey) envStore.Record(env.BatchSpanProcessorMaxExportBatchSizeKey) defer func() { require.NoError(t, envStore.Restore()) }() for _, option := range options { t.Run(option.name, func(t *testing.T) { for k, v := range option.envs { require.NoError(t, os.Setenv(k, v)) } te := testBatchExporter{} tp := basicTracerProvider(t) ssp := createAndRegisterBatchSP(option, &te) if ssp == nil { t.Fatalf("%s: Error creating new instance of BatchSpanProcessor\n", option.name) } tp.RegisterSpanProcessor(ssp) tr := tp.Tracer("BatchSpanProcessorWithOptions") if option.parallel { generateSpanParallel(t, tr, option) } else { generateSpan(t, tr, option) } tp.UnregisterSpanProcessor(ssp) gotNumOfSpans := te.len() if option.wantNumSpans > 0 && option.wantNumSpans != gotNumOfSpans { t.Errorf("number of exported span: got %+v, want %+v\n", gotNumOfSpans, option.wantNumSpans) } gotBatchCount := te.getBatchCount() if option.wantBatchCount > 0 && gotBatchCount < option.wantBatchCount { t.Errorf("number batches: got %+v, want >= %+v\n", gotBatchCount, option.wantBatchCount) t.Errorf("Batches %v\n", te.sizes) } }) } } type stuckExporter struct { testBatchExporter } // ExportSpans waits for ctx to expire and returns that error.
func (e *stuckExporter) ExportSpans(ctx context.Context, _ []sdktrace.ReadOnlySpan) error { <-ctx.Done() e.err = ctx.Err() return ctx.Err() } func TestBatchSpanProcessorExportTimeout(t *testing.T) { exp := new(stuckExporter) bsp := sdktrace.NewBatchSpanProcessor( exp, // Set a non-zero export timeout so a deadline is set. sdktrace.WithExportTimeout(1*time.Microsecond), sdktrace.WithBlocking(), ) tp := basicTracerProvider(t) tp.RegisterSpanProcessor(bsp) tr := tp.Tracer("BatchSpanProcessorExportTimeout") generateSpan(t, tr, testOption{genNumSpans: 1}) tp.UnregisterSpanProcessor(bsp) if exp.err != context.DeadlineExceeded { t.Errorf("context deadline error not returned: got %+v", exp.err) } } func createAndRegisterBatchSP(option testOption, te *testBatchExporter) sdktrace.SpanProcessor { // Always use blocking queue to avoid flaky tests. options := append(option.o, sdktrace.WithBlocking()) return sdktrace.NewBatchSpanProcessor(te, options...) } func generateSpan(t *testing.T, tr trace.Tracer, option testOption) { sc := getSpanContext() for i := 0; i < option.genNumSpans; i++ { tid := sc.TraceID() binary.BigEndian.PutUint64(tid[0:8], uint64(i+1)) newSc := sc.WithTraceID(tid) ctx := trace.ContextWithRemoteSpanContext(context.Background(), newSc) _, span := tr.Start(ctx, option.name) span.End() } } func generateSpanParallel(t *testing.T, tr trace.Tracer, option testOption) { sc := getSpanContext() wg := &sync.WaitGroup{} for i := 0; i < option.genNumSpans; i++ { tid := sc.TraceID() binary.BigEndian.PutUint64(tid[0:8], uint64(i+1)) wg.Add(1) go func(sc trace.SpanContext) { ctx := trace.ContextWithRemoteSpanContext(context.Background(), sc) _, span := tr.Start(ctx, option.name) span.End() wg.Done() }(sc.WithTraceID(tid)) } wg.Wait() } func getSpanContext() trace.SpanContext { tid, _ := trace.TraceIDFromHex("01020304050607080102040810203040") sid, _ := trace.SpanIDFromHex("0102040810203040") return trace.NewSpanContext(trace.SpanContextConfig{ TraceID: tid, SpanID: sid, TraceFlags: 0x1, }) } func TestBatchSpanProcessorShutdown(t *testing.T) { var bp testBatchExporter bsp := sdktrace.NewBatchSpanProcessor(&bp) err := bsp.Shutdown(context.Background()) if err != nil { t.Error("Error shutting the BatchSpanProcessor down\n") } assert.Equal(t, 1, bp.shutdownCount, "shutdown from span exporter not called") // Multiple call to Shutdown() should not panic. 
err = bsp.Shutdown(context.Background()) if err != nil { t.Error("Error shutting the BatchSpanProcessor down\n") } assert.Equal(t, 1, bp.shutdownCount) } func TestBatchSpanProcessorPostShutdown(t *testing.T) { tp := basicTracerProvider(t) be := testBatchExporter{} bsp := sdktrace.NewBatchSpanProcessor(&be) tp.RegisterSpanProcessor(bsp) tr := tp.Tracer("Normal") generateSpanParallel(t, tr, testOption{ o: []sdktrace.BatchSpanProcessorOption{ sdktrace.WithMaxExportBatchSize(50), }, genNumSpans: 60, }) require.NoError(t, bsp.Shutdown(context.Background()), "shutting down BatchSpanProcessor") lenJustAfterShutdown := be.len() _, span := tr.Start(context.Background(), "foo") span.End() assert.NoError(t, bsp.ForceFlush(context.Background()), "force flushing BatchSpanProcessor") assert.Equal(t, lenJustAfterShutdown, be.len(), "OnEnd and ForceFlush should have no effect after Shutdown") } func TestBatchSpanProcessorForceFlushSucceeds(t *testing.T) { te := testBatchExporter{} tp := basicTracerProvider(t) option := testOption{ name: "default BatchSpanProcessorOptions", o: []sdktrace.BatchSpanProcessorOption{ sdktrace.WithMaxQueueSize(0), sdktrace.WithMaxExportBatchSize(3000), }, wantNumSpans: 2053, wantBatchCount: 1, genNumSpans: 2053, } ssp := createAndRegisterBatchSP(option, &te) if ssp == nil { t.Fatalf("%s: Error creating new instance of BatchSpanProcessor\n", option.name) } tp.RegisterSpanProcessor(ssp) tr := tp.Tracer("BatchSpanProcessorWithOption") if option.parallel { generateSpanParallel(t, tr, option) } else { generateSpan(t, tr, option) } // Force flush any held span batches err := ssp.ForceFlush(context.Background()) assertMaxSpanDiff(t, te.len(), option.wantNumSpans, 10) gotBatchCount := te.getBatchCount() if gotBatchCount < option.wantBatchCount { t.Errorf("number batches: got %+v, want >= %+v\n", gotBatchCount, option.wantBatchCount) t.Errorf("Batches %v\n", te.sizes) } assert.NoError(t, err) } func TestBatchSpanProcessorDropBatchIfFailed(t *testing.T) { te := testBatchExporter{ errors: []error{errors.New("fail to export")}, } tp := basicTracerProvider(t) option := testOption{ o: []sdktrace.BatchSpanProcessorOption{ sdktrace.WithMaxQueueSize(0), sdktrace.WithMaxExportBatchSize(2000), }, wantNumSpans: 1000, wantBatchCount: 1, genNumSpans: 1000, } ssp := createAndRegisterBatchSP(option, &te) if ssp == nil { t.Fatalf("%s: Error creating new instance of BatchSpanProcessor\n", option.name) } tp.RegisterSpanProcessor(ssp) tr := tp.Tracer("BatchSpanProcessorWithOption") if option.parallel { generateSpanParallel(t, tr, option) } else { generateSpan(t, tr, option) } // Force flush any held span batches err := ssp.ForceFlush(context.Background()) assert.Error(t, err) assert.EqualError(t, err, "fail to export") // First flush will fail, nothing should be exported. 
assertMaxSpanDiff(t, te.droppedCount, option.wantNumSpans, 10) assert.Equal(t, 0, te.len()) assert.Equal(t, 0, te.getBatchCount()) // Generate a new batch, this will succeed if option.parallel { generateSpanParallel(t, tr, option) } else { generateSpan(t, tr, option) } // Force flush any held span batches err = ssp.ForceFlush(context.Background()) assert.NoError(t, err) assertMaxSpanDiff(t, te.len(), option.wantNumSpans, 10) gotBatchCount := te.getBatchCount() if gotBatchCount < option.wantBatchCount { t.Errorf("number batches: got %+v, want >= %+v\n", gotBatchCount, option.wantBatchCount) t.Errorf("Batches %v\n", te.sizes) } } func assertMaxSpanDiff(t *testing.T, want, got, maxDif int) { spanDifference := want - got if spanDifference < 0 { spanDifference = spanDifference * -1 } if spanDifference > maxDif { t.Errorf("number of exported span not equal to or within %d less than: got %+v, want %+v\n", maxDif, got, want) } } type indefiniteExporter struct{} func (indefiniteExporter) Shutdown(context.Context) error { return nil } func (indefiniteExporter) ExportSpans(ctx context.Context, _ []sdktrace.ReadOnlySpan) error { <-ctx.Done() return ctx.Err() } func TestBatchSpanProcessorForceFlushCancellation(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) // Cancel the context cancel() bsp := sdktrace.NewBatchSpanProcessor(indefiniteExporter{}) if got, want := bsp.ForceFlush(ctx), context.Canceled; !errors.Is(got, want) { t.Errorf("expected %q error, got %v", want, got) } } func TestBatchSpanProcessorForceFlushTimeout(t *testing.T) { // Add timeout to context to test deadline ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond) defer cancel() <-ctx.Done() bsp := sdktrace.NewBatchSpanProcessor(indefiniteExporter{}) if got, want := bsp.ForceFlush(ctx), context.DeadlineExceeded; !errors.Is(got, want) { t.Errorf("expected %q error, got %v", want, got) } } func TestBatchSpanProcessorForceFlushQueuedSpans(t *testing.T) { ctx := context.Background() exp := tracetest.NewInMemoryExporter() tp := sdktrace.NewTracerProvider( sdktrace.WithBatcher(exp), ) tracer := tp.Tracer("tracer") for i := 0; i < 10; i++ { _, span := tracer.Start(ctx, fmt.Sprintf("span%d", i)) span.End() err := tp.ForceFlush(ctx) assert.NoError(t, err) assert.Len(t, exp.GetSpans(), i+1) } } func TestBatchSpanProcessorConcurrentSafe(t *testing.T) { ctx := context.Background() var bp testBatchExporter bsp := sdktrace.NewBatchSpanProcessor(&bp) tp := basicTracerProvider(t) tp.RegisterSpanProcessor(bsp) tr := tp.Tracer(t.Name()) var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() generateSpan(t, tr, testOption{genNumSpans: 1}) }() wg.Add(1) go func() { defer wg.Done() _ = bsp.ForceFlush(ctx) }() wg.Add(1) go func() { defer wg.Done() _ = bsp.Shutdown(ctx) }() wg.Add(1) go func() { defer wg.Done() _ = tp.ForceFlush(ctx) }() wg.Add(1) go func() { defer wg.Done() _ = tp.Shutdown(ctx) }() wg.Wait() } func BenchmarkSpanProcessor(b *testing.B) { tp := sdktrace.NewTracerProvider( sdktrace.WithBatcher( tracetest.NewNoopExporter(), sdktrace.WithMaxExportBatchSize(10), )) tracer := tp.Tracer("bench") ctx := context.Background() b.ResetTimer() b.ReportAllocs() for i := 0; i < b.N; i++ { for j := 0; j < 10; j++ { _, span := tracer.Start(ctx, "bench") span.End() } } } func BenchmarkSpanProcessorVerboseLogging(b *testing.B) { global.SetLogger(funcr.New(func(prefix, args string) {}, funcr.Options{Verbosity: 5})) tp := sdktrace.NewTracerProvider( sdktrace.WithBatcher( tracetest.NewNoopExporter(), 
sdktrace.WithMaxExportBatchSize(10), )) tracer := tp.Tracer("bench") ctx := context.Background() b.ResetTimer() b.ReportAllocs() for i := 0; i < b.N; i++ { for j := 0; j < 10; j++ { _, span := tracer.Start(ctx, "bench") span.End() } } } opentelemetry-go-1.21.0/sdk/trace/benchmark_test.go000066400000000000000000000215611452547353200223100ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace_test import ( "context" "fmt" "testing" "time" "go.opentelemetry.io/otel/attribute" sdktrace "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/trace" ) func benchmarkSpanLimits(b *testing.B, limits sdktrace.SpanLimits) { tp := sdktrace.NewTracerProvider(sdktrace.WithSpanLimits(limits)) tracer := tp.Tracer(b.Name()) ctx := context.Background() const count = 8 attrs := []attribute.KeyValue{ attribute.Bool("bool", true), attribute.BoolSlice("boolSlice", []bool{true, false}), attribute.Int("int", 42), attribute.IntSlice("intSlice", []int{42, -1}), attribute.Int64("int64", 42), attribute.Int64Slice("int64Slice", []int64{42, -1}), attribute.Float64("float64", 42), attribute.Float64Slice("float64Slice", []float64{42, -1}), attribute.String("string", "value"), attribute.StringSlice("stringSlice", []string{"value", "value-1"}), } links := make([]trace.Link, count) for i := range links { links[i] = trace.Link{ SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: [16]byte{0x01}, SpanID: [8]byte{0x01}, }), Attributes: attrs, } } events := make([]struct { name string attr []attribute.KeyValue }, count) for i := range events { events[i] = struct { name string attr []attribute.KeyValue }{ name: fmt.Sprintf("event-%d", i), attr: attrs, } } b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { _, span := tracer.Start(ctx, "span-name", trace.WithLinks(links...)) span.SetAttributes(attrs...) 
for _, e := range events { span.AddEvent(e.name, trace.WithAttributes(e.attr...)) } span.End() } } func BenchmarkSpanLimits(b *testing.B) { b.Run("AttributeValueLengthLimit", func(b *testing.B) { limits := sdktrace.NewSpanLimits() limits.AttributeValueLengthLimit = 2 benchmarkSpanLimits(b, limits) }) b.Run("AttributeCountLimit", func(b *testing.B) { limits := sdktrace.NewSpanLimits() limits.AttributeCountLimit = 1 benchmarkSpanLimits(b, limits) }) b.Run("EventCountLimit", func(b *testing.B) { limits := sdktrace.NewSpanLimits() limits.EventCountLimit = 1 benchmarkSpanLimits(b, limits) }) b.Run("LinkCountLimit", func(b *testing.B) { limits := sdktrace.NewSpanLimits() limits.LinkCountLimit = 1 benchmarkSpanLimits(b, limits) }) b.Run("AttributePerEventCountLimit", func(b *testing.B) { limits := sdktrace.NewSpanLimits() limits.AttributePerEventCountLimit = 1 benchmarkSpanLimits(b, limits) }) b.Run("AttributePerLinkCountLimit", func(b *testing.B) { limits := sdktrace.NewSpanLimits() limits.AttributePerLinkCountLimit = 1 benchmarkSpanLimits(b, limits) }) } func BenchmarkSpanSetAttributesOverCapacity(b *testing.B) { limits := sdktrace.NewSpanLimits() limits.AttributeCountLimit = 1 tp := sdktrace.NewTracerProvider(sdktrace.WithSpanLimits(limits)) tracer := tp.Tracer("BenchmarkSpanSetAttributesOverCapacity") ctx := context.Background() attrs := make([]attribute.KeyValue, 128) for i := range attrs { key := fmt.Sprintf("key-%d", i) attrs[i] = attribute.Bool(key, true) } b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { _, span := tracer.Start(ctx, "/foo") span.SetAttributes(attrs...) span.End() } } func BenchmarkStartEndSpan(b *testing.B) { traceBenchmark(b, "Benchmark StartEndSpan", func(b *testing.B, t trace.Tracer) { ctx := context.Background() b.ResetTimer() for i := 0; i < b.N; i++ { _, span := t.Start(ctx, "/foo") span.End() } }) } func BenchmarkSpanWithAttributes_4(b *testing.B) { traceBenchmark(b, "Benchmark Start With 4 Attributes", func(b *testing.B, t trace.Tracer) { ctx := context.Background() b.ResetTimer() for i := 0; i < b.N; i++ { _, span := t.Start(ctx, "/foo") span.SetAttributes( attribute.Bool("key1", false), attribute.String("key2", "hello"), attribute.Int64("key3", 123), attribute.Float64("key4", 123.456), ) span.End() } }) } func BenchmarkSpanWithAttributes_8(b *testing.B) { traceBenchmark(b, "Benchmark Start With 8 Attributes", func(b *testing.B, t trace.Tracer) { ctx := context.Background() b.ResetTimer() for i := 0; i < b.N; i++ { _, span := t.Start(ctx, "/foo") span.SetAttributes( attribute.Bool("key1", false), attribute.String("key2", "hello"), attribute.Int64("key3", 123), attribute.Float64("key4", 123.456), attribute.Bool("key21", false), attribute.String("key22", "hello"), attribute.Int64("key23", 123), attribute.Float64("key24", 123.456), ) span.End() } }) } func BenchmarkSpanWithAttributes_all(b *testing.B) { traceBenchmark(b, "Benchmark Start With all Attribute types", func(b *testing.B, t trace.Tracer) { ctx := context.Background() b.ResetTimer() for i := 0; i < b.N; i++ { _, span := t.Start(ctx, "/foo") span.SetAttributes( attribute.Bool("key1", false), attribute.String("key2", "hello"), attribute.Int64("key3", 123), attribute.Float64("key7", 123.456), attribute.Int("key9", 123), ) span.End() } }) } func BenchmarkSpanWithAttributes_all_2x(b *testing.B) { traceBenchmark(b, "Benchmark Start With all Attributes types twice", func(b *testing.B, t trace.Tracer) { ctx := context.Background() b.ResetTimer() for i := 0; i < b.N; i++ { _, span := t.Start(ctx, "/foo") 
span.SetAttributes( attribute.Bool("key1", false), attribute.String("key2", "hello"), attribute.Int64("key3", 123), attribute.Float64("key7", 123.456), attribute.Int("key10", 123), attribute.Bool("key21", false), attribute.String("key22", "hello"), attribute.Int64("key23", 123), attribute.Float64("key27", 123.456), attribute.Int("key210", 123), ) span.End() } }) } func BenchmarkSpanWithEvents_4(b *testing.B) { traceBenchmark(b, "Benchmark Start With 4 Events", func(b *testing.B, t trace.Tracer) { ctx := context.Background() b.ResetTimer() for i := 0; i < b.N; i++ { _, span := t.Start(ctx, "/foo") span.AddEvent("event1") span.AddEvent("event2") span.AddEvent("event3") span.AddEvent("event4") span.End() } }) } func BenchmarkSpanWithEvents_8(b *testing.B) { traceBenchmark(b, "Benchmark Start With 4 Events", func(b *testing.B, t trace.Tracer) { ctx := context.Background() b.ResetTimer() for i := 0; i < b.N; i++ { _, span := t.Start(ctx, "/foo") span.AddEvent("event1") span.AddEvent("event2") span.AddEvent("event3") span.AddEvent("event4") span.AddEvent("event5") span.AddEvent("event6") span.AddEvent("event7") span.AddEvent("event8") span.End() } }) } func BenchmarkSpanWithEvents_WithStackTrace(b *testing.B) { traceBenchmark(b, "Benchmark Start With 4 Attributes", func(b *testing.B, t trace.Tracer) { ctx := context.Background() b.ResetTimer() for i := 0; i < b.N; i++ { _, span := t.Start(ctx, "/foo") span.AddEvent("event1", trace.WithStackTrace(true)) span.End() } }) } func BenchmarkSpanWithEvents_WithTimestamp(b *testing.B) { traceBenchmark(b, "Benchmark Start With 4 Attributes", func(b *testing.B, t trace.Tracer) { ctx := context.Background() b.ResetTimer() for i := 0; i < b.N; i++ { _, span := t.Start(ctx, "/foo") span.AddEvent("event1", trace.WithTimestamp(time.Unix(0, 0))) span.End() } }) } func BenchmarkTraceID_DotString(b *testing.B) { t, _ := trace.TraceIDFromHex("0000000000000001000000000000002a") sc := trace.NewSpanContext(trace.SpanContextConfig{TraceID: t}) want := "0000000000000001000000000000002a" for i := 0; i < b.N; i++ { if got := sc.TraceID().String(); got != want { b.Fatalf("got = %q want = %q", got, want) } } } func BenchmarkSpanID_DotString(b *testing.B) { sc := trace.NewSpanContext(trace.SpanContextConfig{SpanID: trace.SpanID{1}}) want := "0100000000000000" for i := 0; i < b.N; i++ { if got := sc.SpanID().String(); got != want { b.Fatalf("got = %q want = %q", got, want) } } } func traceBenchmark(b *testing.B, name string, fn func(*testing.B, trace.Tracer)) { b.Run("AlwaysSample", func(b *testing.B) { b.ReportAllocs() fn(b, tracer(b, name, sdktrace.AlwaysSample())) }) b.Run("NeverSample", func(b *testing.B) { b.ReportAllocs() fn(b, tracer(b, name, sdktrace.NeverSample())) }) } func tracer(b *testing.B, name string, sampler sdktrace.Sampler) trace.Tracer { tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sampler)) return tp.Tracer(name) } opentelemetry-go-1.21.0/sdk/trace/doc.go000066400000000000000000000015051452547353200200600ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. /* Package trace contains support for OpenTelemetry distributed tracing. The following assumes a basic familiarity with OpenTelemetry concepts. See https://opentelemetry.io. */ package trace // import "go.opentelemetry.io/otel/sdk/trace" opentelemetry-go-1.21.0/sdk/trace/event.go000066400000000000000000000021661452547353200204400ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "time" "go.opentelemetry.io/otel/attribute" ) // Event is a thing that happened during a Span's lifetime. type Event struct { // Name is the name of this event Name string // Attributes describe the aspects of the event. Attributes []attribute.KeyValue // DroppedAttributeCount is the number of attributes that were not // recorded due to configured limits being reached. DroppedAttributeCount int // Time at which this event was recorded. Time time.Time } opentelemetry-go-1.21.0/sdk/trace/evictedqueue.go000066400000000000000000000027031452547353200220040ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace // import "go.opentelemetry.io/otel/sdk/trace" // evictedQueue is a FIFO queue with a configurable capacity. type evictedQueue struct { queue []interface{} capacity int droppedCount int } func newEvictedQueue(capacity int) evictedQueue { // Do not pre-allocate queue, do this lazily. return evictedQueue{capacity: capacity} } // add adds value to the evictedQueue eq. If eq is at capacity, the oldest // queued value will be discarded and the drop count incremented. func (eq *evictedQueue) add(value interface{}) { if eq.capacity == 0 { eq.droppedCount++ return } if eq.capacity > 0 && len(eq.queue) == eq.capacity { // Drop first-in while avoiding allocating more capacity to eq.queue. copy(eq.queue[:eq.capacity-1], eq.queue[1:]) eq.queue = eq.queue[:eq.capacity-1] eq.droppedCount++ } eq.queue = append(eq.queue, value) } opentelemetry-go-1.21.0/sdk/trace/evictedqueue_test.go000066400000000000000000000033641452547353200230470ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace import ( "reflect" "testing" ) func init() { } func TestAdd(t *testing.T) { q := newEvictedQueue(3) q.add("value1") q.add("value2") if wantLen, gotLen := 2, len(q.queue); wantLen != gotLen { t.Errorf("got queue length %d want %d", gotLen, wantLen) } } func (eq *evictedQueue) queueToArray() []string { arr := make([]string, 0) for _, value := range eq.queue { arr = append(arr, value.(string)) } return arr } func TestDropCount(t *testing.T) { q := newEvictedQueue(3) q.add("value1") q.add("value2") q.add("value3") q.add("value1") q.add("value4") if wantLen, gotLen := 3, len(q.queue); wantLen != gotLen { t.Errorf("got queue length %d want %d", gotLen, wantLen) } if wantDropCount, gotDropCount := 2, q.droppedCount; wantDropCount != gotDropCount { t.Errorf("got drop count %d want %d", gotDropCount, wantDropCount) } wantArr := []string{"value3", "value1", "value4"} gotArr := q.queueToArray() if wantLen, gotLen := len(wantArr), len(gotArr); gotLen != wantLen { t.Errorf("got array len %d want %d", gotLen, wantLen) } if !reflect.DeepEqual(gotArr, wantArr) { t.Errorf("got array = %#v; want %#v", gotArr, wantArr) } } opentelemetry-go-1.21.0/sdk/trace/id_generator.go000066400000000000000000000046411452547353200217610ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" crand "crypto/rand" "encoding/binary" "math/rand" "sync" "go.opentelemetry.io/otel/trace" ) // IDGenerator allows custom generators for TraceID and SpanID. type IDGenerator interface { // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. // NewIDs returns a new trace and span ID. NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. // NewSpanID returns an ID for a new span in the trace with traceID. NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. } type randomIDGenerator struct { sync.Mutex randSource *rand.Rand } var _ IDGenerator = &randomIDGenerator{} // NewSpanID returns a non-zero span ID from a randomly-chosen sequence.
func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID { gen.Lock() defer gen.Unlock() sid := trace.SpanID{} _, _ = gen.randSource.Read(sid[:]) return sid } // NewIDs returns a non-zero trace ID and a non-zero span ID from a // randomly-chosen sequence. func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) { gen.Lock() defer gen.Unlock() tid := trace.TraceID{} _, _ = gen.randSource.Read(tid[:]) sid := trace.SpanID{} _, _ = gen.randSource.Read(sid[:]) return tid, sid } func defaultIDGenerator() IDGenerator { gen := &randomIDGenerator{} var rngSeed int64 _ = binary.Read(crand.Reader, binary.LittleEndian, &rngSeed) gen.randSource = rand.New(rand.NewSource(rngSeed)) return gen } opentelemetry-go-1.21.0/sdk/trace/id_generator_test.go000066400000000000000000000027421452547353200230200ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace import ( "context" "testing" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/trace" ) func TestNewIDs(t *testing.T) { gen := defaultIDGenerator() n := 1000 for i := 0; i < n; i++ { traceID, spanID := gen.NewIDs(context.Background()) assert.Truef(t, traceID.IsValid(), "trace id: %s", traceID.String()) assert.Truef(t, spanID.IsValid(), "span id: %s", spanID.String()) } } func TestNewSpanID(t *testing.T) { gen := defaultIDGenerator() testTraceID := [16]byte{123, 123} n := 1000 for i := 0; i < n; i++ { spanID := gen.NewSpanID(context.Background(), testTraceID) assert.Truef(t, spanID.IsValid(), "span id: %s", spanID.String()) } } func TestNewSpanIDWithInvalidTraceID(t *testing.T) { gen := defaultIDGenerator() spanID := gen.NewSpanID(context.Background(), trace.TraceID{}) assert.Truef(t, spanID.IsValid(), "span id: %s", spanID.String()) } opentelemetry-go-1.21.0/sdk/trace/link.go000066400000000000000000000022441452547353200202510ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" ) // Link is the relationship between two Spans. The relationship can be within // the same Trace or across different Traces. type Link struct { // SpanContext of the linked Span. SpanContext trace.SpanContext // Attributes describe the aspects of the link. 
Attributes []attribute.KeyValue // DroppedAttributeCount is the number of attributes that were not // recorded due to configured limits being reached. DroppedAttributeCount int } opentelemetry-go-1.21.0/sdk/trace/provider.go000066400000000000000000000370741452547353200211570ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" "fmt" "sync" "sync/atomic" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" "go.opentelemetry.io/otel/trace/noop" ) const ( defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer" ) // tracerProviderConfig. type tracerProviderConfig struct { // processors contains collection of SpanProcessors that are processing pipeline // for spans in the trace signal. // SpanProcessors registered with a TracerProvider and are called at the start // and end of a Span's lifecycle, and are called in the order they are // registered. processors []SpanProcessor // sampler is the default sampler used when creating new spans. sampler Sampler // idGenerator is used to generate all Span and Trace IDs when needed. idGenerator IDGenerator // spanLimits defines the attribute, event, and link limits for spans. spanLimits SpanLimits // resource contains attributes representing an entity that produces telemetry. resource *resource.Resource } // MarshalLog is the marshaling function used by the logging system to represent this exporter. func (cfg tracerProviderConfig) MarshalLog() interface{} { return struct { SpanProcessors []SpanProcessor SamplerType string IDGeneratorType string SpanLimits SpanLimits Resource *resource.Resource }{ SpanProcessors: cfg.processors, SamplerType: fmt.Sprintf("%T", cfg.sampler), IDGeneratorType: fmt.Sprintf("%T", cfg.idGenerator), SpanLimits: cfg.spanLimits, Resource: cfg.resource, } } // TracerProvider is an OpenTelemetry TracerProvider. It provides Tracers to // instrumentation so it can trace operational flow through a system. type TracerProvider struct { embedded.TracerProvider mu sync.Mutex namedTracer map[instrumentation.Scope]*tracer spanProcessors atomic.Pointer[spanProcessorStates] isShutdown atomic.Bool // These fields are not protected by the lock mu. They are assumed to be // immutable after creation of the TracerProvider. sampler Sampler idGenerator IDGenerator spanLimits SpanLimits resource *resource.Resource } var _ trace.TracerProvider = &TracerProvider{} // NewTracerProvider returns a new and configured TracerProvider. // // By default the returned TracerProvider is configured with: // - a ParentBased(AlwaysSample) Sampler // - a random number IDGenerator // - the resource.Default() Resource // - the default SpanLimits. 
// // The passed opts are used to override these default values and configure the // returned TracerProvider appropriately. func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider { o := tracerProviderConfig{ spanLimits: NewSpanLimits(), } o = applyTracerProviderEnvConfigs(o) for _, opt := range opts { o = opt.apply(o) } o = ensureValidTracerProviderConfig(o) tp := &TracerProvider{ namedTracer: make(map[instrumentation.Scope]*tracer), sampler: o.sampler, idGenerator: o.idGenerator, spanLimits: o.spanLimits, resource: o.resource, } global.Info("TracerProvider created", "config", o) spss := make(spanProcessorStates, 0, len(o.processors)) for _, sp := range o.processors { spss = append(spss, newSpanProcessorState(sp)) } tp.spanProcessors.Store(&spss) return tp } // Tracer returns a Tracer with the given name and options. If a Tracer for // the given name and options does not exist it is created, otherwise the // existing Tracer is returned. // // If name is empty, DefaultTracerName is used instead. // // This method is safe to be called concurrently. func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { // This check happens before the mutex is acquired to avoid deadlocking if Tracer() is called from within Shutdown(). if p.isShutdown.Load() { return noop.NewTracerProvider().Tracer(name, opts...) } c := trace.NewTracerConfig(opts...) if name == "" { name = defaultTracerName } is := instrumentation.Scope{ Name: name, Version: c.InstrumentationVersion(), SchemaURL: c.SchemaURL(), } t, ok := func() (trace.Tracer, bool) { p.mu.Lock() defer p.mu.Unlock() // Must check the flag after acquiring the mutex to avoid returning a valid tracer if Shutdown() ran // after the first check above but before we acquired the mutex. if p.isShutdown.Load() { return noop.NewTracerProvider().Tracer(name, opts...), true } t, ok := p.namedTracer[is] if !ok { t = &tracer{ provider: p, instrumentationScope: is, } p.namedTracer[is] = t } return t, ok }() if !ok { // This code is outside the mutex to not hold the lock while calling third party logging code: // - That code may do slow things like I/O, which would prolong the duration the lock is held, // slowing down all tracing consumers. // - Logging code may be instrumented with tracing and deadlock because it could try // acquiring the same non-reentrant mutex. global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL) } return t } // RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors. func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) { // This check prevents calls during a shutdown. if p.isShutdown.Load() { return } p.mu.Lock() defer p.mu.Unlock() // This check prevents calls after a shutdown. if p.isShutdown.Load() { return } current := p.getSpanProcessors() newSPS := make(spanProcessorStates, 0, len(current)+1) newSPS = append(newSPS, current...) newSPS = append(newSPS, newSpanProcessorState(sp)) p.spanProcessors.Store(&newSPS) } // UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors. func (p *TracerProvider) UnregisterSpanProcessor(sp SpanProcessor) { // This check prevents calls during a shutdown. if p.isShutdown.Load() { return } p.mu.Lock() defer p.mu.Unlock() // This check prevents calls after a shutdown. 
if p.isShutdown.Load() { return } old := p.getSpanProcessors() if len(old) == 0 { return } spss := make(spanProcessorStates, len(old)) copy(spss, old) // stop the span processor if it is started and remove it from the list var stopOnce *spanProcessorState var idx int for i, sps := range spss { if sps.sp == sp { stopOnce = sps idx = i } } if stopOnce != nil { stopOnce.state.Do(func() { if err := sp.Shutdown(context.Background()); err != nil { otel.Handle(err) } }) } if len(spss) > 1 { copy(spss[idx:], spss[idx+1:]) } spss[len(spss)-1] = nil spss = spss[:len(spss)-1] p.spanProcessors.Store(&spss) } // ForceFlush immediately exports all spans that have not yet been exported for // all the registered span processors. func (p *TracerProvider) ForceFlush(ctx context.Context) error { spss := p.getSpanProcessors() if len(spss) == 0 { return nil } for _, sps := range spss { select { case <-ctx.Done(): return ctx.Err() default: } if err := sps.sp.ForceFlush(ctx); err != nil { return err } } return nil } // Shutdown shuts down TracerProvider. All registered span processors are shut down // in the order they were registered and any held computational resources are released. // After Shutdown is called, all methods are no-ops. func (p *TracerProvider) Shutdown(ctx context.Context) error { // This check prevents deadlocks in case of recursive shutdown. if p.isShutdown.Load() { return nil } p.mu.Lock() defer p.mu.Unlock() // This check prevents calls after a shutdown has already been done concurrently. if !p.isShutdown.CompareAndSwap(false, true) { // did toggle? return nil } var retErr error for _, sps := range p.getSpanProcessors() { select { case <-ctx.Done(): return ctx.Err() default: } var err error sps.state.Do(func() { err = sps.sp.Shutdown(ctx) }) if err != nil { if retErr == nil { retErr = err } else { // Poor man's list of errors retErr = fmt.Errorf("%v; %v", retErr, err) } } } p.spanProcessors.Store(&spanProcessorStates{}) return retErr } func (p *TracerProvider) getSpanProcessors() spanProcessorStates { return *(p.spanProcessors.Load()) } // TracerProviderOption configures a TracerProvider. type TracerProviderOption interface { apply(tracerProviderConfig) tracerProviderConfig } type traceProviderOptionFunc func(tracerProviderConfig) tracerProviderConfig func (fn traceProviderOptionFunc) apply(cfg tracerProviderConfig) tracerProviderConfig { return fn(cfg) } // WithSyncer registers the exporter with the TracerProvider using a // SimpleSpanProcessor. // // This is not recommended for production use. The synchronous nature of the // SimpleSpanProcessor that will wrap the exporter make it good for testing, // debugging, or showing examples of other feature, but it will be slow and // have a high computation resource usage overhead. The WithBatcher option is // recommended for production use instead. func WithSyncer(e SpanExporter) TracerProviderOption { return WithSpanProcessor(NewSimpleSpanProcessor(e)) } // WithBatcher registers the exporter with the TracerProvider using a // BatchSpanProcessor configured with the passed opts. func WithBatcher(e SpanExporter, opts ...BatchSpanProcessorOption) TracerProviderOption { return WithSpanProcessor(NewBatchSpanProcessor(e, opts...)) } // WithSpanProcessor registers the SpanProcessor with a TracerProvider. 
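// // A minimal usage sketch (illustrative only: an external caller importing this package as // sdktrace is assumed, and exp is a placeholder SpanExporter): // // tp := sdktrace.NewTracerProvider( // sdktrace.WithSpanProcessor(sdktrace.NewBatchSpanProcessor(exp)), // ) // // With default batching options this is equivalent to WithBatcher(exp), which wraps the // exporter in a BatchSpanProcessor for you.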
func WithSpanProcessor(sp SpanProcessor) TracerProviderOption { return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { cfg.processors = append(cfg.processors, sp) return cfg }) } // WithResource returns a TracerProviderOption that will configure the // Resource r as a TracerProvider's Resource. The configured Resource is // referenced by all the Tracers the TracerProvider creates. It represents the // entity producing telemetry. // // If this option is not used, the TracerProvider will use the // resource.Default() Resource by default. func WithResource(r *resource.Resource) TracerProviderOption { return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { var err error cfg.resource, err = resource.Merge(resource.Environment(), r) if err != nil { otel.Handle(err) } return cfg }) } // WithIDGenerator returns a TracerProviderOption that will configure the // IDGenerator g as a TracerProvider's IDGenerator. The configured IDGenerator // is used by the Tracers the TracerProvider creates to generate new Span and // Trace IDs. // // If this option is not used, the TracerProvider will use a random number // IDGenerator by default. func WithIDGenerator(g IDGenerator) TracerProviderOption { return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { if g != nil { cfg.idGenerator = g } return cfg }) } // WithSampler returns a TracerProviderOption that will configure the Sampler // s as a TracerProvider's Sampler. The configured Sampler is used by the // Tracers the TracerProvider creates to make their sampling decisions for the // Spans they create. // // This option overrides the Sampler configured through the OTEL_TRACES_SAMPLER // and OTEL_TRACES_SAMPLER_ARG environment variables. If this option is not used // and the sampler is not configured through environment variables or the environment // contains invalid/unsupported configuration, the TracerProvider will use a // ParentBased(AlwaysSample) Sampler by default. func WithSampler(s Sampler) TracerProviderOption { return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { if s != nil { cfg.sampler = s } return cfg }) } // WithSpanLimits returns a TracerProviderOption that configures a // TracerProvider to use the SpanLimits sl. These SpanLimits bound any Span // created by a Tracer from the TracerProvider. // // If any field of sl is zero or negative it will be replaced with the default // value for that field. // // If this or WithRawSpanLimits are not provided, the TracerProvider will use // the limits defined by environment variables, or the defaults if unset. // Refer to the NewSpanLimits documentation for information about this // relationship. // // Deprecated: Use WithRawSpanLimits instead which allows setting unlimited // and zero limits. This option will be kept until the next major version // incremented release. 
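// // A migration sketch (illustrative only: the field value is an example): // // limits := sdktrace.NewSpanLimits() // limits.AttributeCountLimit = 256 // tp := sdktrace.NewTracerProvider(sdktrace.WithRawSpanLimits(limits)) // // Unlike WithSpanLimits, WithRawSpanLimits passes the limits through as-is, so zero keeps // its meaning of disabling the limited resource and negative values keep meaning unlimited.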
func WithSpanLimits(sl SpanLimits) TracerProviderOption { if sl.AttributeValueLengthLimit <= 0 { sl.AttributeValueLengthLimit = DefaultAttributeValueLengthLimit } if sl.AttributeCountLimit <= 0 { sl.AttributeCountLimit = DefaultAttributeCountLimit } if sl.EventCountLimit <= 0 { sl.EventCountLimit = DefaultEventCountLimit } if sl.AttributePerEventCountLimit <= 0 { sl.AttributePerEventCountLimit = DefaultAttributePerEventCountLimit } if sl.LinkCountLimit <= 0 { sl.LinkCountLimit = DefaultLinkCountLimit } if sl.AttributePerLinkCountLimit <= 0 { sl.AttributePerLinkCountLimit = DefaultAttributePerLinkCountLimit } return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { cfg.spanLimits = sl return cfg }) } // WithRawSpanLimits returns a TracerProviderOption that configures a // TracerProvider to use these limits. These limits bound any Span created by // a Tracer from the TracerProvider. // // The limits will be used as-is. Zero or negative values will not be changed // to the default value like WithSpanLimits does. Setting a limit to zero will // effectively disable the related resource it limits and setting to a // negative value will mean that resource is unlimited. Consequentially, this // means that the zero-value SpanLimits will disable all span resources. // Because of this, limits should be constructed using NewSpanLimits and // updated accordingly. // // If this or WithSpanLimits are not provided, the TracerProvider will use the // limits defined by environment variables, or the defaults if unset. Refer to // the NewSpanLimits documentation for information about this relationship. func WithRawSpanLimits(limits SpanLimits) TracerProviderOption { return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { cfg.spanLimits = limits return cfg }) } func applyTracerProviderEnvConfigs(cfg tracerProviderConfig) tracerProviderConfig { for _, opt := range tracerProviderOptionsFromEnv() { cfg = opt.apply(cfg) } return cfg } func tracerProviderOptionsFromEnv() []TracerProviderOption { var opts []TracerProviderOption sampler, err := samplerFromEnv() if err != nil { otel.Handle(err) } if sampler != nil { opts = append(opts, WithSampler(sampler)) } return opts } // ensureValidTracerProviderConfig ensures that given TracerProviderConfig is valid. func ensureValidTracerProviderConfig(cfg tracerProviderConfig) tracerProviderConfig { if cfg.sampler == nil { cfg.sampler = ParentBased(AlwaysSample()) } if cfg.idGenerator == nil { cfg.idGenerator = defaultIDGenerator() } if cfg.resource == nil { cfg.resource = resource.Default() } return cfg } opentelemetry-go-1.21.0/sdk/trace/provider_test.go000066400000000000000000000255131452547353200222110ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package trace import ( "context" "errors" "fmt" "math/rand" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ottest "go.opentelemetry.io/otel/sdk/internal/internaltest" "go.opentelemetry.io/otel/trace" ) type basicSpanProcessor struct { flushed bool closed bool injectShutdownError error } func (t *basicSpanProcessor) Shutdown(context.Context) error { t.closed = true return t.injectShutdownError } func (t *basicSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} func (t *basicSpanProcessor) OnEnd(ReadOnlySpan) {} func (t *basicSpanProcessor) ForceFlush(context.Context) error { t.flushed = true return nil } type shutdownSpanProcessor struct { shutdown func(context.Context) error } func (t *shutdownSpanProcessor) Shutdown(ctx context.Context) error { return t.shutdown(ctx) } func (t *shutdownSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} func (t *shutdownSpanProcessor) OnEnd(ReadOnlySpan) {} func (t *shutdownSpanProcessor) ForceFlush(context.Context) error { return nil } func TestShutdownCallsTracerMethod(t *testing.T) { stp := NewTracerProvider() sp := &shutdownSpanProcessor{ shutdown: func(ctx context.Context) error { _ = stp.Tracer("abc") // must not deadlock return nil }, } stp.RegisterSpanProcessor(sp) assert.NoError(t, stp.Shutdown(context.Background())) assert.True(t, stp.isShutdown.Load()) } func TestForceFlushAndShutdownTraceProviderWithoutProcessor(t *testing.T) { stp := NewTracerProvider() assert.NoError(t, stp.ForceFlush(context.Background())) assert.NoError(t, stp.Shutdown(context.Background())) assert.True(t, stp.isShutdown.Load()) } func TestUnregisterFirst(t *testing.T) { stp := NewTracerProvider() sp1 := &basicSpanProcessor{} sp2 := &basicSpanProcessor{} sp3 := &basicSpanProcessor{} stp.RegisterSpanProcessor(sp1) stp.RegisterSpanProcessor(sp2) stp.RegisterSpanProcessor(sp3) stp.UnregisterSpanProcessor(sp1) sps := stp.getSpanProcessors() require.Len(t, sps, 2) assert.Same(t, sp2, sps[0].sp) assert.Same(t, sp3, sps[1].sp) } func TestUnregisterMiddle(t *testing.T) { stp := NewTracerProvider() sp1 := &basicSpanProcessor{} sp2 := &basicSpanProcessor{} sp3 := &basicSpanProcessor{} stp.RegisterSpanProcessor(sp1) stp.RegisterSpanProcessor(sp2) stp.RegisterSpanProcessor(sp3) stp.UnregisterSpanProcessor(sp2) sps := stp.getSpanProcessors() require.Len(t, sps, 2) assert.Same(t, sp1, sps[0].sp) assert.Same(t, sp3, sps[1].sp) } func TestUnregisterLast(t *testing.T) { stp := NewTracerProvider() sp1 := &basicSpanProcessor{} sp2 := &basicSpanProcessor{} sp3 := &basicSpanProcessor{} stp.RegisterSpanProcessor(sp1) stp.RegisterSpanProcessor(sp2) stp.RegisterSpanProcessor(sp3) stp.UnregisterSpanProcessor(sp3) sps := stp.getSpanProcessors() require.Len(t, sps, 2) assert.Same(t, sp1, sps[0].sp) assert.Same(t, sp2, sps[1].sp) } func TestShutdownTraceProvider(t *testing.T) { stp := NewTracerProvider() sp := &basicSpanProcessor{} stp.RegisterSpanProcessor(sp) assert.NoError(t, stp.ForceFlush(context.Background())) assert.True(t, sp.flushed, "error ForceFlush basicSpanProcessor") assert.NoError(t, stp.Shutdown(context.Background())) assert.True(t, stp.isShutdown.Load()) assert.True(t, sp.closed, "error Shutdown basicSpanProcessor") } func TestFailedProcessorShutdown(t *testing.T) { stp := NewTracerProvider() spErr := errors.New("basic span processor shutdown failure") sp := &basicSpanProcessor{ injectShutdownError: spErr, } stp.RegisterSpanProcessor(sp) err := stp.Shutdown(context.Background()) assert.Error(t, err) assert.Equal(t, err, spErr) 
assert.True(t, stp.isShutdown.Load()) } func TestFailedProcessorsShutdown(t *testing.T) { stp := NewTracerProvider() spErr1 := errors.New("basic span processor shutdown failure1") spErr2 := errors.New("basic span processor shutdown failure2") sp1 := &basicSpanProcessor{ injectShutdownError: spErr1, } sp2 := &basicSpanProcessor{ injectShutdownError: spErr2, } stp.RegisterSpanProcessor(sp1) stp.RegisterSpanProcessor(sp2) err := stp.Shutdown(context.Background()) assert.Error(t, err) assert.EqualError(t, err, "basic span processor shutdown failure1; basic span processor shutdown failure2") assert.True(t, sp1.closed) assert.True(t, sp2.closed) assert.True(t, stp.isShutdown.Load()) } func TestFailedProcessorShutdownInUnregister(t *testing.T) { handler.Reset() stp := NewTracerProvider() spErr := errors.New("basic span processor shutdown failure") sp := &basicSpanProcessor{ injectShutdownError: spErr, } stp.RegisterSpanProcessor(sp) stp.UnregisterSpanProcessor(sp) assert.Contains(t, handler.errs, spErr) err := stp.Shutdown(context.Background()) assert.NoError(t, err) assert.True(t, stp.isShutdown.Load()) } func TestSchemaURL(t *testing.T) { stp := NewTracerProvider() schemaURL := "https://opentelemetry.io/schemas/1.2.0" tracerIface := stp.Tracer("tracername", trace.WithSchemaURL(schemaURL)) // Verify that the SchemaURL of the constructed Tracer is correctly populated. tracerStruct := tracerIface.(*tracer) assert.EqualValues(t, schemaURL, tracerStruct.instrumentationScope.SchemaURL) } func TestRegisterAfterShutdownWithoutProcessors(t *testing.T) { stp := NewTracerProvider() err := stp.Shutdown(context.Background()) assert.NoError(t, err) assert.True(t, stp.isShutdown.Load()) sp := &basicSpanProcessor{} stp.RegisterSpanProcessor(sp) // no-op assert.Empty(t, stp.getSpanProcessors()) } func TestRegisterAfterShutdownWithProcessors(t *testing.T) { stp := NewTracerProvider() sp1 := &basicSpanProcessor{} stp.RegisterSpanProcessor(sp1) err := stp.Shutdown(context.Background()) assert.NoError(t, err) assert.True(t, stp.isShutdown.Load()) assert.Empty(t, stp.getSpanProcessors()) sp2 := &basicSpanProcessor{} stp.RegisterSpanProcessor(sp2) // no-op assert.Empty(t, stp.getSpanProcessors()) } func TestTracerProviderSamplerConfigFromEnv(t *testing.T) { type testCase struct { sampler string samplerArg string argOptional bool description string errorType error invalidArgErrorType interface{} } randFloat := rand.Float64() tests := []testCase{ { sampler: "invalid-sampler", argOptional: true, description: ParentBased(AlwaysSample()).Description(), errorType: errUnsupportedSampler("invalid-sampler"), invalidArgErrorType: func() *errUnsupportedSampler { e := errUnsupportedSampler("invalid-sampler"); return &e }(), }, { sampler: "always_on", argOptional: true, description: AlwaysSample().Description(), }, { sampler: "always_off", argOptional: true, description: NeverSample().Description(), }, { sampler: "traceidratio", samplerArg: fmt.Sprintf("%g", randFloat), description: TraceIDRatioBased(randFloat).Description(), }, { sampler: "traceidratio", samplerArg: fmt.Sprintf("%g", -randFloat), description: TraceIDRatioBased(1.0).Description(), errorType: errNegativeTraceIDRatio, }, { sampler: "traceidratio", samplerArg: fmt.Sprintf("%g", 1+randFloat), description: TraceIDRatioBased(1.0).Description(), errorType: errGreaterThanOneTraceIDRatio, }, { sampler: "traceidratio", argOptional: true, description: TraceIDRatioBased(1.0).Description(), invalidArgErrorType: new(samplerArgParseError), }, { sampler: "parentbased_always_on", 
argOptional: true, description: ParentBased(AlwaysSample()).Description(), }, { sampler: "parentbased_always_off", argOptional: true, description: ParentBased(NeverSample()).Description(), }, { sampler: "parentbased_traceidratio", samplerArg: fmt.Sprintf("%g", randFloat), description: ParentBased(TraceIDRatioBased(randFloat)).Description(), }, { sampler: "parentbased_traceidratio", samplerArg: fmt.Sprintf("%g", -randFloat), description: ParentBased(TraceIDRatioBased(1.0)).Description(), errorType: errNegativeTraceIDRatio, }, { sampler: "parentbased_traceidratio", samplerArg: fmt.Sprintf("%g", 1+randFloat), description: ParentBased(TraceIDRatioBased(1.0)).Description(), errorType: errGreaterThanOneTraceIDRatio, }, { sampler: "parentbased_traceidratio", argOptional: true, description: ParentBased(TraceIDRatioBased(1.0)).Description(), invalidArgErrorType: new(samplerArgParseError), }, } handler.Reset() for _, test := range tests { t.Run(test.sampler, func(t *testing.T) { envVars := map[string]string{ "OTEL_TRACES_SAMPLER": test.sampler, } if test.samplerArg != "" { envVars["OTEL_TRACES_SAMPLER_ARG"] = test.samplerArg } envStore, err := ottest.SetEnvVariables(envVars) require.NoError(t, err) t.Cleanup(func() { handler.Reset() require.NoError(t, envStore.Restore()) }) stp := NewTracerProvider(WithSyncer(NewTestExporter())) assert.Equal(t, test.description, stp.sampler.Description()) if test.errorType != nil { testStoredError(t, test.errorType) } else { assert.Empty(t, handler.errs) } if test.argOptional { t.Run("invalid sampler arg", func(t *testing.T) { envStore, err := ottest.SetEnvVariables(map[string]string{ "OTEL_TRACES_SAMPLER": test.sampler, "OTEL_TRACES_SAMPLER_ARG": "invalid-ignored-string", }) require.NoError(t, err) t.Cleanup(func() { handler.Reset() require.NoError(t, envStore.Restore()) }) stp := NewTracerProvider(WithSyncer(NewTestExporter())) t.Cleanup(func() { require.NoError(t, stp.Shutdown(context.Background())) }) assert.Equal(t, test.description, stp.sampler.Description()) if test.invalidArgErrorType != nil { testStoredError(t, test.invalidArgErrorType) } else { assert.Empty(t, handler.errs) } }) } }) } } func testStoredError(t *testing.T, target interface{}) { t.Helper() if assert.Len(t, handler.errs, 1) && assert.Error(t, handler.errs[0]) { err := handler.errs[0] require.Implements(t, (*error)(nil), target) require.NotNil(t, target.(error)) defer handler.Reset() if errors.Is(err, target.(error)) { return } assert.ErrorAs(t, err, target) } } opentelemetry-go-1.21.0/sdk/trace/sampler_env.go000066400000000000000000000057141452547353200216340ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
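// Sampler selection below is driven by the OTEL_TRACES_SAMPLER and OTEL_TRACES_SAMPLER_ARG // environment variables. An illustrative configuration (values are examples only; the argument // must parse as a float in [0, 1] for the ratio-based samplers): // // export OTEL_TRACES_SAMPLER=parentbased_traceidratio // export OTEL_TRACES_SAMPLER_ARG=0.25 // // The recognized sampler names are always_on, always_off, traceidratio, parentbased_always_on, // parentbased_always_off and parentbased_traceidratio.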
package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "errors" "fmt" "os" "strconv" "strings" ) const ( tracesSamplerKey = "OTEL_TRACES_SAMPLER" tracesSamplerArgKey = "OTEL_TRACES_SAMPLER_ARG" samplerAlwaysOn = "always_on" samplerAlwaysOff = "always_off" samplerTraceIDRatio = "traceidratio" samplerParentBasedAlwaysOn = "parentbased_always_on" samplerParsedBasedAlwaysOff = "parentbased_always_off" samplerParentBasedTraceIDRatio = "parentbased_traceidratio" ) type errUnsupportedSampler string func (e errUnsupportedSampler) Error() string { return fmt.Sprintf("unsupported sampler: %s", string(e)) } var ( errNegativeTraceIDRatio = errors.New("invalid trace ID ratio: less than 0.0") errGreaterThanOneTraceIDRatio = errors.New("invalid trace ID ratio: greater than 1.0") ) type samplerArgParseError struct { parseErr error } func (e samplerArgParseError) Error() string { return fmt.Sprintf("parsing sampler argument: %s", e.parseErr.Error()) } func (e samplerArgParseError) Unwrap() error { return e.parseErr } func samplerFromEnv() (Sampler, error) { sampler, ok := os.LookupEnv(tracesSamplerKey) if !ok { return nil, nil } sampler = strings.ToLower(strings.TrimSpace(sampler)) samplerArg, hasSamplerArg := os.LookupEnv(tracesSamplerArgKey) samplerArg = strings.TrimSpace(samplerArg) switch sampler { case samplerAlwaysOn: return AlwaysSample(), nil case samplerAlwaysOff: return NeverSample(), nil case samplerTraceIDRatio: if !hasSamplerArg { return TraceIDRatioBased(1.0), nil } return parseTraceIDRatio(samplerArg) case samplerParentBasedAlwaysOn: return ParentBased(AlwaysSample()), nil case samplerParsedBasedAlwaysOff: return ParentBased(NeverSample()), nil case samplerParentBasedTraceIDRatio: if !hasSamplerArg { return ParentBased(TraceIDRatioBased(1.0)), nil } ratio, err := parseTraceIDRatio(samplerArg) return ParentBased(ratio), err default: return nil, errUnsupportedSampler(sampler) } } func parseTraceIDRatio(arg string) (Sampler, error) { v, err := strconv.ParseFloat(arg, 64) if err != nil { return TraceIDRatioBased(1.0), samplerArgParseError{err} } if v < 0.0 { return TraceIDRatioBased(1.0), errNegativeTraceIDRatio } if v > 1.0 { return TraceIDRatioBased(1.0), errGreaterThanOneTraceIDRatio } return TraceIDRatioBased(v), nil } opentelemetry-go-1.21.0/sdk/trace/sampling.go000066400000000000000000000207571452547353200211370ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" "encoding/binary" "fmt" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" ) // Sampler decides whether a trace should be sampled and exported. type Sampler interface { // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. // ShouldSample returns a SamplingResult based on a decision made from the // passed parameters. 
ShouldSample(parameters SamplingParameters) SamplingResult // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. // Description returns information describing the Sampler. Description() string // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. } // SamplingParameters contains the values passed to a Sampler. type SamplingParameters struct { ParentContext context.Context TraceID trace.TraceID Name string Kind trace.SpanKind Attributes []attribute.KeyValue Links []trace.Link } // SamplingDecision indicates whether a span is dropped, recorded and/or sampled. type SamplingDecision uint8 // Valid sampling decisions. const ( // Drop will not record the span and all attributes/events will be dropped. Drop SamplingDecision = iota // Record indicates the span's `IsRecording() == true`, but `Sampled` flag // *must not* be set. RecordOnly // RecordAndSample has span's `IsRecording() == true` and `Sampled` flag // *must* be set. RecordAndSample ) // SamplingResult conveys a SamplingDecision, set of Attributes and a Tracestate. type SamplingResult struct { Decision SamplingDecision Attributes []attribute.KeyValue Tracestate trace.TraceState } type traceIDRatioSampler struct { traceIDUpperBound uint64 description string } func (ts traceIDRatioSampler) ShouldSample(p SamplingParameters) SamplingResult { psc := trace.SpanContextFromContext(p.ParentContext) x := binary.BigEndian.Uint64(p.TraceID[8:16]) >> 1 if x < ts.traceIDUpperBound { return SamplingResult{ Decision: RecordAndSample, Tracestate: psc.TraceState(), } } return SamplingResult{ Decision: Drop, Tracestate: psc.TraceState(), } } func (ts traceIDRatioSampler) Description() string { return ts.description } // TraceIDRatioBased samples a given fraction of traces. Fractions >= 1 will // always sample. Fractions < 0 are treated as zero. To respect the // parent trace's `SampledFlag`, the `TraceIDRatioBased` sampler should be used // as a delegate of a `Parent` sampler. // //nolint:revive // revive complains about stutter of `trace.TraceIDRatioBased` func TraceIDRatioBased(fraction float64) Sampler { if fraction >= 1 { return AlwaysSample() } if fraction <= 0 { fraction = 0 } return &traceIDRatioSampler{ traceIDUpperBound: uint64(fraction * (1 << 63)), description: fmt.Sprintf("TraceIDRatioBased{%g}", fraction), } } type alwaysOnSampler struct{} func (as alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult { return SamplingResult{ Decision: RecordAndSample, Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), } } func (as alwaysOnSampler) Description() string { return "AlwaysOnSampler" } // AlwaysSample returns a Sampler that samples every trace. // Be careful about using this sampler in a production application with // significant traffic: a new trace will be started and exported for every // request. func AlwaysSample() Sampler { return alwaysOnSampler{} } type alwaysOffSampler struct{} func (as alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult { return SamplingResult{ Decision: Drop, Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), } } func (as alwaysOffSampler) Description() string { return "AlwaysOffSampler" } // NeverSample returns a Sampler that samples no traces. func NeverSample() Sampler { return alwaysOffSampler{} } // ParentBased returns a sampler decorator which behaves differently, // based on the parent of the span. 
If the span has no parent, // the decorated sampler is used to make sampling decision. If the span has // a parent, depending on whether the parent is remote and whether it // is sampled, one of the following samplers will apply: // - remoteParentSampled(Sampler) (default: AlwaysOn) // - remoteParentNotSampled(Sampler) (default: AlwaysOff) // - localParentSampled(Sampler) (default: AlwaysOn) // - localParentNotSampled(Sampler) (default: AlwaysOff) func ParentBased(root Sampler, samplers ...ParentBasedSamplerOption) Sampler { return parentBased{ root: root, config: configureSamplersForParentBased(samplers), } } type parentBased struct { root Sampler config samplerConfig } func configureSamplersForParentBased(samplers []ParentBasedSamplerOption) samplerConfig { c := samplerConfig{ remoteParentSampled: AlwaysSample(), remoteParentNotSampled: NeverSample(), localParentSampled: AlwaysSample(), localParentNotSampled: NeverSample(), } for _, so := range samplers { c = so.apply(c) } return c } // samplerConfig is a group of options for parentBased sampler. type samplerConfig struct { remoteParentSampled, remoteParentNotSampled Sampler localParentSampled, localParentNotSampled Sampler } // ParentBasedSamplerOption configures the sampler for a particular sampling case. type ParentBasedSamplerOption interface { apply(samplerConfig) samplerConfig } // WithRemoteParentSampled sets the sampler for the case of sampled remote parent. func WithRemoteParentSampled(s Sampler) ParentBasedSamplerOption { return remoteParentSampledOption{s} } type remoteParentSampledOption struct { s Sampler } func (o remoteParentSampledOption) apply(config samplerConfig) samplerConfig { config.remoteParentSampled = o.s return config } // WithRemoteParentNotSampled sets the sampler for the case of remote parent // which is not sampled. func WithRemoteParentNotSampled(s Sampler) ParentBasedSamplerOption { return remoteParentNotSampledOption{s} } type remoteParentNotSampledOption struct { s Sampler } func (o remoteParentNotSampledOption) apply(config samplerConfig) samplerConfig { config.remoteParentNotSampled = o.s return config } // WithLocalParentSampled sets the sampler for the case of sampled local parent. func WithLocalParentSampled(s Sampler) ParentBasedSamplerOption { return localParentSampledOption{s} } type localParentSampledOption struct { s Sampler } func (o localParentSampledOption) apply(config samplerConfig) samplerConfig { config.localParentSampled = o.s return config } // WithLocalParentNotSampled sets the sampler for the case of local parent // which is not sampled. 
func WithLocalParentNotSampled(s Sampler) ParentBasedSamplerOption { return localParentNotSampledOption{s} } type localParentNotSampledOption struct { s Sampler } func (o localParentNotSampledOption) apply(config samplerConfig) samplerConfig { config.localParentNotSampled = o.s return config } func (pb parentBased) ShouldSample(p SamplingParameters) SamplingResult { psc := trace.SpanContextFromContext(p.ParentContext) if psc.IsValid() { if psc.IsRemote() { if psc.IsSampled() { return pb.config.remoteParentSampled.ShouldSample(p) } return pb.config.remoteParentNotSampled.ShouldSample(p) } if psc.IsSampled() { return pb.config.localParentSampled.ShouldSample(p) } return pb.config.localParentNotSampled.ShouldSample(p) } return pb.root.ShouldSample(p) } func (pb parentBased) Description() string { return fmt.Sprintf("ParentBased{root:%s,remoteParentSampled:%s,"+ "remoteParentNotSampled:%s,localParentSampled:%s,localParentNotSampled:%s}", pb.root.Description(), pb.config.remoteParentSampled.Description(), pb.config.remoteParentNotSampled.Description(), pb.config.localParentSampled.Description(), pb.config.localParentNotSampled.Description(), ) } opentelemetry-go-1.21.0/sdk/trace/sampling_test.go000066400000000000000000000147261452547353200221750ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
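// Before the sampler tests below, a hedged sketch of composing the samplers
// defined above in application code (the 0.1 and 0.01 ratios and the
// sdktrace import alias are illustrative assumptions, not values taken from
// this repository):
//
//	// Sample 10% of root traces; for children of unsampled remote parents,
//	// override the default NeverSample with a 1% ratio sampler.
//	sampler := sdktrace.ParentBased(
//		sdktrace.TraceIDRatioBased(0.1),
//		sdktrace.WithRemoteParentNotSampled(sdktrace.TraceIDRatioBased(0.01)),
//	)
//	tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sampler))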
package trace import ( "context" "fmt" "math/rand" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/trace" ) func TestParentBasedDefaultLocalParentSampled(t *testing.T) { sampler := ParentBased(AlwaysSample()) traceID, _ := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736") spanID, _ := trace.SpanIDFromHex("00f067aa0ba902b7") parentCtx := trace.ContextWithSpanContext( context.Background(), trace.NewSpanContext(trace.SpanContextConfig{ TraceID: traceID, SpanID: spanID, TraceFlags: trace.FlagsSampled, }), ) if sampler.ShouldSample(SamplingParameters{ParentContext: parentCtx}).Decision != RecordAndSample { t.Error("Sampling decision should be RecordAndSample") } } func TestParentBasedDefaultLocalParentNotSampled(t *testing.T) { sampler := ParentBased(AlwaysSample()) traceID, _ := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736") spanID, _ := trace.SpanIDFromHex("00f067aa0ba902b7") parentCtx := trace.ContextWithSpanContext( context.Background(), trace.NewSpanContext(trace.SpanContextConfig{ TraceID: traceID, SpanID: spanID, }), ) if sampler.ShouldSample(SamplingParameters{ParentContext: parentCtx}).Decision != Drop { t.Error("Sampling decision should be Drop") } } func TestParentBasedWithNoParent(t *testing.T) { params := SamplingParameters{} sampler := ParentBased(AlwaysSample()) if sampler.ShouldSample(params).Decision != RecordAndSample { t.Error("Sampling decision should be RecordAndSample") } sampler = ParentBased(NeverSample()) if sampler.ShouldSample(params).Decision != Drop { t.Error("Sampling decision should be Drop") } } func TestParentBasedWithSamplerOptions(t *testing.T) { testCases := []struct { name string samplerOption ParentBasedSamplerOption isParentRemote, isParentSampled bool expectedDecision SamplingDecision }{ { "localParentSampled", WithLocalParentSampled(NeverSample()), false, true, Drop, }, { "localParentNotSampled", WithLocalParentNotSampled(AlwaysSample()), false, false, RecordAndSample, }, { "remoteParentSampled", WithRemoteParentSampled(NeverSample()), true, true, Drop, }, { "remoteParentNotSampled", WithRemoteParentNotSampled(AlwaysSample()), true, false, RecordAndSample, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { traceID, _ := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736") spanID, _ := trace.SpanIDFromHex("00f067aa0ba902b7") pscc := trace.SpanContextConfig{ TraceID: traceID, SpanID: spanID, Remote: tc.isParentRemote, } if tc.isParentSampled { pscc.TraceFlags = trace.FlagsSampled } params := SamplingParameters{ ParentContext: trace.ContextWithSpanContext( context.Background(), trace.NewSpanContext(pscc), ), } sampler := ParentBased( nil, tc.samplerOption, ) var wantStr, gotStr string switch tc.expectedDecision { case RecordAndSample: wantStr = "RecordAndSample" case Drop: wantStr = "Drop" default: wantStr = "unknown" } actualDecision := sampler.ShouldSample(params).Decision switch actualDecision { case RecordAndSample: gotStr = "RecordAndSample" case Drop: gotStr = "Drop" default: gotStr = "unknown" } assert.Equalf(t, tc.expectedDecision, actualDecision, "want %s, got %s", wantStr, gotStr) }) } } func TestParentBasedDefaultDescription(t *testing.T) { sampler := ParentBased(AlwaysSample()) expectedDescription := fmt.Sprintf("ParentBased{root:%s,remoteParentSampled:%s,"+ "remoteParentNotSampled:%s,localParentSampled:%s,localParentNotSampled:%s}", AlwaysSample().Description(), AlwaysSample().Description(), NeverSample().Description(), 
AlwaysSample().Description(), NeverSample().Description()) if sampler.Description() != expectedDescription { t.Errorf("Sampler description should be %s, got '%s' instead", expectedDescription, sampler.Description(), ) } } // TraceIDRatioBased sampler requirements state // // "A TraceIDRatioBased sampler with a given sampling rate MUST also sample // all traces that any TraceIDRatioBased sampler with a lower sampling rate // would sample." func TestTraceIdRatioSamplesInclusively(t *testing.T) { const ( numSamplers = 1000 numTraces = 100 ) idg := defaultIDGenerator() for i := 0; i < numSamplers; i++ { ratioLo, ratioHi := rand.Float64(), rand.Float64() if ratioHi < ratioLo { ratioLo, ratioHi = ratioHi, ratioLo } samplerHi := TraceIDRatioBased(ratioHi) samplerLo := TraceIDRatioBased(ratioLo) for j := 0; j < numTraces; j++ { traceID, _ := idg.NewIDs(context.Background()) params := SamplingParameters{TraceID: traceID} if samplerLo.ShouldSample(params).Decision == RecordAndSample { require.Equal(t, RecordAndSample, samplerHi.ShouldSample(params).Decision, "%s sampled but %s did not", samplerLo.Description(), samplerHi.Description()) } } } } func TestTracestateIsPassed(t *testing.T) { testCases := []struct { name string sampler Sampler }{ { "notSampled", NeverSample(), }, { "sampled", AlwaysSample(), }, { "parentSampled", ParentBased(AlwaysSample()), }, { "parentNotSampled", ParentBased(NeverSample()), }, { "traceIDRatioSampler", TraceIDRatioBased(.5), }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { traceState, err := trace.ParseTraceState("k=v") if err != nil { t.Error(err) } params := SamplingParameters{ ParentContext: trace.ContextWithSpanContext( context.Background(), trace.NewSpanContext(trace.SpanContextConfig{ TraceState: traceState, }), ), } require.Equal(t, traceState, tc.sampler.ShouldSample(params).Tracestate, "TraceState is not equal") }) } } opentelemetry-go-1.21.0/sdk/trace/simple_span_processor.go000066400000000000000000000107221452547353200237250ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" "sync" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/internal/global" ) // simpleSpanProcessor is a SpanProcessor that synchronously sends all // completed Spans to a trace.Exporter immediately. type simpleSpanProcessor struct { exporterMu sync.Mutex exporter SpanExporter stopOnce sync.Once } var _ SpanProcessor = (*simpleSpanProcessor)(nil) // NewSimpleSpanProcessor returns a new SpanProcessor that will synchronously // send completed spans to the exporter immediately. // // This SpanProcessor is not recommended for production use. The synchronous // nature of this SpanProcessor make it good for testing, debugging, or // showing examples of other feature, but it will be slow and have a high // computation resource usage overhead. The BatchSpanProcessor is recommended // for production use instead. 
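//
// A hedged wiring sketch (the stdouttrace exporter is an illustrative
// assumption; any SpanExporter is handled the same way):
//
//	exp, _ := stdouttrace.New() // error handling elided in this sketch
//	tp := NewTracerProvider(WithSpanProcessor(NewSimpleSpanProcessor(exp)))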
func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor { ssp := &simpleSpanProcessor{ exporter: exporter, } global.Warn("SimpleSpanProcessor is not recommended for production use, consider using BatchSpanProcessor instead.") return ssp } // OnStart does nothing. func (ssp *simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {} // OnEnd immediately exports a ReadOnlySpan. func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) { ssp.exporterMu.Lock() defer ssp.exporterMu.Unlock() if ssp.exporter != nil && s.SpanContext().TraceFlags().IsSampled() { if err := ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}); err != nil { otel.Handle(err) } } } // Shutdown shuts down the exporter this SimpleSpanProcessor exports to. func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error { var err error ssp.stopOnce.Do(func() { stopFunc := func(exp SpanExporter) (<-chan error, func()) { done := make(chan error) return done, func() { done <- exp.Shutdown(ctx) } } // The exporter field of the simpleSpanProcessor needs to be zeroed to // signal it is shut down, meaning all subsequent calls to OnEnd will // be gracefully ignored. This needs to be done synchronously to avoid // any race condition. // // A closure is used to keep reference to the exporter and then the // field is zeroed. This ensures the simpleSpanProcessor is shut down // before the exporter. This order is important as it avoids a // potential deadlock. If the exporter shut down operation generates a // span, that span would need to be exported. Meaning, OnEnd would be // called and try acquiring the lock that is held here. ssp.exporterMu.Lock() done, shutdown := stopFunc(ssp.exporter) ssp.exporter = nil ssp.exporterMu.Unlock() go shutdown() // Wait for the exporter to shut down or the deadline to expire. select { case err = <-done: case <-ctx.Done(): // It is possible for the exporter to have immediately shut down // and the context to be done simultaneously. In that case this // outer select statement will randomly choose a case. This will // result in a different returned error for similar scenarios. // Instead, double check if the exporter shut down at the same // time and return that error if so. This will ensure consistency // as well as ensure the caller knows the exporter shut down // successfully (they can already determine if the deadline is // expired given they passed the context). select { case err = <-done: default: err = ctx.Err() } } }) return err } // ForceFlush does nothing as there is no data to flush. func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error { return nil } // MarshalLog is the marshaling function used by the logging system to represent this Span Processor. func (ssp *simpleSpanProcessor) MarshalLog() interface{} { return struct { Type string Exporter SpanExporter }{ Type: "SimpleSpanProcessor", Exporter: ssp.exporter, } } opentelemetry-go-1.21.0/sdk/trace/simple_span_processor_test.go000066400000000000000000000123021452547353200247600ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package trace_test import ( "context" "errors" "sync" "testing" "time" "github.com/stretchr/testify/assert" sdktrace "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/trace" ) var ( tid, _ = trace.TraceIDFromHex("01020304050607080102040810203040") sid, _ = trace.SpanIDFromHex("0102040810203040") ) type testExporter struct { spans []sdktrace.ReadOnlySpan shutdown bool } func (t *testExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error { t.spans = append(t.spans, spans...) return nil } func (t *testExporter) Shutdown(ctx context.Context) error { t.shutdown = true select { case <-ctx.Done(): // Ensure context deadline tests receive the expected error. return ctx.Err() default: return nil } } var _ sdktrace.SpanExporter = (*testExporter)(nil) func TestNewSimpleSpanProcessor(t *testing.T) { if ssp := sdktrace.NewSimpleSpanProcessor(&testExporter{}); ssp == nil { t.Error("failed to create new SimpleSpanProcessor") } } func TestNewSimpleSpanProcessorWithNilExporter(t *testing.T) { if ssp := sdktrace.NewSimpleSpanProcessor(nil); ssp == nil { t.Error("failed to create new SimpleSpanProcessor with nil exporter") } } func startSpan(tp trace.TracerProvider) trace.Span { tr := tp.Tracer("SimpleSpanProcessor") sc := trace.NewSpanContext(trace.SpanContextConfig{ TraceID: tid, SpanID: sid, TraceFlags: 0x1, }) ctx := trace.ContextWithRemoteSpanContext(context.Background(), sc) _, span := tr.Start(ctx, "OnEnd") return span } func TestSimpleSpanProcessorOnEnd(t *testing.T) { tp := basicTracerProvider(t) te := testExporter{} ssp := sdktrace.NewSimpleSpanProcessor(&te) tp.RegisterSpanProcessor(ssp) startSpan(tp).End() wantTraceID := tid gotTraceID := te.spans[0].SpanContext().TraceID() if wantTraceID != gotTraceID { t.Errorf("SimplerSpanProcessor OnEnd() check: got %+v, want %+v\n", gotTraceID, wantTraceID) } } func TestSimpleSpanProcessorShutdown(t *testing.T) { exporter := &testExporter{} ssp := sdktrace.NewSimpleSpanProcessor(exporter) // Ensure we can export a span before we test we cannot after shutdown. 
tp := basicTracerProvider(t) tp.RegisterSpanProcessor(ssp) startSpan(tp).End() nExported := len(exporter.spans) if nExported != 1 { t.Error("failed to verify span export") } if err := ssp.Shutdown(context.Background()); err != nil { t.Errorf("shutting the SimpleSpanProcessor down: %v", err) } if !exporter.shutdown { t.Error("SimpleSpanProcessor.Shutdown did not shut down exporter") } startSpan(tp).End() if len(exporter.spans) > nExported { t.Error("exported span to shutdown exporter") } } func TestSimpleSpanProcessorShutdownOnEndConcurrentSafe(t *testing.T) { exporter := &testExporter{} ssp := sdktrace.NewSimpleSpanProcessor(exporter) tp := basicTracerProvider(t) tp.RegisterSpanProcessor(ssp) stop := make(chan struct{}) done := make(chan struct{}) go func() { defer func() { done <- struct{}{} }() for { select { case <-stop: return default: startSpan(tp).End() } } }() if err := ssp.Shutdown(context.Background()); err != nil { t.Errorf("shutting the SimpleSpanProcessor down: %v", err) } if !exporter.shutdown { t.Error("SimpleSpanProcessor.Shutdown did not shut down exporter") } stop <- struct{}{} <-done } func TestSimpleSpanProcessorShutdownOnEndConcurrentSafe2(t *testing.T) { exporter := &testExporter{} ssp := sdktrace.NewSimpleSpanProcessor(exporter) tp := basicTracerProvider(t) tp.RegisterSpanProcessor(ssp) var wg sync.WaitGroup wg.Add(2) span := func(spanName string) { assert.NotPanics(t, func() { defer wg.Done() _, span := tp.Tracer("test").Start(context.Background(), spanName) span.End() }) } go span("test-span-1") go span("test-span-2") wg.Wait() assert.NoError(t, ssp.Shutdown(context.Background())) assert.True(t, exporter.shutdown, "exporter shutdown") } func TestSimpleSpanProcessorShutdownHonorsContextDeadline(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond) defer cancel() <-ctx.Done() ssp := sdktrace.NewSimpleSpanProcessor(&testExporter{}) if got, want := ssp.Shutdown(ctx), context.DeadlineExceeded; !errors.Is(got, want) { t.Errorf("SimpleSpanProcessor.Shutdown did not return %v, got %v", want, got) } } func TestSimpleSpanProcessorShutdownHonorsContextCancel(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() ssp := sdktrace.NewSimpleSpanProcessor(&testExporter{}) if got, want := ssp.Shutdown(ctx), context.Canceled; !errors.Is(got, want) { t.Errorf("SimpleSpanProcessor.Shutdown did not return %v, got %v", want, got) } } opentelemetry-go-1.21.0/sdk/trace/snapshot.go000066400000000000000000000101621452547353200211510ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "time" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" "go.opentelemetry.io/otel/trace" ) // snapshot is an record of a spans state at a particular checkpointed time. // It is used as a read-only representation of that state. 
type snapshot struct { name string spanContext trace.SpanContext parent trace.SpanContext spanKind trace.SpanKind startTime time.Time endTime time.Time attributes []attribute.KeyValue events []Event links []Link status Status childSpanCount int droppedAttributeCount int droppedEventCount int droppedLinkCount int resource *resource.Resource instrumentationScope instrumentation.Scope } var _ ReadOnlySpan = snapshot{} func (s snapshot) private() {} // Name returns the name of the span. func (s snapshot) Name() string { return s.name } // SpanContext returns the unique SpanContext that identifies the span. func (s snapshot) SpanContext() trace.SpanContext { return s.spanContext } // Parent returns the unique SpanContext that identifies the parent of the // span if one exists. If the span has no parent the returned SpanContext // will be invalid. func (s snapshot) Parent() trace.SpanContext { return s.parent } // SpanKind returns the role the span plays in a Trace. func (s snapshot) SpanKind() trace.SpanKind { return s.spanKind } // StartTime returns the time the span started recording. func (s snapshot) StartTime() time.Time { return s.startTime } // EndTime returns the time the span stopped recording. It will be zero if // the span has not ended. func (s snapshot) EndTime() time.Time { return s.endTime } // Attributes returns the defining attributes of the span. func (s snapshot) Attributes() []attribute.KeyValue { return s.attributes } // Links returns all the links the span has to other spans. func (s snapshot) Links() []Link { return s.links } // Events returns all the events that occurred within in the spans // lifetime. func (s snapshot) Events() []Event { return s.events } // Status returns the spans status. func (s snapshot) Status() Status { return s.status } // InstrumentationScope returns information about the instrumentation // scope that created the span. func (s snapshot) InstrumentationScope() instrumentation.Scope { return s.instrumentationScope } // InstrumentationLibrary returns information about the instrumentation // library that created the span. func (s snapshot) InstrumentationLibrary() instrumentation.Library { return s.instrumentationScope } // Resource returns information about the entity that produced the span. func (s snapshot) Resource() *resource.Resource { return s.resource } // DroppedAttributes returns the number of attributes dropped by the span // due to limits being reached. func (s snapshot) DroppedAttributes() int { return s.droppedAttributeCount } // DroppedLinks returns the number of links dropped by the span due to limits // being reached. func (s snapshot) DroppedLinks() int { return s.droppedLinkCount } // DroppedEvents returns the number of events dropped by the span due to // limits being reached. func (s snapshot) DroppedEvents() int { return s.droppedEventCount } // ChildSpanCount returns the count of spans that consider the span a // direct parent. func (s snapshot) ChildSpanCount() int { return s.childSpanCount } opentelemetry-go-1.21.0/sdk/trace/span.go000066400000000000000000000604531452547353200202630ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" "fmt" "reflect" "runtime" rt "runtime/trace" "strings" "sync" "time" "unicode/utf8" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/internal" "go.opentelemetry.io/otel/sdk/resource" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) // ReadOnlySpan allows reading information from the data structure underlying a // trace.Span. It is used in places where reading information from a span is // necessary but changing the span isn't necessary or allowed. // // Warning: methods may be added to this interface in minor releases. type ReadOnlySpan interface { // Name returns the name of the span. Name() string // SpanContext returns the unique SpanContext that identifies the span. SpanContext() trace.SpanContext // Parent returns the unique SpanContext that identifies the parent of the // span if one exists. If the span has no parent the returned SpanContext // will be invalid. Parent() trace.SpanContext // SpanKind returns the role the span plays in a Trace. SpanKind() trace.SpanKind // StartTime returns the time the span started recording. StartTime() time.Time // EndTime returns the time the span stopped recording. It will be zero if // the span has not ended. EndTime() time.Time // Attributes returns the defining attributes of the span. // The order of the returned attributes is not guaranteed to be stable across invocations. Attributes() []attribute.KeyValue // Links returns all the links the span has to other spans. Links() []Link // Events returns all the events that occurred within in the spans // lifetime. Events() []Event // Status returns the spans status. Status() Status // InstrumentationScope returns information about the instrumentation // scope that created the span. InstrumentationScope() instrumentation.Scope // InstrumentationLibrary returns information about the instrumentation // library that created the span. // Deprecated: please use InstrumentationScope instead. InstrumentationLibrary() instrumentation.Library // Resource returns information about the entity that produced the span. Resource() *resource.Resource // DroppedAttributes returns the number of attributes dropped by the span // due to limits being reached. DroppedAttributes() int // DroppedLinks returns the number of links dropped by the span due to // limits being reached. DroppedLinks() int // DroppedEvents returns the number of events dropped by the span due to // limits being reached. DroppedEvents() int // ChildSpanCount returns the count of spans that consider the span a // direct parent. ChildSpanCount() int // A private method to prevent users implementing the // interface and so future additions to it will not // violate compatibility. private() } // ReadWriteSpan exposes the same methods as trace.Span and in addition allows // reading information from the underlying data structure. 
// This interface exposes the union of the methods of trace.Span (which is a // "write-only" span) and ReadOnlySpan. New methods for writing or reading span // information should be added under trace.Span or ReadOnlySpan, respectively. // // Warning: methods may be added to this interface in minor releases. type ReadWriteSpan interface { trace.Span ReadOnlySpan } // recordingSpan is an implementation of the OpenTelemetry Span API // representing the individual component of a trace that is sampled. type recordingSpan struct { embedded.Span // mu protects the contents of this span. mu sync.Mutex // parent holds the parent span of this span as a trace.SpanContext. parent trace.SpanContext // spanKind represents the kind of this span as a trace.SpanKind. spanKind trace.SpanKind // name is the name of this span. name string // startTime is the time at which this span was started. startTime time.Time // endTime is the time at which this span was ended. It contains the zero // value of time.Time until the span is ended. endTime time.Time // status is the status of this span. status Status // childSpanCount holds the number of child spans created for this span. childSpanCount int // spanContext holds the SpanContext of this span. spanContext trace.SpanContext // attributes is a collection of user provided key/values. The collection // is constrained by a configurable maximum held by the parent // TracerProvider. When additional attributes are added after this maximum // is reached these attributes the user is attempting to add are dropped. // This dropped number of attributes is tracked and reported in the // ReadOnlySpan exported when the span ends. attributes []attribute.KeyValue droppedAttributes int // events are stored in FIFO queue capped by configured limit. events evictedQueue // links are stored in FIFO queue capped by configured limit. links evictedQueue // executionTracerTaskEnd ends the execution tracer span. executionTracerTaskEnd func() // tracer is the SDK tracer that created this span. tracer *tracer } var ( _ ReadWriteSpan = (*recordingSpan)(nil) _ runtimeTracer = (*recordingSpan)(nil) ) // SpanContext returns the SpanContext of this span. func (s *recordingSpan) SpanContext() trace.SpanContext { if s == nil { return trace.SpanContext{} } return s.spanContext } // IsRecording returns if this span is being recorded. If this span has ended // this will return false. func (s *recordingSpan) IsRecording() bool { if s == nil { return false } s.mu.Lock() defer s.mu.Unlock() return s.endTime.IsZero() } // SetStatus sets the status of the Span in the form of a code and a // description, overriding previous values set. The description is only // included in the set status when the code is for an error. If this span is // not being recorded than this method does nothing. func (s *recordingSpan) SetStatus(code codes.Code, description string) { if !s.IsRecording() { return } s.mu.Lock() defer s.mu.Unlock() if s.status.Code > code { return } status := Status{Code: code} if code == codes.Error { status.Description = description } s.status = status } // SetAttributes sets attributes of this span. // // If a key from attributes already exists the value associated with that key // will be overwritten with the value contained in attributes. // // If this span is not being recorded than this method does nothing. // // If adding attributes to the span would exceed the maximum amount of // attributes the span is configured to have, the last added attributes will // be dropped. 
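//
// An illustrative sketch of the overwrite semantics described above (the key
// and values are arbitrary examples):
//
//	span.SetAttributes(attribute.String("http.route", "/old"))
//	span.SetAttributes(attribute.String("http.route", "/new")) // replaces "/old"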
func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) { if !s.IsRecording() { return } s.mu.Lock() defer s.mu.Unlock() limit := s.tracer.provider.spanLimits.AttributeCountLimit if limit == 0 { // No attributes allowed. s.droppedAttributes += len(attributes) return } // If adding these attributes could exceed the capacity of s perform a // de-duplication and truncation while adding to avoid over allocation. if limit > 0 && len(s.attributes)+len(attributes) > limit { s.addOverCapAttrs(limit, attributes) return } // Otherwise, add without deduplication. When attributes are read they // will be deduplicated, optimizing the operation. for _, a := range attributes { if !a.Valid() { // Drop all invalid attributes. s.droppedAttributes++ continue } a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a) s.attributes = append(s.attributes, a) } } // addOverCapAttrs adds the attributes attrs to the span s while // de-duplicating the attributes of s and attrs and dropping attributes that // exceed the limit. // // This method assumes s.mu.Lock is held by the caller. // // This method should only be called when there is a possibility that adding // attrs to s will exceed the limit. Otherwise, attrs should be added to s // without checking for duplicates and all retrieval methods of the attributes // for s will de-duplicate as needed. // // This method assumes limit is a value > 0. The argument should be validated // by the caller. func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) { // In order to not allocate more capacity to s.attributes than needed, // prune and truncate this addition of attributes while adding. // Do not set a capacity when creating this map. Benchmark testing has // showed this to only add unused memory allocations in general use. exists := make(map[attribute.Key]int) s.dedupeAttrsFromRecord(&exists) // Now that s.attributes is deduplicated, adding unique attributes up to // the capacity of s will not over allocate s.attributes. for _, a := range attrs { if !a.Valid() { // Drop all invalid attributes. s.droppedAttributes++ continue } if idx, ok := exists[a.Key]; ok { // Perform all updates before dropping, even when at capacity. s.attributes[idx] = a continue } if len(s.attributes) >= limit { // Do not just drop all of the remaining attributes, make sure // updates are checked and performed. s.droppedAttributes++ } else { a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a) s.attributes = append(s.attributes, a) exists[a.Key] = len(s.attributes) - 1 } } } // truncateAttr returns a truncated version of attr. Only string and string // slice attribute values are truncated. String values are truncated to at // most a length of limit. Each string slice value is truncated in this fashion // (the slice length itself is unaffected). // // No truncation is performed for a negative limit. func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue { if limit < 0 { return attr } switch attr.Value.Type() { case attribute.STRING: if v := attr.Value.AsString(); len(v) > limit { return attr.Key.String(safeTruncate(v, limit)) } case attribute.STRINGSLICE: v := attr.Value.AsStringSlice() for i := range v { if len(v[i]) > limit { v[i] = safeTruncate(v[i], limit) } } return attr.Key.StringSlice(v) } return attr } // safeTruncate truncates the string and guarantees valid UTF-8 is returned. 
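//
// For example (values chosen purely for illustration): with limit 4,
// "abcdef" is truncated to "abcd", while "aé" with limit 2 becomes "a",
// because the two-byte rune 'é' cannot be split without producing invalid
// UTF-8.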
func safeTruncate(input string, limit int) string { if trunc, ok := safeTruncateValidUTF8(input, limit); ok { return trunc } trunc, _ := safeTruncateValidUTF8(strings.ToValidUTF8(input, ""), limit) return trunc } // safeTruncateValidUTF8 returns a copy of the input string safely truncated to // limit. The truncation is ensured to occur at the bounds of complete UTF-8 // characters. If invalid encoding of UTF-8 is encountered, input is returned // with false, otherwise, the truncated input will be returned with true. func safeTruncateValidUTF8(input string, limit int) (string, bool) { for cnt := 0; cnt <= limit; { r, size := utf8.DecodeRuneInString(input[cnt:]) if r == utf8.RuneError { return input, false } if cnt+size > limit { return input[:cnt], true } cnt += size } return input, true } // End ends the span. This method does nothing if the span is already ended or // is not being recorded. // // The only SpanOption currently supported is WithTimestamp which will set the // end time for a Span's life-cycle. // // If this method is called while panicking an error event is added to the // Span before ending it and the panic is continued. func (s *recordingSpan) End(options ...trace.SpanEndOption) { // Do not start by checking if the span is being recorded which requires // acquiring a lock. Make a minimal check that the span is not nil. if s == nil { return } // Store the end time as soon as possible to avoid artificially increasing // the span's duration in case some operation below takes a while. et := internal.MonotonicEndTime(s.startTime) // Do relative expensive check now that we have an end time and see if we // need to do any more processing. if !s.IsRecording() { return } config := trace.NewSpanEndConfig(options...) if recovered := recover(); recovered != nil { // Record but don't stop the panic. defer panic(recovered) opts := []trace.EventOption{ trace.WithAttributes( semconv.ExceptionType(typeStr(recovered)), semconv.ExceptionMessage(fmt.Sprint(recovered)), ), } if config.StackTrace() { opts = append(opts, trace.WithAttributes( semconv.ExceptionStacktrace(recordStackTrace()), )) } s.addEvent(semconv.ExceptionEventName, opts...) } if s.executionTracerTaskEnd != nil { s.executionTracerTaskEnd() } s.mu.Lock() // Setting endTime to non-zero marks the span as ended and not recording. if config.Timestamp().IsZero() { s.endTime = et } else { s.endTime = config.Timestamp() } s.mu.Unlock() sps := s.tracer.provider.getSpanProcessors() if len(sps) == 0 { return } snap := s.snapshot() for _, sp := range sps { sp.sp.OnEnd(snap) } } // RecordError will record err as a span event for this span. An additional call to // SetStatus is required if the Status of the Span should be set to Error, this method // does not change the Span status. If this span is not being recorded or err is nil // than this method does nothing. func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) { if s == nil || err == nil || !s.IsRecording() { return } opts = append(opts, trace.WithAttributes( semconv.ExceptionType(typeStr(err)), semconv.ExceptionMessage(err.Error()), )) c := trace.NewEventConfig(opts...) if c.StackTrace() { opts = append(opts, trace.WithAttributes( semconv.ExceptionStacktrace(recordStackTrace()), )) } s.addEvent(semconv.ExceptionEventName, opts...) } func typeStr(i interface{}) string { t := reflect.TypeOf(i) if t.PkgPath() == "" && t.Name() == "" { // Likely a builtin type. 
return t.String() } return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) } func recordStackTrace() string { stackTrace := make([]byte, 2048) n := runtime.Stack(stackTrace, false) return string(stackTrace[0:n]) } // AddEvent adds an event with the provided name and options. If this span is // not being recorded than this method does nothing. func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) { if !s.IsRecording() { return } s.addEvent(name, o...) } func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) { c := trace.NewEventConfig(o...) e := Event{Name: name, Attributes: c.Attributes(), Time: c.Timestamp()} // Discard attributes over limit. limit := s.tracer.provider.spanLimits.AttributePerEventCountLimit if limit == 0 { // Drop all attributes. e.DroppedAttributeCount = len(e.Attributes) e.Attributes = nil } else if limit > 0 && len(e.Attributes) > limit { // Drop over capacity. e.DroppedAttributeCount = len(e.Attributes) - limit e.Attributes = e.Attributes[:limit] } s.mu.Lock() s.events.add(e) s.mu.Unlock() } // SetName sets the name of this span. If this span is not being recorded than // this method does nothing. func (s *recordingSpan) SetName(name string) { if !s.IsRecording() { return } s.mu.Lock() defer s.mu.Unlock() s.name = name } // Name returns the name of this span. func (s *recordingSpan) Name() string { s.mu.Lock() defer s.mu.Unlock() return s.name } // Name returns the SpanContext of this span's parent span. func (s *recordingSpan) Parent() trace.SpanContext { s.mu.Lock() defer s.mu.Unlock() return s.parent } // SpanKind returns the SpanKind of this span. func (s *recordingSpan) SpanKind() trace.SpanKind { s.mu.Lock() defer s.mu.Unlock() return s.spanKind } // StartTime returns the time this span started. func (s *recordingSpan) StartTime() time.Time { s.mu.Lock() defer s.mu.Unlock() return s.startTime } // EndTime returns the time this span ended. For spans that have not yet // ended, the returned value will be the zero value of time.Time. func (s *recordingSpan) EndTime() time.Time { s.mu.Lock() defer s.mu.Unlock() return s.endTime } // Attributes returns the attributes of this span. // // The order of the returned attributes is not guaranteed to be stable. func (s *recordingSpan) Attributes() []attribute.KeyValue { s.mu.Lock() defer s.mu.Unlock() s.dedupeAttrs() return s.attributes } // dedupeAttrs deduplicates the attributes of s to fit capacity. // // This method assumes s.mu.Lock is held by the caller. func (s *recordingSpan) dedupeAttrs() { // Do not set a capacity when creating this map. Benchmark testing has // showed this to only add unused memory allocations in general use. exists := make(map[attribute.Key]int) s.dedupeAttrsFromRecord(&exists) } // dedupeAttrsFromRecord deduplicates the attributes of s to fit capacity // using record as the record of unique attribute keys to their index. // // This method assumes s.mu.Lock is held by the caller. func (s *recordingSpan) dedupeAttrsFromRecord(record *map[attribute.Key]int) { // Use the fact that slices share the same backing array. unique := s.attributes[:0] for _, a := range s.attributes { if idx, ok := (*record)[a.Key]; ok { unique[idx] = a } else { unique = append(unique, a) (*record)[a.Key] = len(unique) - 1 } } // s.attributes have element types of attribute.KeyValue. These types are // not pointers and they themselves do not contain pointer fields, // therefore the duplicate values do not need to be zeroed for them to be // garbage collected. 
s.attributes = unique } // Links returns the links of this span. func (s *recordingSpan) Links() []Link { s.mu.Lock() defer s.mu.Unlock() if len(s.links.queue) == 0 { return []Link{} } return s.interfaceArrayToLinksArray() } // Events returns the events of this span. func (s *recordingSpan) Events() []Event { s.mu.Lock() defer s.mu.Unlock() if len(s.events.queue) == 0 { return []Event{} } return s.interfaceArrayToEventArray() } // Status returns the status of this span. func (s *recordingSpan) Status() Status { s.mu.Lock() defer s.mu.Unlock() return s.status } // InstrumentationScope returns the instrumentation.Scope associated with // the Tracer that created this span. func (s *recordingSpan) InstrumentationScope() instrumentation.Scope { s.mu.Lock() defer s.mu.Unlock() return s.tracer.instrumentationScope } // InstrumentationLibrary returns the instrumentation.Library associated with // the Tracer that created this span. func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { s.mu.Lock() defer s.mu.Unlock() return s.tracer.instrumentationScope } // Resource returns the Resource associated with the Tracer that created this // span. func (s *recordingSpan) Resource() *resource.Resource { s.mu.Lock() defer s.mu.Unlock() return s.tracer.provider.resource } func (s *recordingSpan) addLink(link trace.Link) { if !s.IsRecording() || !link.SpanContext.IsValid() { return } l := Link{SpanContext: link.SpanContext, Attributes: link.Attributes} // Discard attributes over limit. limit := s.tracer.provider.spanLimits.AttributePerLinkCountLimit if limit == 0 { // Drop all attributes. l.DroppedAttributeCount = len(l.Attributes) l.Attributes = nil } else if limit > 0 && len(l.Attributes) > limit { l.DroppedAttributeCount = len(l.Attributes) - limit l.Attributes = l.Attributes[:limit] } s.mu.Lock() s.links.add(l) s.mu.Unlock() } // DroppedAttributes returns the number of attributes dropped by the span // due to limits being reached. func (s *recordingSpan) DroppedAttributes() int { s.mu.Lock() defer s.mu.Unlock() return s.droppedAttributes } // DroppedLinks returns the number of links dropped by the span due to limits // being reached. func (s *recordingSpan) DroppedLinks() int { s.mu.Lock() defer s.mu.Unlock() return s.links.droppedCount } // DroppedEvents returns the number of events dropped by the span due to // limits being reached. func (s *recordingSpan) DroppedEvents() int { s.mu.Lock() defer s.mu.Unlock() return s.events.droppedCount } // ChildSpanCount returns the count of spans that consider the span a // direct parent. func (s *recordingSpan) ChildSpanCount() int { s.mu.Lock() defer s.mu.Unlock() return s.childSpanCount } // TracerProvider returns a trace.TracerProvider that can be used to generate // additional Spans on the same telemetry pipeline as the current Span. func (s *recordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider } // snapshot creates a read-only copy of the current state of the span. 
func (s *recordingSpan) snapshot() ReadOnlySpan { var sd snapshot s.mu.Lock() defer s.mu.Unlock() sd.endTime = s.endTime sd.instrumentationScope = s.tracer.instrumentationScope sd.name = s.name sd.parent = s.parent sd.resource = s.tracer.provider.resource sd.spanContext = s.spanContext sd.spanKind = s.spanKind sd.startTime = s.startTime sd.status = s.status sd.childSpanCount = s.childSpanCount if len(s.attributes) > 0 { s.dedupeAttrs() sd.attributes = s.attributes } sd.droppedAttributeCount = s.droppedAttributes if len(s.events.queue) > 0 { sd.events = s.interfaceArrayToEventArray() sd.droppedEventCount = s.events.droppedCount } if len(s.links.queue) > 0 { sd.links = s.interfaceArrayToLinksArray() sd.droppedLinkCount = s.links.droppedCount } return &sd } func (s *recordingSpan) interfaceArrayToLinksArray() []Link { linkArr := make([]Link, 0) for _, value := range s.links.queue { linkArr = append(linkArr, value.(Link)) } return linkArr } func (s *recordingSpan) interfaceArrayToEventArray() []Event { eventArr := make([]Event, 0) for _, value := range s.events.queue { eventArr = append(eventArr, value.(Event)) } return eventArr } func (s *recordingSpan) addChild() { if !s.IsRecording() { return } s.mu.Lock() s.childSpanCount++ s.mu.Unlock() } func (*recordingSpan) private() {} // runtimeTrace starts a "runtime/trace".Task for the span and returns a // context containing the task. func (s *recordingSpan) runtimeTrace(ctx context.Context) context.Context { if !rt.IsEnabled() { // Avoid additional overhead if runtime/trace is not enabled. return ctx } nctx, task := rt.NewTask(ctx, s.name) s.mu.Lock() s.executionTracerTaskEnd = task.End s.mu.Unlock() return nctx } // nonRecordingSpan is a minimal implementation of the OpenTelemetry Span API // that wraps a SpanContext. It performs no operations other than to return // the wrapped SpanContext or TracerProvider that created it. type nonRecordingSpan struct { embedded.Span // tracer is the SDK tracer that created this span. tracer *tracer sc trace.SpanContext } var _ trace.Span = nonRecordingSpan{} // SpanContext returns the wrapped SpanContext. func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc } // IsRecording always returns false. func (nonRecordingSpan) IsRecording() bool { return false } // SetStatus does nothing. func (nonRecordingSpan) SetStatus(codes.Code, string) {} // SetError does nothing. func (nonRecordingSpan) SetError(bool) {} // SetAttributes does nothing. func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {} // End does nothing. func (nonRecordingSpan) End(...trace.SpanEndOption) {} // RecordError does nothing. func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} // AddEvent does nothing. func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} // SetName does nothing. func (nonRecordingSpan) SetName(string) {} // TracerProvider returns the trace.TracerProvider that provided the Tracer // that created this span. func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider } func isRecording(s SamplingResult) bool { return s.Decision == RecordOnly || s.Decision == RecordAndSample } func isSampled(s SamplingResult) bool { return s.Decision == RecordAndSample } // Status is the classified state of a Span. type Status struct { // Code is an identifier of a Spans state classification. Code codes.Code // Description is a user hint about why that status was set. It is only // applicable when Code is Error. 
Description string } opentelemetry-go-1.21.0/sdk/trace/span_exporter.go000066400000000000000000000041071452547353200222050ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace // import "go.opentelemetry.io/otel/sdk/trace" import "context" // SpanExporter handles the delivery of spans to external receivers. This is // the final component in the trace export pipeline. type SpanExporter interface { // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. // ExportSpans exports a batch of spans. // // This function is called synchronously, so there is no concurrency // safety requirement. However, due to the synchronous calling pattern, // it is critical that all timeouts and cancellations contained in the // passed context must be honored. // // Any retry logic must be contained in this function. The SDK that // calls this function will not implement any retry logic. All errors // returned by this function are considered unrecoverable and will be // reported to a configured error Handler. ExportSpans(ctx context.Context, spans []ReadOnlySpan) error // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. // Shutdown notifies the exporter of a pending halt to operations. The // exporter is expected to perform any cleanup or synchronization it // requires while honoring all timeouts and cancellations contained in // the passed context. Shutdown(ctx context.Context) error // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. } opentelemetry-go-1.21.0/sdk/trace/span_limits.go000066400000000000000000000113011452547353200216300ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace // import "go.opentelemetry.io/otel/sdk/trace" import "go.opentelemetry.io/otel/sdk/internal/env" const ( // DefaultAttributeValueLengthLimit is the default maximum allowed // attribute value length, unlimited. DefaultAttributeValueLengthLimit = -1 // DefaultAttributeCountLimit is the default maximum number of attributes // a span can have. DefaultAttributeCountLimit = 128 // DefaultEventCountLimit is the default maximum number of events a span // can have. DefaultEventCountLimit = 128 // DefaultLinkCountLimit is the default maximum number of links a span can // have. 
DefaultLinkCountLimit = 128 // DefaultAttributePerEventCountLimit is the default maximum number of // attributes a span event can have. DefaultAttributePerEventCountLimit = 128 // DefaultAttributePerLinkCountLimit is the default maximum number of // attributes a span link can have. DefaultAttributePerLinkCountLimit = 128 ) // SpanLimits represents the limits of a span. type SpanLimits struct { // AttributeValueLengthLimit is the maximum allowed attribute value length. // // This limit only applies to string and string slice attribute values. // Any string longer than this value will be truncated to this length. // // Setting this to a negative value means no limit is applied. AttributeValueLengthLimit int // AttributeCountLimit is the maximum allowed span attribute count. Any // attribute added to a span once this limit is reached will be dropped. // // Setting this to zero means no attributes will be recorded. // // Setting this to a negative value means no limit is applied. AttributeCountLimit int // EventCountLimit is the maximum allowed span event count. Any event // added to a span once this limit is reached will be added, but the // oldest event will be dropped. // // Setting this to zero means no events will be recorded. // // Setting this to a negative value means no limit is applied. EventCountLimit int // LinkCountLimit is the maximum allowed span link count. Any link added // to a span once this limit is reached will be added, but the // oldest link will be dropped. // // Setting this to zero means no links will be recorded. // // Setting this to a negative value means no limit is applied. LinkCountLimit int // AttributePerEventCountLimit is the maximum number of attributes allowed // per span event. Any attribute added after this limit is reached will be // dropped. // // Setting this to zero means no attributes will be recorded for events. // // Setting this to a negative value means no limit is applied. AttributePerEventCountLimit int // AttributePerLinkCountLimit is the maximum number of attributes allowed // per span link. Any attribute added after this limit is reached will be // dropped. // // Setting this to zero means no attributes will be recorded for links. // // Setting this to a negative value means no limit is applied. AttributePerLinkCountLimit int } // NewSpanLimits returns a SpanLimits with all limits set to the value their // corresponding environment variable holds, or the default if unset.
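//
// For example, a minimal sketch (assuming only OTEL_SPAN_EVENT_COUNT_LIMIT=10
// is set in the environment; see the variable list below):
//
//	limits := NewSpanLimits()
//	// limits.EventCountLimit is now 10; every other field keeps its default.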
// // • AttributeValueLengthLimit: OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT // (default: unlimited) // // • AttributeCountLimit: OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT (default: 128) // // • EventCountLimit: OTEL_SPAN_EVENT_COUNT_LIMIT (default: 128) // // • AttributePerEventCountLimit: OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT (default: // 128) // // • LinkCountLimit: OTEL_SPAN_LINK_COUNT_LIMIT (default: 128) // // • AttributePerLinkCountLimit: OTEL_LINK_ATTRIBUTE_COUNT_LIMIT (default: 128) func NewSpanLimits() SpanLimits { return SpanLimits{ AttributeValueLengthLimit: env.SpanAttributeValueLength(DefaultAttributeValueLengthLimit), AttributeCountLimit: env.SpanAttributeCount(DefaultAttributeCountLimit), EventCountLimit: env.SpanEventCount(DefaultEventCountLimit), LinkCountLimit: env.SpanLinkCount(DefaultLinkCountLimit), AttributePerEventCountLimit: env.SpanEventAttributeCount(DefaultAttributePerEventCountLimit), AttributePerLinkCountLimit: env.SpanLinkAttributeCount(DefaultAttributePerLinkCountLimit), } } opentelemetry-go-1.21.0/sdk/trace/span_limits_test.go000066400000000000000000000176431452547353200227060ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace import ( "context" "os" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/internal/env" ottest "go.opentelemetry.io/otel/sdk/internal/internaltest" "go.opentelemetry.io/otel/trace" ) func TestSettingSpanLimits(t *testing.T) { envLimits := func(val string) map[string]string { return map[string]string{ env.SpanAttributeValueLengthKey: val, env.SpanEventCountKey: val, env.SpanAttributeCountKey: val, env.SpanLinkCountKey: val, env.SpanEventAttributeCountKey: val, env.SpanLinkAttributeCountKey: val, } } limits := func(n int) *SpanLimits { lims := NewSpanLimits() lims.AttributeValueLengthLimit = n lims.AttributeCountLimit = n lims.EventCountLimit = n lims.LinkCountLimit = n lims.AttributePerEventCountLimit = n lims.AttributePerLinkCountLimit = n return &lims } tests := []struct { name string env map[string]string opt *SpanLimits rawOpt *SpanLimits want SpanLimits }{ { name: "defaults", want: NewSpanLimits(), }, { name: "env", env: envLimits("42"), want: *(limits(42)), }, { name: "opt", opt: limits(42), want: *(limits(42)), }, { name: "raw-opt", rawOpt: limits(42), want: *(limits(42)), }, { name: "opt-override", env: envLimits("-2"), // Option take priority. opt: limits(43), want: *(limits(43)), }, { name: "raw-opt-override", env: envLimits("-2"), // Option take priority. rawOpt: limits(43), want: *(limits(43)), }, { name: "last-opt-wins", opt: limits(-2), rawOpt: limits(-3), want: *(limits(-3)), }, { name: "env(unlimited)", // OTel spec says negative SpanLinkAttributeCountKey is invalid, // but since we will revert to the default (unlimited) which uses // negative values to signal this than this value is expected to // pass through. 
env: envLimits("-1"), want: *(limits(-1)), }, { name: "opt(unlimited)", // Corrects to defaults. opt: limits(-1), want: NewSpanLimits(), }, { name: "raw-opt(unlimited)", rawOpt: limits(-1), want: *(limits(-1)), }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { if test.env != nil { es := ottest.NewEnvStore() t.Cleanup(func() { require.NoError(t, es.Restore()) }) for k, v := range test.env { es.Record(k) require.NoError(t, os.Setenv(k, v)) } } var opts []TracerProviderOption if test.opt != nil { opts = append(opts, WithSpanLimits(*test.opt)) } if test.rawOpt != nil { opts = append(opts, WithRawSpanLimits(*test.rawOpt)) } assert.Equal(t, test.want, NewTracerProvider(opts...).spanLimits) }) } } type recorder []ReadOnlySpan func (r *recorder) OnStart(context.Context, ReadWriteSpan) {} func (r *recorder) OnEnd(s ReadOnlySpan) { *r = append(*r, s) } func (r *recorder) ForceFlush(context.Context) error { return nil } func (r *recorder) Shutdown(context.Context) error { return nil } func testSpanLimits(t *testing.T, limits SpanLimits) ReadOnlySpan { rec := new(recorder) tp := NewTracerProvider(WithRawSpanLimits(limits), WithSpanProcessor(rec)) tracer := tp.Tracer("testSpanLimits") ctx := context.Background() a := []attribute.KeyValue{attribute.Bool("one", true), attribute.Bool("two", true)} l := trace.Link{ SpanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: [16]byte{0x01}, SpanID: [8]byte{0x01}, }), Attributes: a, } _, span := tracer.Start(ctx, "span-name", trace.WithLinks(l, l)) span.SetAttributes( attribute.String("string", "abc"), attribute.StringSlice("stringSlice", []string{"abc", "def"}), attribute.String("euro", "€"), // this is a 3-byte rune ) span.AddEvent("event 1", trace.WithAttributes(a...)) span.AddEvent("event 2", trace.WithAttributes(a...)) span.End() require.NoError(t, tp.Shutdown(ctx)) require.Len(t, *rec, 1, "exported spans") return (*rec)[0] } func TestSpanLimits(t *testing.T) { t.Run("AttributeValueLengthLimit", func(t *testing.T) { limits := NewSpanLimits() // Unlimited. limits.AttributeValueLengthLimit = -1 attrs := testSpanLimits(t, limits).Attributes() assert.Contains(t, attrs, attribute.String("string", "abc")) assert.Contains(t, attrs, attribute.StringSlice("stringSlice", []string{"abc", "def"})) assert.Contains(t, attrs, attribute.String("euro", "€")) limits.AttributeValueLengthLimit = 2 attrs = testSpanLimits(t, limits).Attributes() // Ensure string and string slice attributes are truncated. assert.Contains(t, attrs, attribute.String("string", "ab")) assert.Contains(t, attrs, attribute.StringSlice("stringSlice", []string{"ab", "de"})) assert.Contains(t, attrs, attribute.String("euro", "")) limits.AttributeValueLengthLimit = 0 attrs = testSpanLimits(t, limits).Attributes() assert.Contains(t, attrs, attribute.String("string", "")) assert.Contains(t, attrs, attribute.StringSlice("stringSlice", []string{"", ""})) assert.Contains(t, attrs, attribute.String("euro", "")) }) t.Run("AttributeCountLimit", func(t *testing.T) { limits := NewSpanLimits() // Unlimited. limits.AttributeCountLimit = -1 assert.Len(t, testSpanLimits(t, limits).Attributes(), 3) limits.AttributeCountLimit = 1 assert.Len(t, testSpanLimits(t, limits).Attributes(), 1) // Ensure this can be disabled. limits.AttributeCountLimit = 0 assert.Len(t, testSpanLimits(t, limits).Attributes(), 0) }) t.Run("EventCountLimit", func(t *testing.T) { limits := NewSpanLimits() // Unlimited. 
limits.EventCountLimit = -1 assert.Len(t, testSpanLimits(t, limits).Events(), 2) limits.EventCountLimit = 1 assert.Len(t, testSpanLimits(t, limits).Events(), 1) // Ensure this can be disabled. limits.EventCountLimit = 0 assert.Len(t, testSpanLimits(t, limits).Events(), 0) }) t.Run("AttributePerEventCountLimit", func(t *testing.T) { limits := NewSpanLimits() // Unlimited. limits.AttributePerEventCountLimit = -1 for _, e := range testSpanLimits(t, limits).Events() { assert.Len(t, e.Attributes, 2) } limits.AttributePerEventCountLimit = 1 for _, e := range testSpanLimits(t, limits).Events() { assert.Len(t, e.Attributes, 1) } // Ensure this can be disabled. limits.AttributePerEventCountLimit = 0 for _, e := range testSpanLimits(t, limits).Events() { assert.Len(t, e.Attributes, 0) } }) t.Run("LinkCountLimit", func(t *testing.T) { limits := NewSpanLimits() // Unlimited. limits.LinkCountLimit = -1 assert.Len(t, testSpanLimits(t, limits).Links(), 2) limits.LinkCountLimit = 1 assert.Len(t, testSpanLimits(t, limits).Links(), 1) // Ensure this can be disabled. limits.LinkCountLimit = 0 assert.Len(t, testSpanLimits(t, limits).Links(), 0) }) t.Run("AttributePerLinkCountLimit", func(t *testing.T) { limits := NewSpanLimits() // Unlimited. limits.AttributePerLinkCountLimit = -1 for _, l := range testSpanLimits(t, limits).Links() { assert.Len(t, l.Attributes, 2) } limits.AttributePerLinkCountLimit = 1 for _, l := range testSpanLimits(t, limits).Links() { assert.Len(t, l.Attributes, 1) } // Ensure this can be disabled. limits.AttributePerLinkCountLimit = 0 for _, l := range testSpanLimits(t, limits).Links() { assert.Len(t, l.Attributes, 0) } }) } opentelemetry-go-1.21.0/sdk/trace/span_processor.go000066400000000000000000000054201452547353200223530ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" "sync" ) // SpanProcessor is a processing pipeline for spans in the trace signal. // SpanProcessors registered with a TracerProvider and are called at the start // and end of a Span's lifecycle, and are called in the order they are // registered. type SpanProcessor interface { // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. // OnStart is called when a span is started. It is called synchronously // and should not block. OnStart(parent context.Context, s ReadWriteSpan) // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. // OnEnd is called when span is finished. It is called synchronously and // hence not block. OnEnd(s ReadOnlySpan) // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. // Shutdown is called when the SDK shuts down. Any cleanup or release of // resources held by the processor should be done in this call. 
// // Calls to OnStart, OnEnd, or ForceFlush after this has been called // should be ignored. // // All timeouts and cancellations contained in ctx must be honored, this // should not block indefinitely. Shutdown(ctx context.Context) error // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. // ForceFlush exports all ended spans to the configured Exporter that have not yet // been exported. It should only be called when absolutely necessary, such as when // using a FaaS provider that may suspend the process after an invocation, but before // the Processor can export the completed spans. ForceFlush(ctx context.Context) error // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. } type spanProcessorState struct { sp SpanProcessor state sync.Once } func newSpanProcessorState(sp SpanProcessor) *spanProcessorState { return &spanProcessorState{sp: sp} } type spanProcessorStates []*spanProcessorState opentelemetry-go-1.21.0/sdk/trace/span_processor_annotator_example_test.go000066400000000000000000000054401452547353200272140ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace import ( "context" "fmt" "go.opentelemetry.io/otel/attribute" ) /* Sometimes information about a runtime environment can change dynamically or be delayed from startup. Instead of continuously recreating and distributing a TracerProvider with an immutable Resource or delaying the startup of your application on a slow-loading piece of information, annotate the created spans dynamically using a SpanProcessor. */ var ( // owner represents the owner of the application. In this example it is // stored as a simple string, but in real-world use this may be the // response to an asynchronous request. owner = "unknown" ownerKey = attribute.Key("owner") ) // Annotator is a SpanProcessor that adds attributes to all started spans. type Annotator struct { // AttrsFunc is called when a span is started. The attributes it returns // are set on the Span being started. AttrsFunc func() []attribute.KeyValue } func (a Annotator) OnStart(_ context.Context, s ReadWriteSpan) { s.SetAttributes(a.AttrsFunc()...) } func (a Annotator) Shutdown(context.Context) error { return nil } func (a Annotator) ForceFlush(context.Context) error { return nil } func (a Annotator) OnEnd(s ReadOnlySpan) { attr := s.Attributes()[0] fmt.Printf("%s: %s\n", attr.Key, attr.Value.AsString()) } func ExampleSpanProcessor_annotated() { a := Annotator{ AttrsFunc: func() []attribute.KeyValue { return []attribute.KeyValue{ownerKey.String(owner)} }, } tracer := NewTracerProvider(WithSpanProcessor(a)).Tracer("annotated") // Simulate the situation where we want to annotate spans with an owner, // but at startup we do not now this information. 
Instead of waiting for // the owner to be known before starting and blocking here, start doing // work and update when the information becomes available. ctx := context.Background() _, s0 := tracer.Start(ctx, "span0") // Simulate an asynchronous call to determine the owner succeeding. We now // know that the owner of this application has been determined to be // Alice. Make sure all subsequent spans are annotated appropriately. owner = "alice" _, s1 := tracer.Start(ctx, "span1") s0.End() s1.End() // Output: // owner: unknown // owner: alice } opentelemetry-go-1.21.0/sdk/trace/span_processor_filter_example_test.go000066400000000000000000000061711452547353200264760ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace import ( "context" "time" ) // DurationFilter is a SpanProcessor that filters spans that have lifetimes // outside of a defined range. type DurationFilter struct { // Next is the next SpanProcessor in the chain. Next SpanProcessor // Min is the duration under which spans are dropped. Min time.Duration // Max is the duration over which spans are dropped. Max time.Duration } func (f DurationFilter) OnStart(parent context.Context, s ReadWriteSpan) { f.Next.OnStart(parent, s) } func (f DurationFilter) Shutdown(ctx context.Context) error { return f.Next.Shutdown(ctx) } func (f DurationFilter) ForceFlush(ctx context.Context) error { return f.Next.ForceFlush(ctx) } func (f DurationFilter) OnEnd(s ReadOnlySpan) { if f.Min > 0 && s.EndTime().Sub(s.StartTime()) < f.Min { // Drop short lived spans. return } if f.Max > 0 && s.EndTime().Sub(s.StartTime()) > f.Max { // Drop long lived spans. return } f.Next.OnEnd(s) } // InstrumentationBlacklist is a SpanProcessor that drops all spans from // certain instrumentation. type InstrumentationBlacklist struct { // Next is the next SpanProcessor in the chain. Next SpanProcessor // Blacklist is the set of instrumentation names for which spans will be // dropped. Blacklist map[string]bool } func (f InstrumentationBlacklist) OnStart(parent context.Context, s ReadWriteSpan) { f.Next.OnStart(parent, s) } func (f InstrumentationBlacklist) Shutdown(ctx context.Context) error { return f.Next.Shutdown(ctx) } func (f InstrumentationBlacklist) ForceFlush(ctx context.Context) error { return f.Next.ForceFlush(ctx) } func (f InstrumentationBlacklist) OnEnd(s ReadOnlySpan) { if f.Blacklist != nil && f.Blacklist[s.InstrumentationScope().Name] { // Drop spans from this instrumentation return } f.Next.OnEnd(s) } type noopExporter struct{} func (noopExporter) ExportSpans(context.Context, []ReadOnlySpan) error { return nil } func (noopExporter) Shutdown(context.Context) error { return nil } func ExampleSpanProcessor_filtered() { exportSP := NewSimpleSpanProcessor(noopExporter{}) // Build a SpanProcessor chain to filter out all spans from the pernicious // "naughty-instrumentation" dependency and only allow spans shorter than // an minute and longer than a second to be exported with the exportSP. 
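// Ended spans flow through the chain outermost-first: the DurationFilter
// drops spans outside the one-second-to-one-minute window and hands the rest
// to the InstrumentationBlacklist, which forwards the surviving spans to
// exportSP.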
filter := DurationFilter{ Next: InstrumentationBlacklist{ Next: exportSP, Blacklist: map[string]bool{ "naughty-instrumentation": true, }, }, Min: time.Second, Max: time.Minute, } _ = NewTracerProvider(WithSpanProcessor(filter)) // ... } opentelemetry-go-1.21.0/sdk/trace/span_processor_test.go000066400000000000000000000161241452547353200234150ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace_test import ( "context" "testing" "go.opentelemetry.io/otel/attribute" sdktrace "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/trace" ) type testSpanProcessor struct { name string spansStarted []sdktrace.ReadWriteSpan spansEnded []sdktrace.ReadOnlySpan shutdownCount int } func (t *testSpanProcessor) OnStart(parent context.Context, s sdktrace.ReadWriteSpan) { if t == nil { return } psc := trace.SpanContextFromContext(parent) kv := []attribute.KeyValue{ { Key: "SpanProcessorName", Value: attribute.StringValue(t.name), }, // Store parent trace ID and span ID as attributes to be read later in // tests so that we "do something" with the parent argument. Real // SpanProcessor implementations will likely use the parent argument in // a more meaningful way. { Key: "ParentTraceID", Value: attribute.StringValue(psc.TraceID().String()), }, { Key: "ParentSpanID", Value: attribute.StringValue(psc.SpanID().String()), }, } s.AddEvent("OnStart", trace.WithAttributes(kv...)) t.spansStarted = append(t.spansStarted, s) } func (t *testSpanProcessor) OnEnd(s sdktrace.ReadOnlySpan) { if t == nil { return } t.spansEnded = append(t.spansEnded, s) } func (t *testSpanProcessor) Shutdown(_ context.Context) error { if t == nil { return nil } t.shutdownCount++ return nil } func (t *testSpanProcessor) ForceFlush(context.Context) error { if t == nil { return nil } return nil } func TestRegisterSpanProcessor(t *testing.T) { name := "Register span processor before span starts" tp := basicTracerProvider(t) spNames := []string{"sp1", "sp2", "sp3"} sps := NewNamedTestSpanProcessors(spNames) for _, sp := range sps { tp.RegisterSpanProcessor(sp) } tid, _ := trace.TraceIDFromHex("01020304050607080102040810203040") sid, _ := trace.SpanIDFromHex("0102040810203040") parent := trace.NewSpanContext(trace.SpanContextConfig{ TraceID: tid, SpanID: sid, }) ctx := trace.ContextWithRemoteSpanContext(context.Background(), parent) tr := tp.Tracer("SpanProcessor") _, span := tr.Start(ctx, "OnStart") span.End() wantCount := 1 for _, sp := range sps { gotCount := len(sp.spansStarted) if gotCount != wantCount { t.Errorf("%s: started count: got %d, want %d\n", name, gotCount, wantCount) } gotCount = len(sp.spansEnded) if gotCount != wantCount { t.Errorf("%s: ended count: got %d, want %d\n", name, gotCount, wantCount) } c := 0 tidOK := false sidOK := false for _, e := range sp.spansStarted[0].Events() { for _, kv := range e.Attributes { switch kv.Key { case "SpanProcessorName": gotValue := kv.Value.AsString() if gotValue != spNames[c] { t.Errorf("%s: attributes: got %s, 
want %s\n", name, gotValue, spNames[c]) } c++ case "ParentTraceID": gotValue := kv.Value.AsString() if gotValue != parent.TraceID().String() { t.Errorf("%s: attributes: got %s, want %s\n", name, gotValue, parent.TraceID()) } tidOK = true case "ParentSpanID": gotValue := kv.Value.AsString() if gotValue != parent.SpanID().String() { t.Errorf("%s: attributes: got %s, want %s\n", name, gotValue, parent.SpanID()) } sidOK = true default: continue } } } if c != len(spNames) { t.Errorf("%s: expected attributes(SpanProcessorName): got %d, want %d\n", name, c, len(spNames)) } if !tidOK { t.Errorf("%s: expected attributes(ParentTraceID)\n", name) } if !sidOK { t.Errorf("%s: expected attributes(ParentSpanID)\n", name) } } } func TestUnregisterSpanProcessor(t *testing.T) { name := "Start span after unregistering span processor" tp := basicTracerProvider(t) spNames := []string{"sp1", "sp2", "sp3"} sps := NewNamedTestSpanProcessors(spNames) for _, sp := range sps { tp.RegisterSpanProcessor(sp) } tr := tp.Tracer("SpanProcessor") _, span := tr.Start(context.Background(), "OnStart") span.End() for _, sp := range sps { tp.UnregisterSpanProcessor(sp) } // start another span after unregistering span processor. _, span = tr.Start(context.Background(), "Start span after unregister") span.End() for _, sp := range sps { wantCount := 1 gotCount := len(sp.spansStarted) if gotCount != wantCount { t.Errorf("%s: started count: got %d, want %d\n", name, gotCount, wantCount) } gotCount = len(sp.spansEnded) if gotCount != wantCount { t.Errorf("%s: ended count: got %d, want %d\n", name, gotCount, wantCount) } } } func TestUnregisterSpanProcessorWhileSpanIsActive(t *testing.T) { name := "Unregister span processor while span is active" tp := basicTracerProvider(t) sp := NewTestSpanProcessor("sp") tp.RegisterSpanProcessor(sp) tr := tp.Tracer("SpanProcessor") _, span := tr.Start(context.Background(), "OnStart") tp.UnregisterSpanProcessor(sp) span.End() wantCount := 1 gotCount := len(sp.spansStarted) if gotCount != wantCount { t.Errorf("%s: started count: got %d, want %d\n", name, gotCount, wantCount) } wantCount = 0 gotCount = len(sp.spansEnded) if gotCount != wantCount { t.Errorf("%s: ended count: got %d, want %d\n", name, gotCount, wantCount) } } func TestSpanProcessorShutdown(t *testing.T) { name := "Increment shutdown counter of a span processor" tp := basicTracerProvider(t) sp := NewTestSpanProcessor("sp") tp.RegisterSpanProcessor(sp) wantCount := 1 err := sp.Shutdown(context.Background()) if err != nil { t.Error("Error shutting the testSpanProcessor down\n") } gotCount := sp.shutdownCount if wantCount != gotCount { t.Errorf("%s: wrong counter: got %d, want %d\n", name, gotCount, wantCount) } } func TestMultipleUnregisterSpanProcessorCalls(t *testing.T) { name := "Increment shutdown counter after first UnregisterSpanProcessor call" tp := basicTracerProvider(t) sp := NewTestSpanProcessor("sp") wantCount := 1 tp.RegisterSpanProcessor(sp) tp.UnregisterSpanProcessor(sp) gotCount := sp.shutdownCount if wantCount != gotCount { t.Errorf("%s: wrong counter: got %d, want %d\n", name, gotCount, wantCount) } // Multiple UnregisterSpanProcessor should not trigger multiple Shutdown calls. 
tp.UnregisterSpanProcessor(sp) gotCount = sp.shutdownCount if wantCount != gotCount { t.Errorf("%s: wrong counter: got %d, want %d\n", name, gotCount, wantCount) } } func NewTestSpanProcessor(name string) *testSpanProcessor { return &testSpanProcessor{name: name} } func NewNamedTestSpanProcessors(names []string) []*testSpanProcessor { tsp := []*testSpanProcessor{} for _, n := range names { tsp = append(tsp, NewTestSpanProcessor(n)) } return tsp } opentelemetry-go-1.21.0/sdk/trace/span_test.go000066400000000000000000000132641452547353200213200ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace import ( "bytes" "fmt" "testing" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" ) func TestSetStatus(t *testing.T) { tests := []struct { name string span recordingSpan code codes.Code description string expected Status }{ { "Error and description should overwrite Unset", recordingSpan{}, codes.Error, "description", Status{Code: codes.Error, Description: "description"}, }, { "Ok should overwrite Unset and ignore description", recordingSpan{}, codes.Ok, "description", Status{Code: codes.Ok}, }, { "Error and description should return error and overwrite description", recordingSpan{status: Status{Code: codes.Error, Description: "d1"}}, codes.Error, "d2", Status{Code: codes.Error, Description: "d2"}, }, { "Ok should overwrite error and remove description", recordingSpan{status: Status{Code: codes.Error, Description: "d1"}}, codes.Ok, "d2", Status{Code: codes.Ok}, }, { "Error and description should be ignored when already Ok", recordingSpan{status: Status{Code: codes.Ok}}, codes.Error, "d2", Status{Code: codes.Ok}, }, { "Ok should be noop when already Ok", recordingSpan{status: Status{Code: codes.Ok}}, codes.Ok, "d2", Status{Code: codes.Ok}, }, { "Unset should be noop when already Ok", recordingSpan{status: Status{Code: codes.Ok}}, codes.Unset, "d2", Status{Code: codes.Ok}, }, { "Unset should be noop when already Error", recordingSpan{status: Status{Code: codes.Error, Description: "d1"}}, codes.Unset, "d2", Status{Code: codes.Error, Description: "d1"}, }, } for i := range tests { tc := &tests[i] t.Run(tc.name, func(t *testing.T) { tc.span.SetStatus(tc.code, tc.description) assert.Equal(t, tc.expected, tc.span.status) }) } } func TestTruncateAttr(t *testing.T) { const key = "key" strAttr := attribute.String(key, "value") strSliceAttr := attribute.StringSlice(key, []string{"value-0", "value-1"}) tests := []struct { limit int attr, want attribute.KeyValue }{ { limit: -1, attr: strAttr, want: strAttr, }, { limit: -1, attr: strSliceAttr, want: strSliceAttr, }, { limit: 0, attr: attribute.Bool(key, true), want: attribute.Bool(key, true), }, { limit: 0, attr: attribute.BoolSlice(key, []bool{true, false}), want: attribute.BoolSlice(key, []bool{true, false}), }, { limit: 0, attr: attribute.Int(key, 42), want: attribute.Int(key, 42), }, { limit: 0, attr: attribute.IntSlice(key, 
[]int{42, -1}), want: attribute.IntSlice(key, []int{42, -1}), }, { limit: 0, attr: attribute.Int64(key, 42), want: attribute.Int64(key, 42), }, { limit: 0, attr: attribute.Int64Slice(key, []int64{42, -1}), want: attribute.Int64Slice(key, []int64{42, -1}), }, { limit: 0, attr: attribute.Float64(key, 42), want: attribute.Float64(key, 42), }, { limit: 0, attr: attribute.Float64Slice(key, []float64{42, -1}), want: attribute.Float64Slice(key, []float64{42, -1}), }, { limit: 0, attr: strAttr, want: attribute.String(key, ""), }, { limit: 0, attr: strSliceAttr, want: attribute.StringSlice(key, []string{"", ""}), }, { limit: 0, attr: attribute.Stringer(key, bytes.NewBufferString("value")), want: attribute.String(key, ""), }, { limit: 1, attr: strAttr, want: attribute.String(key, "v"), }, { limit: 1, attr: strSliceAttr, want: attribute.StringSlice(key, []string{"v", "v"}), }, { limit: 5, attr: strAttr, want: strAttr, }, { limit: 7, attr: strSliceAttr, want: strSliceAttr, }, { limit: 6, attr: attribute.StringSlice(key, []string{"value", "value-1"}), want: attribute.StringSlice(key, []string{"value", "value-"}), }, { limit: 128, attr: strAttr, want: strAttr, }, { limit: 128, attr: strSliceAttr, want: strSliceAttr, }, { // This tests the ordinary safeTruncate(). limit: 10, attr: attribute.String(key, "€€€€"), // 3 bytes each want: attribute.String(key, "€€€"), }, { // This tests truncation with an invalid UTF-8 input. // // Note that after removing the invalid rune, // the string is over length and still has to // be truncated on a code point boundary. limit: 10, attr: attribute.String(key, "€"[0:2]+"hello€€"), // corrupted first rune, then over limit want: attribute.String(key, "hello€"), }, { // This tests the fallback to invalidTruncate() // where after validation the string does not require // truncation. limit: 6, attr: attribute.String(key, "€"[0:2]+"hello"), // corrupted first rune, then not over limit want: attribute.String(key, "hello"), }, } for _, test := range tests { name := fmt.Sprintf("%s->%s(limit:%d)", test.attr.Key, test.attr.Value.Emit(), test.limit) t.Run(name, func(t *testing.T) { assert.Equal(t, test.want, truncateAttr(test.limit, test.attr)) }) } } opentelemetry-go-1.21.0/sdk/trace/trace_test.go000066400000000000000000001643741452547353200214660ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package trace import ( "context" "errors" "fmt" "math" "strconv" "strings" "sync" "sync/atomic" "testing" "time" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/sdk/instrumentation" ottest "go.opentelemetry.io/otel/sdk/internal/internaltest" "go.opentelemetry.io/otel/sdk/resource" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" "go.opentelemetry.io/otel/trace" ) const envVar = "OTEL_RESOURCE_ATTRIBUTES" type storingHandler struct { errs []error } func (s *storingHandler) Handle(err error) { s.errs = append(s.errs, err) } func (s *storingHandler) Reset() { s.errs = nil } var ( tid trace.TraceID sid trace.SpanID sc trace.SpanContext handler = &storingHandler{} ) func init() { tid, _ = trace.TraceIDFromHex("01020304050607080102040810203040") sid, _ = trace.SpanIDFromHex("0102040810203040") sc = trace.NewSpanContext(trace.SpanContextConfig{ TraceID: tid, SpanID: sid, TraceFlags: 0x1, }) otel.SetErrorHandler(handler) } func TestTracerFollowsExpectedAPIBehaviour(t *testing.T) { harness := ottest.NewHarness(t) harness.TestTracerProvider(func() trace.TracerProvider { return NewTracerProvider(WithSampler(TraceIDRatioBased(0))) }) tp := NewTracerProvider(WithSampler(TraceIDRatioBased(0))) harness.TestTracer(func() trace.Tracer { return tp.Tracer("") }) } type testExporter struct { mu sync.RWMutex idx map[string]int spans []*snapshot } func NewTestExporter() *testExporter { return &testExporter{idx: make(map[string]int)} } func (te *testExporter) ExportSpans(_ context.Context, spans []ReadOnlySpan) error { te.mu.Lock() defer te.mu.Unlock() i := len(te.spans) for _, s := range spans { te.idx[s.Name()] = i te.spans = append(te.spans, s.(*snapshot)) i++ } return nil } func (te *testExporter) Spans() []*snapshot { te.mu.RLock() defer te.mu.RUnlock() cp := make([]*snapshot, len(te.spans)) copy(cp, te.spans) return cp } func (te *testExporter) GetSpan(name string) (*snapshot, bool) { te.mu.RLock() defer te.mu.RUnlock() i, ok := te.idx[name] if !ok { return nil, false } return te.spans[i], true } func (te *testExporter) Len() int { te.mu.RLock() defer te.mu.RUnlock() return len(te.spans) } func (te *testExporter) Shutdown(context.Context) error { te.Reset() return nil } func (te *testExporter) Reset() { te.mu.Lock() defer te.mu.Unlock() te.idx = make(map[string]int) te.spans = te.spans[:0] } type testSampler struct { callCount int prefix string t *testing.T } func (ts *testSampler) ShouldSample(p SamplingParameters) SamplingResult { ts.callCount++ ts.t.Logf("called sampler for name %q", p.Name) decision := Drop if strings.HasPrefix(p.Name, ts.prefix) { decision = RecordAndSample } return SamplingResult{Decision: decision, Attributes: []attribute.KeyValue{attribute.Int("callCount", ts.callCount)}} } func (ts testSampler) Description() string { return "testSampler" } func TestSetName(t *testing.T) { tp := NewTracerProvider() type testCase struct { name string newName string } for idx, tt := range []testCase{ { // 0 name: "foobar", newName: "foobaz", }, { // 1 name: "foobar", newName: "barbaz", }, { // 2 name: "barbar", newName: "barbaz", }, { // 3 name: "barbar", newName: "foobar", }, } { sp := startNamedSpan(tp, "SetName", tt.name) if sdkspan, ok := sp.(*recordingSpan); ok { if sdkspan.Name() != tt.name { t.Errorf("%d: invalid name at span creation, expected %v, got %v", idx, tt.name, sdkspan.Name()) } } else { t.Errorf("%d: 
unable to coerce span to SDK span, is type %T", idx, sp) } sp.SetName(tt.newName) if sdkspan, ok := sp.(*recordingSpan); ok { if sdkspan.Name() != tt.newName { t.Errorf("%d: span name not changed, expected %v, got %v", idx, tt.newName, sdkspan.Name()) } } else { t.Errorf("%d: unable to coerce span to SDK span, is type %T", idx, sp) } sp.End() } } func TestSpanIsRecording(t *testing.T) { t.Run("while Span active", func(t *testing.T) { for name, tc := range map[string]struct { sampler Sampler want bool }{ "Always sample, recording on": {sampler: AlwaysSample(), want: true}, "Never sample recording off": {sampler: NeverSample(), want: false}, } { tp := NewTracerProvider(WithSampler(tc.sampler)) _, span := tp.Tracer(name).Start(context.Background(), "StartSpan") got := span.IsRecording() span.End() assert.Equal(t, got, tc.want, name) } }) t.Run("after Span end", func(t *testing.T) { for name, tc := range map[string]Sampler{ "Always Sample": AlwaysSample(), "Never Sample": NeverSample(), } { tp := NewTracerProvider(WithSampler(tc)) _, span := tp.Tracer(name).Start(context.Background(), "StartSpan") span.End() got := span.IsRecording() assert.False(t, got, name) } }) } func TestSampling(t *testing.T) { idg := defaultIDGenerator() const total = 10000 for name, tc := range map[string]struct { sampler Sampler expect float64 parent bool sampledParent bool }{ // Span w/o a parent "NeverSample": {sampler: NeverSample(), expect: 0}, "AlwaysSample": {sampler: AlwaysSample(), expect: 1.0}, "TraceIdRatioBased_-1": {sampler: TraceIDRatioBased(-1.0), expect: 0}, "TraceIdRatioBased_.25": {sampler: TraceIDRatioBased(0.25), expect: .25}, "TraceIdRatioBased_.50": {sampler: TraceIDRatioBased(0.50), expect: .5}, "TraceIdRatioBased_.75": {sampler: TraceIDRatioBased(0.75), expect: .75}, "TraceIdRatioBased_2.0": {sampler: TraceIDRatioBased(2.0), expect: 1}, // Spans w/o a parent and using ParentBased(DelegateSampler()) Sampler, receive DelegateSampler's sampling decision "ParentNeverSample": {sampler: ParentBased(NeverSample()), expect: 0}, "ParentAlwaysSample": {sampler: ParentBased(AlwaysSample()), expect: 1}, "ParentTraceIdRatioBased_.50": {sampler: ParentBased(TraceIDRatioBased(0.50)), expect: .5}, // An unadorned TraceIDRatioBased sampler ignores parent spans "UnsampledParentSpanWithTraceIdRatioBased_.25": {sampler: TraceIDRatioBased(0.25), expect: .25, parent: true}, "SampledParentSpanWithTraceIdRatioBased_.25": {sampler: TraceIDRatioBased(0.25), expect: .25, parent: true, sampledParent: true}, "UnsampledParentSpanWithTraceIdRatioBased_.50": {sampler: TraceIDRatioBased(0.50), expect: .5, parent: true}, "SampledParentSpanWithTraceIdRatioBased_.50": {sampler: TraceIDRatioBased(0.50), expect: .5, parent: true, sampledParent: true}, "UnsampledParentSpanWithTraceIdRatioBased_.75": {sampler: TraceIDRatioBased(0.75), expect: .75, parent: true}, "SampledParentSpanWithTraceIdRatioBased_.75": {sampler: TraceIDRatioBased(0.75), expect: .75, parent: true, sampledParent: true}, // Spans with a sampled parent but using NeverSample Sampler, are not sampled "SampledParentSpanWithNeverSample": {sampler: NeverSample(), expect: 0, parent: true, sampledParent: true}, // Spans with a sampled parent and using ParentBased(DelegateSampler()) Sampler, inherit the parent span's sampling status "SampledParentSpanWithParentNeverSample": {sampler: ParentBased(NeverSample()), expect: 1, parent: true, sampledParent: true}, "UnsampledParentSpanWithParentNeverSampler": {sampler: ParentBased(NeverSample()), expect: 0, parent: true, 
sampledParent: false}, "SampledParentSpanWithParentAlwaysSampler": {sampler: ParentBased(AlwaysSample()), expect: 1, parent: true, sampledParent: true}, "UnsampledParentSpanWithParentAlwaysSampler": {sampler: ParentBased(AlwaysSample()), expect: 0, parent: true, sampledParent: false}, "SampledParentSpanWithParentTraceIdRatioBased_.50": {sampler: ParentBased(TraceIDRatioBased(0.50)), expect: 1, parent: true, sampledParent: true}, "UnsampledParentSpanWithParentTraceIdRatioBased_.50": {sampler: ParentBased(TraceIDRatioBased(0.50)), expect: 0, parent: true, sampledParent: false}, } { tc := tc t.Run(name, func(t *testing.T) { t.Parallel() p := NewTracerProvider(WithSampler(tc.sampler)) tr := p.Tracer("test") var sampled int for i := 0; i < total; i++ { ctx := context.Background() if tc.parent { tid, sid := idg.NewIDs(ctx) psc := trace.NewSpanContext(trace.SpanContextConfig{ TraceID: tid, SpanID: sid, }) if tc.sampledParent { psc = psc.WithTraceFlags(trace.FlagsSampled) } ctx = trace.ContextWithRemoteSpanContext(ctx, psc) } _, span := tr.Start(ctx, "test") if span.SpanContext().IsSampled() { sampled++ } } tolerance := 0.0 got := float64(sampled) / float64(total) if tc.expect > 0 && tc.expect < 1 { // See https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval const z = 4.75342 // This should succeed 99.9999% of the time tolerance = z * math.Sqrt(got*(1-got)/total) } diff := math.Abs(got - tc.expect) if diff > tolerance { t.Errorf("got %f (diff: %f), expected %f (w/tolerance: %f)", got, diff, tc.expect, tolerance) } }) } } func TestStartSpanWithParent(t *testing.T) { tp := NewTracerProvider() tr := tp.Tracer("SpanWithParent") ctx := context.Background() _, s1 := tr.Start(trace.ContextWithRemoteSpanContext(ctx, sc), "span1-unsampled-parent1") if err := checkChild(t, sc, s1); err != nil { t.Error(err) } _, s2 := tr.Start(trace.ContextWithRemoteSpanContext(ctx, sc), "span2-unsampled-parent1") if err := checkChild(t, sc, s2); err != nil { t.Error(err) } ts, err := trace.ParseTraceState("k=v") if err != nil { t.Error(err) } sc2 := sc.WithTraceState(ts) _, s3 := tr.Start(trace.ContextWithRemoteSpanContext(ctx, sc2), "span3-sampled-parent2") if err := checkChild(t, sc2, s3); err != nil { t.Error(err) } ctx2, s4 := tr.Start(trace.ContextWithRemoteSpanContext(ctx, sc2), "span4-sampled-parent2") if err := checkChild(t, sc2, s4); err != nil { t.Error(err) } s4Sc := s4.SpanContext() _, s5 := tr.Start(ctx2, "span5-implicit-childof-span4") if err := checkChild(t, s4Sc, s5); err != nil { t.Error(err) } } // Test we get a successful span as a new root if a nil context is sent in, as opposed to a panic. // See https://github.com/open-telemetry/opentelemetry-go/issues/3109 func TestStartSpanWithNilContext(t *testing.T) { tp := NewTracerProvider() tr := tp.Tracer("NoPanic") // nolint:staticcheck // no nil context, but that's the point of the test. 
assert.NotPanics(t, func() { tr.Start(nil, "should-not-panic") }) } func TestStartSpanNewRootNotSampled(t *testing.T) { alwaysSampleTp := NewTracerProvider() sampledTr := alwaysSampleTp.Tracer("AlwaysSampled") neverSampleTp := NewTracerProvider(WithSampler(ParentBased(NeverSample()))) neverSampledTr := neverSampleTp.Tracer("ParentBasedNeverSample") ctx := context.Background() ctx, s1 := sampledTr.Start(trace.ContextWithRemoteSpanContext(ctx, sc), "span1-sampled") if err := checkChild(t, sc, s1); err != nil { t.Error(err) } _, s2 := neverSampledTr.Start(ctx, "span2-no-newroot") if !s2.SpanContext().IsSampled() { t.Error(fmt.Errorf("got child span is not sampled, want child span with sampler: ParentBased(NeverSample()) to be sampled")) } // Adding WithNewRoot causes child spans to not sample based on parent context _, s3 := neverSampledTr.Start(ctx, "span3-newroot", trace.WithNewRoot()) if s3.SpanContext().IsSampled() { t.Error(fmt.Errorf("got child span is sampled, want child span WithNewRoot() and with sampler: ParentBased(NeverSample()) to not be sampled")) } } func TestSetSpanAttributesOnStart(t *testing.T) { te := NewTestExporter() tp := NewTracerProvider(WithSyncer(te), WithResource(resource.Empty())) span := startSpan(tp, "StartSpanAttribute", trace.WithAttributes(attribute.String("key1", "value1")), trace.WithAttributes(attribute.String("key2", "value2")), ) got, err := endSpan(te, span) if err != nil { t.Fatal(err) } want := &snapshot{ spanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: tid, TraceFlags: 0x1, }), parent: sc.WithRemote(true), name: "span0", attributes: []attribute.KeyValue{ attribute.String("key1", "value1"), attribute.String("key2", "value2"), }, spanKind: trace.SpanKindInternal, instrumentationScope: instrumentation.Scope{Name: "StartSpanAttribute"}, } if diff := cmpDiff(got, want); diff != "" { t.Errorf("SetSpanAttributesOnStart: -got +want %s", diff) } } func TestSamplerAttributesLocalChildSpan(t *testing.T) { sampler := &testSampler{prefix: "span", t: t} te := NewTestExporter() tp := NewTracerProvider(WithSampler(sampler), WithSyncer(te), WithResource(resource.Empty())) ctx := context.Background() ctx, span := startLocalSpan(ctx, tp, "SpanOne", "span0") _, spanTwo := startLocalSpan(ctx, tp, "SpanTwo", "span1") spanTwo.End() span.End() got := te.Spans() require.Len(t, got, 2) // FILO order above means spanTwo <-> gotSpan0 and span <-> gotSpan1. gotSpan0, gotSpan1 := got[0], got[1] // Ensure sampler is called for local child spans by verifying the // attributes set by the sampler are set on the child span. 
assert.Equal(t, []attribute.KeyValue{attribute.Int("callCount", 2)}, gotSpan0.Attributes()) assert.Equal(t, []attribute.KeyValue{attribute.Int("callCount", 1)}, gotSpan1.Attributes()) } func TestSpanSetAttributes(t *testing.T) { attrs := [...]attribute.KeyValue{ attribute.String("key1", "value1"), attribute.String("key2", "value2"), attribute.String("key3", "value3"), attribute.String("key4", "value4"), attribute.String("key1", "value5"), attribute.String("key2", "value6"), attribute.String("key3", "value7"), } invalid := attribute.KeyValue{} tests := []struct { name string input [][]attribute.KeyValue wantAttrs []attribute.KeyValue wantDropped int }{ { name: "array", input: [][]attribute.KeyValue{attrs[:3]}, wantAttrs: attrs[:3], }, { name: "single_value:array", input: [][]attribute.KeyValue{attrs[:1], attrs[1:3]}, wantAttrs: attrs[:3], }, { name: "array:single_value", input: [][]attribute.KeyValue{attrs[:2], attrs[2:3]}, wantAttrs: attrs[:3], }, { name: "single_values", input: [][]attribute.KeyValue{attrs[:1], attrs[1:2], attrs[2:3]}, wantAttrs: attrs[:3], }, // The tracing specification states: // // For each unique attribute key, addition of which would result in // exceeding the limit, SDK MUST discard that key/value pair // // Therefore, adding attributes after the capacity is reached should // result in those attributes being dropped. { name: "drop_last_added", input: [][]attribute.KeyValue{attrs[:3], attrs[3:4], attrs[3:4]}, wantAttrs: attrs[:3], wantDropped: 2, }, // The tracing specification states: // // Setting an attribute with the same key as an existing attribute // SHOULD overwrite the existing attribute's value. // // Therefore, attributes are updated regardless of capacity state. { name: "single_value_update", input: [][]attribute.KeyValue{attrs[:1], attrs[:3]}, wantAttrs: attrs[:3], }, { name: "all_update", input: [][]attribute.KeyValue{attrs[:3], attrs[4:7]}, wantAttrs: attrs[4:7], }, { name: "all_update/multi", input: [][]attribute.KeyValue{attrs[:3], attrs[4:7], attrs[:3]}, wantAttrs: attrs[:3], }, { name: "deduplicate/under_capacity", input: [][]attribute.KeyValue{attrs[:1], attrs[:1], attrs[:1]}, wantAttrs: attrs[:1], }, { name: "deduplicate/over_capacity", input: [][]attribute.KeyValue{attrs[:1], attrs[:1], attrs[:1], attrs[:3]}, wantAttrs: attrs[:3], }, { name: "deduplicate/added", input: [][]attribute.KeyValue{ attrs[:2], {attrs[2], attrs[2], attrs[2]}, }, wantAttrs: attrs[:3], }, { name: "deduplicate/added_at_cappacity", input: [][]attribute.KeyValue{ attrs[:3], {attrs[2], attrs[2], attrs[2]}, }, wantAttrs: attrs[:3], }, { name: "invalid", input: [][]attribute.KeyValue{ {invalid}, }, wantDropped: 1, }, { name: "invalid_with_valid", input: [][]attribute.KeyValue{ {invalid, attrs[0]}, }, wantAttrs: attrs[:1], wantDropped: 1, }, { name: "invalid_over_capacity", input: [][]attribute.KeyValue{ {invalid, invalid, invalid, invalid, attrs[0]}, }, wantAttrs: attrs[:1], wantDropped: 4, }, { name: "valid:invalid/under_capacity", input: [][]attribute.KeyValue{ attrs[:1], {invalid}, }, wantAttrs: attrs[:1], wantDropped: 1, }, { name: "valid:invalid/over_capacity", input: [][]attribute.KeyValue{ attrs[:1], {invalid, invalid, invalid, invalid}, }, wantAttrs: attrs[:1], wantDropped: 4, }, { name: "valid_at_capacity:invalid", input: [][]attribute.KeyValue{ attrs[:3], {invalid, invalid, invalid, invalid}, }, wantAttrs: attrs[:3], wantDropped: 4, }, } const ( capacity = 3 instName = "TestSpanAttributeCapacity" spanName = "test span" ) for _, test := range tests { t.Run(test.name, 
func(t *testing.T) { te := NewTestExporter() sl := NewSpanLimits() sl.AttributeCountLimit = capacity tp := NewTracerProvider(WithSyncer(te), WithSpanLimits(sl)) _, span := tp.Tracer(instName).Start(context.Background(), spanName) for _, a := range test.input { span.SetAttributes(a...) } span.End() require.Implements(t, (*ReadOnlySpan)(nil), span) roSpan := span.(ReadOnlySpan) // Ensure the span itself is valid. assert.ElementsMatch(t, test.wantAttrs, roSpan.Attributes(), "expected attributes") assert.Equal(t, test.wantDropped, roSpan.DroppedAttributes(), "dropped attributes") snap, ok := te.GetSpan(spanName) require.Truef(t, ok, "span %s not exported", spanName) // Ensure the exported span snapshot is valid. assert.ElementsMatch(t, test.wantAttrs, snap.Attributes(), "expected attributes") assert.Equal(t, test.wantDropped, snap.DroppedAttributes(), "dropped attributes") }) } } func TestEvents(t *testing.T) { te := NewTestExporter() tp := NewTracerProvider(WithSyncer(te), WithResource(resource.Empty())) span := startSpan(tp, "Events") k1v1 := attribute.String("key1", "value1") k2v2 := attribute.Bool("key2", true) k3v3 := attribute.Int64("key3", 3) span.AddEvent("foo", trace.WithAttributes(attribute.String("key1", "value1"))) span.AddEvent("bar", trace.WithAttributes( attribute.Bool("key2", true), attribute.Int64("key3", 3), )) got, err := endSpan(te, span) if err != nil { t.Fatal(err) } for i := range got.Events() { if !checkTime(&got.Events()[i].Time) { t.Error("exporting span: expected nonzero Event Time") } } want := &snapshot{ spanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: tid, TraceFlags: 0x1, }), parent: sc.WithRemote(true), name: "span0", events: []Event{ {Name: "foo", Attributes: []attribute.KeyValue{k1v1}}, {Name: "bar", Attributes: []attribute.KeyValue{k2v2, k3v3}}, }, spanKind: trace.SpanKindInternal, instrumentationScope: instrumentation.Scope{Name: "Events"}, } if diff := cmpDiff(got, want); diff != "" { t.Errorf("Message Events: -got +want %s", diff) } } func TestEventsOverLimit(t *testing.T) { te := NewTestExporter() sl := NewSpanLimits() sl.EventCountLimit = 2 tp := NewTracerProvider(WithSpanLimits(sl), WithSyncer(te), WithResource(resource.Empty())) span := startSpan(tp, "EventsOverLimit") k1v1 := attribute.String("key1", "value1") k2v2 := attribute.Bool("key2", false) k3v3 := attribute.String("key3", "value3") span.AddEvent("fooDrop", trace.WithAttributes(attribute.String("key1", "value1"))) span.AddEvent("barDrop", trace.WithAttributes( attribute.Bool("key2", true), attribute.String("key3", "value3"), )) span.AddEvent("foo", trace.WithAttributes(attribute.String("key1", "value1"))) span.AddEvent("bar", trace.WithAttributes( attribute.Bool("key2", false), attribute.String("key3", "value3"), )) got, err := endSpan(te, span) if err != nil { t.Fatal(err) } for i := range got.Events() { if !checkTime(&got.Events()[i].Time) { t.Error("exporting span: expected nonzero Event Time") } } want := &snapshot{ spanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: tid, TraceFlags: 0x1, }), parent: sc.WithRemote(true), name: "span0", events: []Event{ {Name: "foo", Attributes: []attribute.KeyValue{k1v1}}, {Name: "bar", Attributes: []attribute.KeyValue{k2v2, k3v3}}, }, droppedEventCount: 2, spanKind: trace.SpanKindInternal, instrumentationScope: instrumentation.Scope{Name: "EventsOverLimit"}, } if diff := cmpDiff(got, want); diff != "" { t.Errorf("Message Event over limit: -got +want %s", diff) } } func TestLinks(t *testing.T) { te := NewTestExporter() tp 
:= NewTracerProvider(WithSyncer(te), WithResource(resource.Empty())) k1v1 := attribute.String("key1", "value1") k2v2 := attribute.String("key2", "value2") k3v3 := attribute.String("key3", "value3") sc1 := trace.NewSpanContext(trace.SpanContextConfig{TraceID: trace.TraceID([16]byte{1, 1}), SpanID: trace.SpanID{3}}) sc2 := trace.NewSpanContext(trace.SpanContextConfig{TraceID: trace.TraceID([16]byte{1, 1}), SpanID: trace.SpanID{3}}) l1 := trace.Link{SpanContext: sc1, Attributes: []attribute.KeyValue{k1v1}} l2 := trace.Link{SpanContext: sc2, Attributes: []attribute.KeyValue{k2v2, k3v3}} links := []trace.Link{l1, l2} span := startSpan(tp, "Links", trace.WithLinks(links...)) got, err := endSpan(te, span) if err != nil { t.Fatal(err) } want := &snapshot{ spanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: tid, TraceFlags: 0x1, }), parent: sc.WithRemote(true), name: "span0", links: []Link{{l1.SpanContext, l1.Attributes, 0}, {l2.SpanContext, l2.Attributes, 0}}, spanKind: trace.SpanKindInternal, instrumentationScope: instrumentation.Scope{Name: "Links"}, } if diff := cmpDiff(got, want); diff != "" { t.Errorf("Link: -got +want %s", diff) } sc1 = trace.NewSpanContext(trace.SpanContextConfig{TraceID: trace.TraceID([16]byte{1, 1}), SpanID: trace.SpanID{3}}) span1 := startSpan(tp, "name", trace.WithLinks([]trace.Link{ {SpanContext: trace.SpanContext{}}, {SpanContext: sc1}, }...)) sdkspan, _ := span1.(*recordingSpan) require.Len(t, sdkspan.Links(), 1) } func TestLinksOverLimit(t *testing.T) { te := NewTestExporter() sc1 := trace.NewSpanContext(trace.SpanContextConfig{TraceID: trace.TraceID([16]byte{1, 1}), SpanID: trace.SpanID{3}}) sc2 := trace.NewSpanContext(trace.SpanContextConfig{TraceID: trace.TraceID([16]byte{1, 1}), SpanID: trace.SpanID{3}}) sc3 := trace.NewSpanContext(trace.SpanContextConfig{TraceID: trace.TraceID([16]byte{1, 1}), SpanID: trace.SpanID{3}}) sl := NewSpanLimits() sl.LinkCountLimit = 2 tp := NewTracerProvider(WithSpanLimits(sl), WithSyncer(te), WithResource(resource.Empty())) span := startSpan(tp, "LinksOverLimit", trace.WithLinks( trace.Link{SpanContext: sc1, Attributes: []attribute.KeyValue{attribute.String("key1", "value1")}}, trace.Link{SpanContext: sc2, Attributes: []attribute.KeyValue{attribute.String("key2", "value2")}}, trace.Link{SpanContext: sc3, Attributes: []attribute.KeyValue{attribute.String("key3", "value3")}}, ), ) k2v2 := attribute.String("key2", "value2") k3v3 := attribute.String("key3", "value3") got, err := endSpan(te, span) if err != nil { t.Fatal(err) } want := &snapshot{ spanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: tid, TraceFlags: 0x1, }), parent: sc.WithRemote(true), name: "span0", links: []Link{ {SpanContext: sc2, Attributes: []attribute.KeyValue{k2v2}, DroppedAttributeCount: 0}, {SpanContext: sc3, Attributes: []attribute.KeyValue{k3v3}, DroppedAttributeCount: 0}, }, droppedLinkCount: 1, spanKind: trace.SpanKindInternal, instrumentationScope: instrumentation.Scope{Name: "LinksOverLimit"}, } if diff := cmpDiff(got, want); diff != "" { t.Errorf("Link over limit: -got +want %s", diff) } } func TestSetSpanName(t *testing.T) { te := NewTestExporter() tp := NewTracerProvider(WithSyncer(te), WithResource(resource.Empty())) ctx := context.Background() want := "SpanName-1" ctx = trace.ContextWithRemoteSpanContext(ctx, sc) _, span := tp.Tracer("SetSpanName").Start(ctx, "SpanName-1") got, err := endSpan(te, span) if err != nil { t.Fatal(err) } if got.Name() != want { t.Errorf("span.Name: got %q; want %q", got.Name(), want) } } func 
TestSetSpanStatus(t *testing.T) { te := NewTestExporter() tp := NewTracerProvider(WithSyncer(te), WithResource(resource.Empty())) span := startSpan(tp, "SpanStatus") span.SetStatus(codes.Error, "Error") got, err := endSpan(te, span) if err != nil { t.Fatal(err) } want := &snapshot{ spanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: tid, TraceFlags: 0x1, }), parent: sc.WithRemote(true), name: "span0", spanKind: trace.SpanKindInternal, status: Status{ Code: codes.Error, Description: "Error", }, instrumentationScope: instrumentation.Scope{Name: "SpanStatus"}, } if diff := cmpDiff(got, want); diff != "" { t.Errorf("SetSpanStatus: -got +want %s", diff) } } func TestSetSpanStatusWithoutMessageWhenStatusIsNotError(t *testing.T) { te := NewTestExporter() tp := NewTracerProvider(WithSyncer(te), WithResource(resource.Empty())) span := startSpan(tp, "SpanStatus") span.SetStatus(codes.Ok, "This message will be ignored") got, err := endSpan(te, span) if err != nil { t.Fatal(err) } want := &snapshot{ spanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: tid, TraceFlags: 0x1, }), parent: sc.WithRemote(true), name: "span0", spanKind: trace.SpanKindInternal, status: Status{ Code: codes.Ok, Description: "", }, instrumentationScope: instrumentation.Scope{Name: "SpanStatus"}, } if diff := cmpDiff(got, want); diff != "" { t.Errorf("SetSpanStatus: -got +want %s", diff) } } func cmpDiff(x, y interface{}) string { return cmp.Diff(x, y, cmp.AllowUnexported(snapshot{}), cmp.AllowUnexported(attribute.Value{}), cmp.AllowUnexported(Event{}), cmp.AllowUnexported(trace.TraceState{})) } // checkChild is test utility function that tests that c has fields set appropriately, // given that it is a child span of p. func checkChild(t *testing.T, p trace.SpanContext, apiSpan trace.Span) error { s := apiSpan.(*recordingSpan) if s == nil { return fmt.Errorf("got nil child span, want non-nil") } if got, want := s.spanContext.TraceID().String(), p.TraceID().String(); got != want { return fmt.Errorf("got child trace ID %s, want %s", got, want) } if childID, parentID := s.spanContext.SpanID().String(), p.SpanID().String(); childID == parentID { return fmt.Errorf("got child span ID %s, parent span ID %s; want unequal IDs", childID, parentID) } if got, want := s.spanContext.TraceFlags(), p.TraceFlags(); got != want { return fmt.Errorf("got child trace options %d, want %d", got, want) } got, want := s.spanContext.TraceState(), p.TraceState() assert.Equal(t, want, got) return nil } // startSpan starts a span with a name "span0". See startNamedSpan for // details. func startSpan(tp *TracerProvider, trName string, args ...trace.SpanStartOption) trace.Span { return startNamedSpan(tp, trName, "span0", args...) } // startNamed Span is a test utility func that starts a span with a // passed name and with remote span context as parent. The remote span // context contains TraceFlags with sampled bit set. This allows the // span to be automatically sampled. func startNamedSpan(tp *TracerProvider, trName, name string, args ...trace.SpanStartOption) trace.Span { _, span := tp.Tracer(trName).Start( trace.ContextWithRemoteSpanContext(context.Background(), sc), name, args..., ) return span } // startLocalSpan is a test utility func that starts a span with a // passed name and with the passed context. The context is returned // along with the span so this parent can be used to create child // spans. 
func startLocalSpan(ctx context.Context, tp *TracerProvider, trName, name string, args ...trace.SpanStartOption) (context.Context, trace.Span) { ctx, span := tp.Tracer(trName).Start( ctx, name, args..., ) return ctx, span } // endSpan is a test utility function that ends the span in the context and // returns the exported span. // It requires that span be sampled using one of these methods // 1. Passing parent span context in context // 2. Use WithSampler(AlwaysSample()) // 3. Configuring AlwaysSample() as default sampler // // It also does some basic tests on the span. // It also clears spanID in the to make the comparison easier. func endSpan(te *testExporter, span trace.Span) (*snapshot, error) { if !span.IsRecording() { return nil, fmt.Errorf("method IsRecording: got false, want true") } if !span.SpanContext().IsSampled() { return nil, fmt.Errorf("method IsSampled: got false, want true") } span.End() if te.Len() != 1 { return nil, fmt.Errorf("got %d exported spans, want one span", te.Len()) } got := te.Spans()[0] if !got.SpanContext().SpanID().IsValid() { return nil, fmt.Errorf("exporting span: expected nonzero SpanID") } got.spanContext = got.SpanContext().WithSpanID(trace.SpanID{}) if !checkTime(&got.startTime) { return nil, fmt.Errorf("exporting span: expected nonzero StartTime") } if !checkTime(&got.endTime) { return nil, fmt.Errorf("exporting span: expected nonzero EndTime") } return got, nil } // checkTime checks that a nonzero time was set in x, then clears it. func checkTime(x *time.Time) bool { if x.IsZero() { return false } *x = time.Time{} return true } func TestEndSpanTwice(t *testing.T) { te := NewTestExporter() tp := NewTracerProvider(WithSyncer(te)) st := time.Now() et1 := st.Add(100 * time.Millisecond) et2 := st.Add(200 * time.Millisecond) span := startSpan(tp, "EndSpanTwice", trace.WithTimestamp(st)) span.End(trace.WithTimestamp(et1)) span.End(trace.WithTimestamp(et2)) if te.Len() != 1 { t.Fatalf("expected only a single span, got %#v", te.Spans()) } ro := span.(ReadOnlySpan) if ro.EndTime() != et1 { t.Fatalf("2nd call to End() should not modify end time") } } func TestStartSpanAfterEnd(t *testing.T) { te := NewTestExporter() tp := NewTracerProvider(WithSampler(AlwaysSample()), WithSyncer(te)) ctx := context.Background() tr := tp.Tracer("SpanAfterEnd") ctx, span0 := tr.Start(trace.ContextWithRemoteSpanContext(ctx, sc), "parent") ctx1, span1 := tr.Start(ctx, "span-1") span1.End() // Start a new span with the context containing span-1 // even though span-1 is ended, we still add this as a new child of span-1 _, span2 := tr.Start(ctx1, "span-2") span2.End() span0.End() if got, want := te.Len(), 3; got != want { t.Fatalf("len(%#v) = %d; want %d", te.Spans(), got, want) } gotParent, ok := te.GetSpan("parent") if !ok { t.Fatal("parent not recorded") } gotSpan1, ok := te.GetSpan("span-1") if !ok { t.Fatal("span-1 not recorded") } gotSpan2, ok := te.GetSpan("span-2") if !ok { t.Fatal("span-2 not recorded") } if got, want := gotSpan1.SpanContext().TraceID(), gotParent.SpanContext().TraceID(); got != want { t.Errorf("span-1.TraceID=%q; want %q", got, want) } if got, want := gotSpan2.SpanContext().TraceID(), gotParent.SpanContext().TraceID(); got != want { t.Errorf("span-2.TraceID=%q; want %q", got, want) } if got, want := gotSpan1.Parent().SpanID(), gotParent.SpanContext().SpanID(); got != want { t.Errorf("span-1.ParentSpanID=%q; want %q (parent.SpanID)", got, want) } if got, want := gotSpan2.Parent().SpanID(), gotSpan1.SpanContext().SpanID(); got != want { 
t.Errorf("span-2.ParentSpanID=%q; want %q (span1.SpanID)", got, want) } } func TestChildSpanCount(t *testing.T) { te := NewTestExporter() tp := NewTracerProvider(WithSampler(AlwaysSample()), WithSyncer(te)) tr := tp.Tracer("ChidSpanCount") ctx, span0 := tr.Start(context.Background(), "parent") ctx1, span1 := tr.Start(ctx, "span-1") _, span2 := tr.Start(ctx1, "span-2") span2.End() span1.End() _, span3 := tr.Start(ctx, "span-3") span3.End() span0.End() if got, want := te.Len(), 4; got != want { t.Fatalf("len(%#v) = %d; want %d", te.Spans(), got, want) } gotParent, ok := te.GetSpan("parent") if !ok { t.Fatal("parent not recorded") } gotSpan1, ok := te.GetSpan("span-1") if !ok { t.Fatal("span-1 not recorded") } gotSpan2, ok := te.GetSpan("span-2") if !ok { t.Fatal("span-2 not recorded") } gotSpan3, ok := te.GetSpan("span-3") if !ok { t.Fatal("span-3 not recorded") } if got, want := gotSpan3.ChildSpanCount(), 0; got != want { t.Errorf("span-3.ChildSpanCount=%d; want %d", got, want) } if got, want := gotSpan2.ChildSpanCount(), 0; got != want { t.Errorf("span-2.ChildSpanCount=%d; want %d", got, want) } if got, want := gotSpan1.ChildSpanCount(), 1; got != want { t.Errorf("span-1.ChildSpanCount=%d; want %d", got, want) } if got, want := gotParent.ChildSpanCount(), 2; got != want { t.Errorf("parent.ChildSpanCount=%d; want %d", got, want) } } func TestNilSpanEnd(t *testing.T) { var span *recordingSpan span.End() } func TestNonRecordingSpanDoesNotTrackRuntimeTracerTask(t *testing.T) { tp := NewTracerProvider(WithSampler(NeverSample())) tr := tp.Tracer("TestNonRecordingSpanDoesNotTrackRuntimeTracerTask") _, apiSpan := tr.Start(context.Background(), "foo") if _, ok := apiSpan.(runtimeTracer); ok { t.Fatalf("non recording span implements runtime trace task tracking") } } func TestRecordingSpanRuntimeTracerTaskEnd(t *testing.T) { tp := NewTracerProvider(WithSampler(AlwaysSample())) tr := tp.Tracer("TestRecordingSpanRuntimeTracerTaskEnd") var n uint64 executionTracerTaskEnd := func() { atomic.AddUint64(&n, 1) } _, apiSpan := tr.Start(context.Background(), "foo") s, ok := apiSpan.(*recordingSpan) if !ok { t.Fatal("recording span not returned from always sampled Tracer") } s.executionTracerTaskEnd = executionTracerTaskEnd s.End() if n != 1 { t.Error("recording span did not end runtime trace task") } } func TestCustomStartEndTime(t *testing.T) { te := NewTestExporter() tp := NewTracerProvider(WithSyncer(te), WithSampler(AlwaysSample())) startTime := time.Date(2019, time.August, 27, 14, 42, 0, 0, time.UTC) endTime := startTime.Add(time.Second * 20) _, span := tp.Tracer("Custom Start and End time").Start( context.Background(), "testspan", trace.WithTimestamp(startTime), ) span.End(trace.WithTimestamp(endTime)) if te.Len() != 1 { t.Fatalf("got %d exported spans, want one span", te.Len()) } got := te.Spans()[0] if !got.StartTime().Equal(startTime) { t.Errorf("expected start time to be %s, got %s", startTime, got.StartTime()) } if !got.EndTime().Equal(endTime) { t.Errorf("expected end time to be %s, got %s", endTime, got.EndTime()) } } func TestRecordError(t *testing.T) { scenarios := []struct { err error typ string msg string }{ { err: ottest.NewTestError("test error"), typ: "go.opentelemetry.io/otel/sdk/internal/internaltest.TestError", msg: "test error", }, { err: errors.New("test error 2"), typ: "*errors.errorString", msg: "test error 2", }, } for _, s := range scenarios { te := NewTestExporter() tp := NewTracerProvider(WithSyncer(te), WithResource(resource.Empty())) span := startSpan(tp, "RecordError") 
errTime := time.Now() span.RecordError(s.err, trace.WithTimestamp(errTime)) got, err := endSpan(te, span) if err != nil { t.Fatal(err) } want := &snapshot{ spanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: tid, TraceFlags: 0x1, }), parent: sc.WithRemote(true), name: "span0", status: Status{Code: codes.Unset}, spanKind: trace.SpanKindInternal, events: []Event{ { Name: semconv.ExceptionEventName, Time: errTime, Attributes: []attribute.KeyValue{ semconv.ExceptionType(s.typ), semconv.ExceptionMessage(s.msg), }, }, }, instrumentationScope: instrumentation.Scope{Name: "RecordError"}, } if diff := cmpDiff(got, want); diff != "" { t.Errorf("SpanErrorOptions: -got +want %s", diff) } } } func TestRecordErrorWithStackTrace(t *testing.T) { err := ottest.NewTestError("test error") typ := "go.opentelemetry.io/otel/sdk/internal/internaltest.TestError" msg := "test error" te := NewTestExporter() tp := NewTracerProvider(WithSyncer(te), WithResource(resource.Empty())) span := startSpan(tp, "RecordError") errTime := time.Now() span.RecordError(err, trace.WithTimestamp(errTime), trace.WithStackTrace(true)) got, err := endSpan(te, span) if err != nil { t.Fatal(err) } want := &snapshot{ spanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: tid, TraceFlags: 0x1, }), parent: sc.WithRemote(true), name: "span0", status: Status{Code: codes.Unset}, spanKind: trace.SpanKindInternal, events: []Event{ { Name: semconv.ExceptionEventName, Time: errTime, Attributes: []attribute.KeyValue{ semconv.ExceptionType(typ), semconv.ExceptionMessage(msg), }, }, }, instrumentationScope: instrumentation.Scope{Name: "RecordError"}, } assert.Equal(t, got.spanContext, want.spanContext) assert.Equal(t, got.parent, want.parent) assert.Equal(t, got.name, want.name) assert.Equal(t, got.status, want.status) assert.Equal(t, got.spanKind, want.spanKind) assert.Equal(t, got.events[0].Attributes[0].Value.AsString(), want.events[0].Attributes[0].Value.AsString()) assert.Equal(t, got.events[0].Attributes[1].Value.AsString(), want.events[0].Attributes[1].Value.AsString()) gotStackTraceFunctionName := strings.Split(got.events[0].Attributes[2].Value.AsString(), "\n") assert.Truef(t, strings.HasPrefix(gotStackTraceFunctionName[1], "go.opentelemetry.io/otel/sdk/trace.recordStackTrace"), "%q not prefixed with go.opentelemetry.io/otel/sdk/trace.recordStackTrace", gotStackTraceFunctionName[1]) assert.Truef(t, strings.HasPrefix(gotStackTraceFunctionName[3], "go.opentelemetry.io/otel/sdk/trace.(*recordingSpan).RecordError"), "%q not prefixed with go.opentelemetry.io/otel/sdk/trace.(*recordingSpan).RecordError", gotStackTraceFunctionName[3]) } func TestRecordErrorNil(t *testing.T) { te := NewTestExporter() tp := NewTracerProvider(WithSyncer(te), WithResource(resource.Empty())) span := startSpan(tp, "RecordErrorNil") span.RecordError(nil) got, err := endSpan(te, span) if err != nil { t.Fatal(err) } want := &snapshot{ spanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: tid, TraceFlags: 0x1, }), parent: sc.WithRemote(true), name: "span0", spanKind: trace.SpanKindInternal, status: Status{ Code: codes.Unset, Description: "", }, instrumentationScope: instrumentation.Scope{Name: "RecordErrorNil"}, } if diff := cmpDiff(got, want); diff != "" { t.Errorf("SpanErrorOptions: -got +want %s", diff) } } func TestWithSpanKind(t *testing.T) { te := NewTestExporter() tp := NewTracerProvider(WithSyncer(te), WithSampler(AlwaysSample()), WithResource(resource.Empty())) tr := tp.Tracer("withSpanKind") _, span := 
tr.Start(context.Background(), "WithoutSpanKind") spanData, err := endSpan(te, span) if err != nil { t.Error(err.Error()) } if spanData.SpanKind() != trace.SpanKindInternal { t.Errorf("Default value of Spankind should be Internal: got %+v, want %+v\n", spanData.SpanKind(), trace.SpanKindInternal) } sks := []trace.SpanKind{ trace.SpanKindInternal, trace.SpanKindServer, trace.SpanKindClient, trace.SpanKindProducer, trace.SpanKindConsumer, } for _, sk := range sks { te.Reset() _, span := tr.Start(context.Background(), fmt.Sprintf("SpanKind-%v", sk), trace.WithSpanKind(sk)) spanData, err := endSpan(te, span) if err != nil { t.Error(err.Error()) } if spanData.SpanKind() != sk { t.Errorf("WithSpanKind check: got %+v, want %+v\n", spanData.SpanKind(), sks) } } } func mergeResource(t *testing.T, r1, r2 *resource.Resource) *resource.Resource { r, err := resource.Merge(r1, r2) assert.NoError(t, err) return r } func TestWithResource(t *testing.T) { store, err := ottest.SetEnvVariables(map[string]string{ envVar: "key=value,rk5=7", }) require.NoError(t, err) defer func() { require.NoError(t, store.Restore()) }() cases := []struct { name string options []TracerProviderOption want *resource.Resource msg string }{ { name: "explicitly empty resource", options: []TracerProviderOption{WithResource(resource.Empty())}, want: resource.Environment(), }, { name: "uses default if no resource option", options: []TracerProviderOption{}, want: resource.Default(), }, { name: "explicit resource", options: []TracerProviderOption{WithResource(resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk2", 5)))}, want: mergeResource(t, resource.Environment(), resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk2", 5))), }, { name: "last resource wins", options: []TracerProviderOption{ WithResource(resource.NewSchemaless(attribute.String("rk1", "vk1"), attribute.Int64("rk2", 5))), WithResource(resource.NewSchemaless(attribute.String("rk3", "rv3"), attribute.Int64("rk4", 10))), }, want: mergeResource(t, resource.Environment(), resource.NewSchemaless(attribute.String("rk3", "rv3"), attribute.Int64("rk4", 10))), }, { name: "overlapping attributes with environment resource", options: []TracerProviderOption{WithResource(resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk5", 10)))}, want: mergeResource(t, resource.Environment(), resource.NewSchemaless(attribute.String("rk1", "rv1"), attribute.Int64("rk5", 10))), }, } for _, tc := range cases { tc := tc t.Run(tc.name, func(t *testing.T) { te := NewTestExporter() defaultOptions := []TracerProviderOption{WithSyncer(te), WithSampler(AlwaysSample())} tp := NewTracerProvider(append(defaultOptions, tc.options...)...) 
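// Start and end a sampled span, then verify the exported snapshot carries
// the resource expected for this test case.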
span := startSpan(tp, "WithResource") span.SetAttributes(attribute.String("key1", "value1")) got, err := endSpan(te, span) if err != nil { t.Error(err.Error()) } want := &snapshot{ spanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: tid, TraceFlags: 0x1, }), parent: sc.WithRemote(true), name: "span0", attributes: []attribute.KeyValue{ attribute.String("key1", "value1"), }, spanKind: trace.SpanKindInternal, resource: tc.want, instrumentationScope: instrumentation.Scope{Name: "WithResource"}, } if diff := cmpDiff(got, want); diff != "" { t.Errorf("WithResource:\n -got +want %s", diff) } }) } } func TestWithInstrumentationVersionAndSchema(t *testing.T) { te := NewTestExporter() tp := NewTracerProvider(WithSyncer(te), WithResource(resource.Empty())) ctx := context.Background() ctx = trace.ContextWithRemoteSpanContext(ctx, sc) _, span := tp.Tracer( "WithInstrumentationVersion", trace.WithInstrumentationVersion("v0.1.0"), trace.WithSchemaURL("https://opentelemetry.io/schemas/1.2.0"), ).Start(ctx, "span0") got, err := endSpan(te, span) if err != nil { t.Error(err.Error()) } want := &snapshot{ spanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: tid, TraceFlags: 0x1, }), parent: sc.WithRemote(true), name: "span0", spanKind: trace.SpanKindInternal, instrumentationScope: instrumentation.Scope{ Name: "WithInstrumentationVersion", Version: "v0.1.0", SchemaURL: "https://opentelemetry.io/schemas/1.2.0", }, } if diff := cmpDiff(got, want); diff != "" { t.Errorf("WithResource:\n -got +want %s", diff) } } func TestSpanCapturesPanic(t *testing.T) { te := NewTestExporter() tp := NewTracerProvider(WithSyncer(te), WithResource(resource.Empty())) _, span := tp.Tracer("CatchPanic").Start( context.Background(), "span", ) f := func() { defer span.End() panic(errors.New("error message")) } require.PanicsWithError(t, "error message", f) spans := te.Spans() require.Len(t, spans, 1) require.Len(t, spans[0].Events(), 1) assert.Equal(t, spans[0].Events()[0].Name, semconv.ExceptionEventName) assert.Equal(t, spans[0].Events()[0].Attributes, []attribute.KeyValue{ semconv.ExceptionType("*errors.errorString"), semconv.ExceptionMessage("error message"), }) } func TestSpanCapturesPanicWithStackTrace(t *testing.T) { te := NewTestExporter() tp := NewTracerProvider(WithSyncer(te), WithResource(resource.Empty())) _, span := tp.Tracer("CatchPanic").Start( context.Background(), "span", ) f := func() { defer span.End(trace.WithStackTrace(true)) panic(errors.New("error message")) } require.PanicsWithError(t, "error message", f) spans := te.Spans() require.Len(t, spans, 1) require.Len(t, spans[0].Events(), 1) assert.Equal(t, spans[0].Events()[0].Name, semconv.ExceptionEventName) assert.Equal(t, spans[0].Events()[0].Attributes[0].Value.AsString(), "*errors.errorString") assert.Equal(t, spans[0].Events()[0].Attributes[1].Value.AsString(), "error message") gotStackTraceFunctionName := strings.Split(spans[0].Events()[0].Attributes[2].Value.AsString(), "\n") assert.Truef(t, strings.HasPrefix(gotStackTraceFunctionName[1], "go.opentelemetry.io/otel/sdk/trace.recordStackTrace"), "%q not prefixed with go.opentelemetry.io/otel/sdk/trace.recordStackTrace", gotStackTraceFunctionName[1]) assert.Truef(t, strings.HasPrefix(gotStackTraceFunctionName[3], "go.opentelemetry.io/otel/sdk/trace.(*recordingSpan).End"), "%q not prefixed with go.opentelemetry.io/otel/sdk/trace.(*recordingSpan).End", gotStackTraceFunctionName[3]) } func TestReadOnlySpan(t *testing.T) { kv := attribute.String("foo", "bar") tp := 
NewTracerProvider(WithResource(resource.NewSchemaless(kv))) tr := tp.Tracer("ReadOnlySpan", trace.WithInstrumentationVersion("3")) // Initialize parent context. tID, sID := tp.idGenerator.NewIDs(context.Background()) parent := trace.NewSpanContext(trace.SpanContextConfig{ TraceID: tID, SpanID: sID, TraceFlags: 0x1, Remote: true, }) ctx := trace.ContextWithRemoteSpanContext(context.Background(), parent) // Initialize linked context. tID, sID = tp.idGenerator.NewIDs(context.Background()) linked := trace.NewSpanContext(trace.SpanContextConfig{ TraceID: tID, SpanID: sID, TraceFlags: 0x1, }) st := time.Now() ctx, s := tr.Start(ctx, "foo", trace.WithTimestamp(st), trace.WithLinks(trace.Link{SpanContext: linked})) s.SetAttributes(kv) s.AddEvent("foo", trace.WithAttributes(kv)) s.SetStatus(codes.Ok, "foo") // Verify span implements ReadOnlySpan. ro, ok := s.(ReadOnlySpan) require.True(t, ok) assert.Equal(t, "foo", ro.Name()) assert.Equal(t, trace.SpanContextFromContext(ctx), ro.SpanContext()) assert.Equal(t, parent, ro.Parent()) assert.Equal(t, trace.SpanKindInternal, ro.SpanKind()) assert.Equal(t, st, ro.StartTime()) assert.True(t, ro.EndTime().IsZero()) assert.Equal(t, kv.Key, ro.Attributes()[0].Key) assert.Equal(t, kv.Value, ro.Attributes()[0].Value) assert.Equal(t, linked, ro.Links()[0].SpanContext) assert.Equal(t, kv.Key, ro.Events()[0].Attributes[0].Key) assert.Equal(t, kv.Value, ro.Events()[0].Attributes[0].Value) assert.Equal(t, codes.Ok, ro.Status().Code) assert.Equal(t, "", ro.Status().Description) assert.Equal(t, "ReadOnlySpan", ro.InstrumentationLibrary().Name) assert.Equal(t, "3", ro.InstrumentationLibrary().Version) assert.Equal(t, "ReadOnlySpan", ro.InstrumentationScope().Name) assert.Equal(t, "3", ro.InstrumentationScope().Version) assert.Equal(t, kv.Key, ro.Resource().Attributes()[0].Key) assert.Equal(t, kv.Value, ro.Resource().Attributes()[0].Value) // Verify changes to the original span are reflected in the ReadOnlySpan. s.SetName("bar") assert.Equal(t, "bar", ro.Name()) // Verify snapshot() returns snapshots that are independent from the // original span and from one another. d1 := s.(*recordingSpan).snapshot() s.AddEvent("baz") d2 := s.(*recordingSpan).snapshot() for _, e := range d1.Events() { if e.Name == "baz" { t.Errorf("Didn't expect to find 'baz' event") } } var exists bool for _, e := range d2.Events() { if e.Name == "baz" { exists = true } } if !exists { t.Errorf("Expected to find 'baz' event") } et := st.Add(time.Millisecond) s.End(trace.WithTimestamp(et)) assert.Equal(t, et, ro.EndTime()) } func TestReadWriteSpan(t *testing.T) { tp := NewTracerProvider(WithResource(resource.Empty())) tr := tp.Tracer("ReadWriteSpan") // Initialize parent context. tID, sID := tp.idGenerator.NewIDs(context.Background()) parent := trace.NewSpanContext(trace.SpanContextConfig{ TraceID: tID, SpanID: sID, TraceFlags: 0x1, }) ctx := trace.ContextWithRemoteSpanContext(context.Background(), parent) _, span := tr.Start(ctx, "foo") defer span.End() // Verify span implements ReadOnlySpan. rw, ok := span.(ReadWriteSpan) require.True(t, ok) // Verify the span can be read from. assert.False(t, rw.StartTime().IsZero()) // Verify the span can be written to. rw.SetName("bar") assert.Equal(t, "bar", rw.Name()) // NOTE: This function tests ReadWriteSpan which is an interface which // embeds trace.Span and ReadOnlySpan. 
Since both of these interfaces have // their own tests, there is no point in testing all the possible methods // available via ReadWriteSpan as doing so would mean creating a lot of // duplication. } func TestAddEventsWithMoreAttributesThanLimit(t *testing.T) { te := NewTestExporter() sl := NewSpanLimits() sl.AttributePerEventCountLimit = 2 tp := NewTracerProvider( WithSpanLimits(sl), WithSyncer(te), WithResource(resource.Empty()), ) span := startSpan(tp, "AddSpanEventWithOverLimitedAttributes") span.AddEvent("test1", trace.WithAttributes( attribute.Bool("key1", true), attribute.String("key2", "value2"), )) // Parts of the attribute should be discard span.AddEvent("test2", trace.WithAttributes( attribute.Bool("key1", true), attribute.String("key2", "value2"), attribute.String("key3", "value3"), attribute.String("key4", "value4"), )) got, err := endSpan(te, span) if err != nil { t.Fatal(err) } for i := range got.Events() { if !checkTime(&got.Events()[i].Time) { t.Error("exporting span: expected nonzero Event Time") } } want := &snapshot{ spanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: tid, TraceFlags: 0x1, }), parent: sc.WithRemote(true), name: "span0", attributes: nil, events: []Event{ { Name: "test1", Attributes: []attribute.KeyValue{ attribute.Bool("key1", true), attribute.String("key2", "value2"), }, }, { Name: "test2", Attributes: []attribute.KeyValue{ attribute.Bool("key1", true), attribute.String("key2", "value2"), }, DroppedAttributeCount: 2, }, }, spanKind: trace.SpanKindInternal, instrumentationScope: instrumentation.Scope{Name: "AddSpanEventWithOverLimitedAttributes"}, } if diff := cmpDiff(got, want); diff != "" { t.Errorf("SetSpanAttributesOverLimit: -got +want %s", diff) } } func TestAddLinksWithMoreAttributesThanLimit(t *testing.T) { te := NewTestExporter() sl := NewSpanLimits() sl.AttributePerLinkCountLimit = 1 tp := NewTracerProvider( WithSpanLimits(sl), WithSyncer(te), WithResource(resource.Empty()), ) k1v1 := attribute.String("key1", "value1") k2v2 := attribute.String("key2", "value2") k3v3 := attribute.String("key3", "value3") k4v4 := attribute.String("key4", "value4") sc1 := trace.NewSpanContext(trace.SpanContextConfig{TraceID: trace.TraceID([16]byte{1, 1}), SpanID: trace.SpanID{3}}) sc2 := trace.NewSpanContext(trace.SpanContextConfig{TraceID: trace.TraceID([16]byte{1, 1}), SpanID: trace.SpanID{3}}) span := startSpan(tp, "Links", trace.WithLinks([]trace.Link{ {SpanContext: sc1, Attributes: []attribute.KeyValue{k1v1, k2v2}}, {SpanContext: sc2, Attributes: []attribute.KeyValue{k2v2, k3v3, k4v4}}, }...)) got, err := endSpan(te, span) if err != nil { t.Fatal(err) } want := &snapshot{ spanContext: trace.NewSpanContext(trace.SpanContextConfig{ TraceID: tid, TraceFlags: 0x1, }), parent: sc.WithRemote(true), name: "span0", links: []Link{ { SpanContext: sc1, Attributes: []attribute.KeyValue{k1v1}, DroppedAttributeCount: 1, }, { SpanContext: sc2, Attributes: []attribute.KeyValue{k2v2}, DroppedAttributeCount: 2, }, }, spanKind: trace.SpanKindInternal, instrumentationScope: instrumentation.Scope{Name: "Links"}, } if diff := cmpDiff(got, want); diff != "" { t.Errorf("Link: -got +want %s", diff) } } type stateSampler struct { prefix string f func(trace.TraceState) trace.TraceState } func (s *stateSampler) ShouldSample(p SamplingParameters) SamplingResult { decision := Drop if strings.HasPrefix(p.Name, s.prefix) { decision = RecordAndSample } ts := s.f(trace.SpanContextFromContext(p.ParentContext).TraceState()) return SamplingResult{Decision: decision, Tracestate: 
ts} } func (s stateSampler) Description() string { return "stateSampler" } // Check that a new span propagates the SamplerResult.TraceState. func TestSamplerTraceState(t *testing.T) { mustTS := func(ts trace.TraceState, err error) trace.TraceState { require.NoError(t, err) return ts } makeInserter := func(k, v, prefix string) Sampler { return &stateSampler{ prefix: prefix, f: func(t trace.TraceState) trace.TraceState { return mustTS(t.Insert(k, v)) }, } } makeDeleter := func(k, prefix string) Sampler { return &stateSampler{ prefix: prefix, f: func(t trace.TraceState) trace.TraceState { return t.Delete(k) }, } } clearer := func(prefix string) Sampler { return &stateSampler{ prefix: prefix, f: func(t trace.TraceState) trace.TraceState { return trace.TraceState{} }, } } tests := []struct { name string sampler Sampler spanName string input trace.TraceState want trace.TraceState exportSpan bool }{ { name: "alwaysOn", sampler: AlwaysSample(), input: mustTS(trace.ParseTraceState("k1=v1")), want: mustTS(trace.ParseTraceState("k1=v1")), exportSpan: true, }, { name: "alwaysOff", sampler: NeverSample(), input: mustTS(trace.ParseTraceState("k1=v1")), want: mustTS(trace.ParseTraceState("k1=v1")), exportSpan: false, }, { name: "insertKeySampled", sampler: makeInserter("k2", "v2", "span"), spanName: "span0", input: mustTS(trace.ParseTraceState("k1=v1")), want: mustTS(trace.ParseTraceState("k2=v2,k1=v1")), exportSpan: true, }, { name: "insertKeyDropped", sampler: makeInserter("k2", "v2", "span"), spanName: "nospan0", input: mustTS(trace.ParseTraceState("k1=v1")), want: mustTS(trace.ParseTraceState("k2=v2,k1=v1")), exportSpan: false, }, { name: "deleteKeySampled", sampler: makeDeleter("k1", "span"), spanName: "span0", input: mustTS(trace.ParseTraceState("k1=v1,k2=v2")), want: mustTS(trace.ParseTraceState("k2=v2")), exportSpan: true, }, { name: "deleteKeyDropped", sampler: makeDeleter("k1", "span"), spanName: "nospan0", input: mustTS(trace.ParseTraceState("k1=v1,k2=v2,k3=v3")), want: mustTS(trace.ParseTraceState("k2=v2,k3=v3")), exportSpan: false, }, { name: "clearer", sampler: clearer("span"), spanName: "span0", input: mustTS(trace.ParseTraceState("k1=v1,k3=v3")), want: mustTS(trace.ParseTraceState("")), exportSpan: true, }, } for _, ts := range tests { ts := ts t.Run(ts.name, func(t *testing.T) { te := NewTestExporter() tp := NewTracerProvider(WithSampler(ts.sampler), WithSyncer(te), WithResource(resource.Empty())) tr := tp.Tracer("TraceState") sc1 := trace.NewSpanContext(trace.SpanContextConfig{ TraceID: tid, SpanID: sid, TraceFlags: trace.FlagsSampled, TraceState: ts.input, }) ctx := trace.ContextWithRemoteSpanContext(context.Background(), sc1) _, span := tr.Start(ctx, ts.spanName) // span's TraceState should be set regardless of Sampled/NonSampled state. 
require.Equal(t, ts.want, span.SpanContext().TraceState()) span.End() got := te.Spans() if len(got) > 0 != ts.exportSpan { t.Errorf("unexpected number of exported spans %d", len(got)) } if len(got) == 0 { return } receivedState := got[0].SpanContext().TraceState() if diff := cmpDiff(receivedState, ts.want); diff != "" { t.Errorf("TraceState not propagated: -got +want %s", diff) } }) } } type testIDGenerator struct { traceID int spanID int } func (gen *testIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) { traceIDHex := fmt.Sprintf("%032x", gen.traceID) traceID, _ := trace.TraceIDFromHex(traceIDHex) gen.traceID++ spanID := gen.NewSpanID(ctx, traceID) return traceID, spanID } func (gen *testIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID { spanIDHex := fmt.Sprintf("%016x", gen.spanID) spanID, _ := trace.SpanIDFromHex(spanIDHex) gen.spanID++ return spanID } var _ IDGenerator = (*testIDGenerator)(nil) func TestWithIDGenerator(t *testing.T) { const ( startTraceID = 1 startSpanID = 1 numSpan = 10 ) gen := &testIDGenerator{traceID: startSpanID, spanID: startSpanID} for i := 0; i < numSpan; i++ { te := NewTestExporter() tp := NewTracerProvider( WithSyncer(te), WithIDGenerator(gen), ) span := startSpan(tp, "TestWithIDGenerator") got, err := strconv.ParseUint(span.SpanContext().SpanID().String(), 16, 64) require.NoError(t, err) want := uint64(startSpanID + i) assert.Equal(t, got, want) _, err = endSpan(te, span) require.NoError(t, err) } } func TestEmptyRecordingSpanAttributes(t *testing.T) { assert.Nil(t, (&recordingSpan{}).Attributes()) } func TestEmptyRecordingSpanDroppedAttributes(t *testing.T) { assert.Equal(t, 0, (&recordingSpan{}).DroppedAttributes()) } opentelemetry-go-1.21.0/sdk/trace/tracer.go000066400000000000000000000120471452547353200205760ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace // import "go.opentelemetry.io/otel/sdk/trace" import ( "context" "time" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) type tracer struct { embedded.Tracer provider *TracerProvider instrumentationScope instrumentation.Scope } var _ trace.Tracer = &tracer{} // Start starts a Span and returns it along with a context containing it. // // The Span is created with the provided name and as a child of any existing // span context found in the passed context. The created Span will be // configured appropriately by any SpanOption passed. func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanStartOption) (context.Context, trace.Span) { config := trace.NewSpanStartConfig(options...) if ctx == nil { // Prevent trace.ContextWithSpan from panicking. ctx = context.Background() } // For local spans created by this SDK, track child span count. 
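// Only spans created by this SDK are *recordingSpan; remote or foreign
// parent spans in the context are intentionally not counted.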
if p := trace.SpanFromContext(ctx); p != nil { if sdkSpan, ok := p.(*recordingSpan); ok { sdkSpan.addChild() } } s := tr.newSpan(ctx, name, &config) if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() { sps := tr.provider.getSpanProcessors() for _, sp := range sps { sp.sp.OnStart(ctx, rw) } } if rtt, ok := s.(runtimeTracer); ok { ctx = rtt.runtimeTrace(ctx) } return trace.ContextWithSpan(ctx, s), s } type runtimeTracer interface { // runtimeTrace starts a "runtime/trace".Task for the span and // returns a context containing the task. runtimeTrace(ctx context.Context) context.Context } // newSpan returns a new configured span. func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanConfig) trace.Span { // If told explicitly to make this a new root use a zero value SpanContext // as a parent which contains an invalid trace ID and is not remote. var psc trace.SpanContext if config.NewRoot() { ctx = trace.ContextWithSpanContext(ctx, psc) } else { psc = trace.SpanContextFromContext(ctx) } // If there is a valid parent trace ID, use it to ensure the continuity of // the trace. Always generate a new span ID so other components can rely // on a unique span ID, even if the Span is non-recording. var tid trace.TraceID var sid trace.SpanID if !psc.TraceID().IsValid() { tid, sid = tr.provider.idGenerator.NewIDs(ctx) } else { tid = psc.TraceID() sid = tr.provider.idGenerator.NewSpanID(ctx, tid) } samplingResult := tr.provider.sampler.ShouldSample(SamplingParameters{ ParentContext: ctx, TraceID: tid, Name: name, Kind: config.SpanKind(), Attributes: config.Attributes(), Links: config.Links(), }) scc := trace.SpanContextConfig{ TraceID: tid, SpanID: sid, TraceState: samplingResult.Tracestate, } if isSampled(samplingResult) { scc.TraceFlags = psc.TraceFlags() | trace.FlagsSampled } else { scc.TraceFlags = psc.TraceFlags() &^ trace.FlagsSampled } sc := trace.NewSpanContext(scc) if !isRecording(samplingResult) { return tr.newNonRecordingSpan(sc) } return tr.newRecordingSpan(psc, sc, name, samplingResult, config) } // newRecordingSpan returns a new configured recordingSpan. func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr SamplingResult, config *trace.SpanConfig) *recordingSpan { startTime := config.Timestamp() if startTime.IsZero() { startTime = time.Now() } s := &recordingSpan{ // Do not pre-allocate the attributes slice here! Doing so will // allocate memory that is likely never going to be used, or if used, // will be over-sized. The default Go compiler has been tested to // dynamically allocate needed space very well. Benchmarking has shown // it to be more performant than what we can predetermine here, // especially for the common use case of few to no added // attributes. parent: psc, spanContext: sc, spanKind: trace.ValidateSpanKind(config.SpanKind()), name: name, startTime: startTime, events: newEvictedQueue(tr.provider.spanLimits.EventCountLimit), links: newEvictedQueue(tr.provider.spanLimits.LinkCountLimit), tracer: tr, } for _, l := range config.Links() { s.addLink(l) } s.SetAttributes(sr.Attributes...) s.SetAttributes(config.Attributes()...) return s } // newNonRecordingSpan returns a new configured nonRecordingSpan. 
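// The returned span retains the tracer, keeping its provider reachable, but
// performs no recording of attributes, events, or links.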
func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan { return nonRecordingSpan{tracer: tr, sc: sc} } opentelemetry-go-1.21.0/sdk/trace/tracetest/000077500000000000000000000000001452547353200207615ustar00rootroot00000000000000opentelemetry-go-1.21.0/sdk/trace/tracetest/exporter.go000066400000000000000000000050601452547353200231610ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package tracetest is a testing helper package for the SDK. User can // configure no-op or in-memory exporters to verify different SDK behaviors or // custom instrumentation. package tracetest // import "go.opentelemetry.io/otel/sdk/trace/tracetest" import ( "context" "sync" "go.opentelemetry.io/otel/sdk/trace" ) var _ trace.SpanExporter = (*NoopExporter)(nil) // NewNoopExporter returns a new no-op exporter. func NewNoopExporter() *NoopExporter { return new(NoopExporter) } // NoopExporter is an exporter that drops all received spans and performs no // action. type NoopExporter struct{} // ExportSpans handles export of spans by dropping them. func (nsb *NoopExporter) ExportSpans(context.Context, []trace.ReadOnlySpan) error { return nil } // Shutdown stops the exporter by doing nothing. func (nsb *NoopExporter) Shutdown(context.Context) error { return nil } var _ trace.SpanExporter = (*InMemoryExporter)(nil) // NewInMemoryExporter returns a new InMemoryExporter. func NewInMemoryExporter() *InMemoryExporter { return new(InMemoryExporter) } // InMemoryExporter is an exporter that stores all received spans in-memory. type InMemoryExporter struct { mu sync.Mutex ss SpanStubs } // ExportSpans handles export of spans by storing them in memory. func (imsb *InMemoryExporter) ExportSpans(_ context.Context, spans []trace.ReadOnlySpan) error { imsb.mu.Lock() defer imsb.mu.Unlock() imsb.ss = append(imsb.ss, SpanStubsFromReadOnlySpans(spans)...) return nil } // Shutdown stops the exporter by clearing spans held in memory. func (imsb *InMemoryExporter) Shutdown(context.Context) error { imsb.Reset() return nil } // Reset the current in-memory storage. func (imsb *InMemoryExporter) Reset() { imsb.mu.Lock() defer imsb.mu.Unlock() imsb.ss = nil } // GetSpans returns the current in-memory stored spans. func (imsb *InMemoryExporter) GetSpans() SpanStubs { imsb.mu.Lock() defer imsb.mu.Unlock() ret := make(SpanStubs, len(imsb.ss)) copy(ret, imsb.ss) return ret } opentelemetry-go-1.21.0/sdk/trace/tracetest/exporter_test.go000066400000000000000000000036301452547353200242210ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package tracetest import ( "context" "fmt" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // TestNoop tests only that the no-op does not crash in different scenarios. func TestNoop(t *testing.T) { nsb := NewNoopExporter() require.NoError(t, nsb.ExportSpans(context.Background(), nil)) require.NoError(t, nsb.ExportSpans(context.Background(), make(SpanStubs, 10).Snapshots())) require.NoError(t, nsb.ExportSpans(context.Background(), make(SpanStubs, 0, 10).Snapshots())) } func TestNewInMemoryExporter(t *testing.T) { imsb := NewInMemoryExporter() require.NoError(t, imsb.ExportSpans(context.Background(), nil)) assert.Len(t, imsb.GetSpans(), 0) input := make(SpanStubs, 10) for i := 0; i < 10; i++ { input[i] = SpanStub{Name: fmt.Sprintf("span %d", i)} } require.NoError(t, imsb.ExportSpans(context.Background(), input.Snapshots())) sds := imsb.GetSpans() assert.Len(t, sds, 10) for i, sd := range sds { assert.Equal(t, input[i], sd) } imsb.Reset() // Ensure that operations on the internal storage does not change the previously returned value. assert.Len(t, sds, 10) assert.Len(t, imsb.GetSpans(), 0) require.NoError(t, imsb.ExportSpans(context.Background(), input.Snapshots()[0:1])) sds = imsb.GetSpans() assert.Len(t, sds, 1) assert.Equal(t, input[0], sds[0]) } opentelemetry-go-1.21.0/sdk/trace/tracetest/recorder.go000066400000000000000000000050031452547353200231130ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package tracetest // import "go.opentelemetry.io/otel/sdk/trace/tracetest" import ( "context" "sync" sdktrace "go.opentelemetry.io/otel/sdk/trace" ) // SpanRecorder records started and ended spans. type SpanRecorder struct { startedMu sync.RWMutex started []sdktrace.ReadWriteSpan endedMu sync.RWMutex ended []sdktrace.ReadOnlySpan } var _ sdktrace.SpanProcessor = (*SpanRecorder)(nil) // NewSpanRecorder returns a new initialized SpanRecorder. func NewSpanRecorder() *SpanRecorder { return new(SpanRecorder) } // OnStart records started spans. // // This method is safe to be called concurrently. func (sr *SpanRecorder) OnStart(_ context.Context, s sdktrace.ReadWriteSpan) { sr.startedMu.Lock() defer sr.startedMu.Unlock() sr.started = append(sr.started, s) } // OnEnd records completed spans. // // This method is safe to be called concurrently. func (sr *SpanRecorder) OnEnd(s sdktrace.ReadOnlySpan) { sr.endedMu.Lock() defer sr.endedMu.Unlock() sr.ended = append(sr.ended, s) } // Shutdown does nothing. // // This method is safe to be called concurrently. 
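// It exists so SpanRecorder satisfies the sdktrace.SpanProcessor interface
// and can be registered directly with a provider, for example (illustrative
// usage only):
//
//	sr := NewSpanRecorder()
//	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr))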
func (sr *SpanRecorder) Shutdown(context.Context) error { return nil } // ForceFlush does nothing. // // This method is safe to be called concurrently. func (sr *SpanRecorder) ForceFlush(context.Context) error { return nil } // Started returns a copy of all started spans that have been recorded. // // This method is safe to be called concurrently. func (sr *SpanRecorder) Started() []sdktrace.ReadWriteSpan { sr.startedMu.RLock() defer sr.startedMu.RUnlock() dst := make([]sdktrace.ReadWriteSpan, len(sr.started)) copy(dst, sr.started) return dst } // Ended returns a copy of all ended spans that have been recorded. // // This method is safe to be called concurrently. func (sr *SpanRecorder) Ended() []sdktrace.ReadOnlySpan { sr.endedMu.RLock() defer sr.endedMu.RUnlock() dst := make([]sdktrace.ReadOnlySpan, len(sr.ended)) copy(dst, sr.ended) return dst } opentelemetry-go-1.21.0/sdk/trace/tracetest/recorder_test.go000066400000000000000000000053001452547353200241520ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package tracetest import ( "context" "sync" "testing" "github.com/stretchr/testify/assert" sdktrace "go.opentelemetry.io/otel/sdk/trace" ) type rwSpan struct { sdktrace.ReadWriteSpan } func TestSpanRecorderOnStartAppends(t *testing.T) { s0, s1 := new(rwSpan), new(rwSpan) ctx := context.Background() sr := new(SpanRecorder) assert.Len(t, sr.started, 0) sr.OnStart(ctx, s0) assert.Len(t, sr.started, 1) sr.OnStart(ctx, s1) assert.Len(t, sr.started, 2) // Ensure order correct. started := sr.Started() assert.Same(t, s0, started[0]) assert.Same(t, s1, started[1]) } type roSpan struct { sdktrace.ReadOnlySpan } func TestSpanRecorderOnEndAppends(t *testing.T) { s0, s1 := new(roSpan), new(roSpan) sr := new(SpanRecorder) assert.Len(t, sr.ended, 0) sr.OnEnd(s0) assert.Len(t, sr.ended, 1) sr.OnEnd(s1) assert.Len(t, sr.ended, 2) // Ensure order correct. 
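// Ended returns a defensive copy, so spans should appear in the order the
// OnEnd calls were made.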
ended := sr.Ended() assert.Same(t, s0, ended[0]) assert.Same(t, s1, ended[1]) } func TestSpanRecorderShutdownNoError(t *testing.T) { ctx := context.Background() assert.NoError(t, new(SpanRecorder).Shutdown(ctx)) var c context.CancelFunc ctx, c = context.WithCancel(ctx) c() assert.NoError(t, new(SpanRecorder).Shutdown(ctx)) } func TestSpanRecorderForceFlushNoError(t *testing.T) { ctx := context.Background() assert.NoError(t, new(SpanRecorder).ForceFlush(ctx)) var c context.CancelFunc ctx, c = context.WithCancel(ctx) c() assert.NoError(t, new(SpanRecorder).ForceFlush(ctx)) } func runConcurrently(funcs ...func()) { var wg sync.WaitGroup for _, f := range funcs { wg.Add(1) go func(f func()) { f() wg.Done() }(f) } wg.Wait() } func TestEndingConcurrentSafe(t *testing.T) { sr := NewSpanRecorder() runConcurrently( func() { sr.OnEnd(new(roSpan)) }, func() { sr.OnEnd(new(roSpan)) }, func() { sr.Ended() }, ) assert.Len(t, sr.Ended(), 2) } func TestStartingConcurrentSafe(t *testing.T) { sr := NewSpanRecorder() ctx := context.Background() runConcurrently( func() { sr.OnStart(ctx, new(rwSpan)) }, func() { sr.OnStart(ctx, new(rwSpan)) }, func() { sr.Started() }, ) assert.Len(t, sr.Started(), 2) } opentelemetry-go-1.21.0/sdk/trace/tracetest/span.go000066400000000000000000000134001452547353200222470ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package tracetest // import "go.opentelemetry.io/otel/sdk/trace/tracetest" import ( "time" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/resource" tracesdk "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/trace" ) // SpanStubs is a slice of SpanStub use for testing an SDK. type SpanStubs []SpanStub // SpanStubsFromReadOnlySpans returns SpanStubs populated from ro. func SpanStubsFromReadOnlySpans(ro []tracesdk.ReadOnlySpan) SpanStubs { if len(ro) == 0 { return nil } s := make(SpanStubs, 0, len(ro)) for _, r := range ro { s = append(s, SpanStubFromReadOnlySpan(r)) } return s } // Snapshots returns s as a slice of ReadOnlySpans. func (s SpanStubs) Snapshots() []tracesdk.ReadOnlySpan { if len(s) == 0 { return nil } ro := make([]tracesdk.ReadOnlySpan, len(s)) for i := 0; i < len(s); i++ { ro[i] = s[i].Snapshot() } return ro } // SpanStub is a stand-in for a Span. type SpanStub struct { Name string SpanContext trace.SpanContext Parent trace.SpanContext SpanKind trace.SpanKind StartTime time.Time EndTime time.Time Attributes []attribute.KeyValue Events []tracesdk.Event Links []tracesdk.Link Status tracesdk.Status DroppedAttributes int DroppedEvents int DroppedLinks int ChildSpanCount int Resource *resource.Resource InstrumentationLibrary instrumentation.Library } // SpanStubFromReadOnlySpan returns a SpanStub populated from ro. 
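// A nil ro yields a zero-value SpanStub.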
func SpanStubFromReadOnlySpan(ro tracesdk.ReadOnlySpan) SpanStub { if ro == nil { return SpanStub{} } return SpanStub{ Name: ro.Name(), SpanContext: ro.SpanContext(), Parent: ro.Parent(), SpanKind: ro.SpanKind(), StartTime: ro.StartTime(), EndTime: ro.EndTime(), Attributes: ro.Attributes(), Events: ro.Events(), Links: ro.Links(), Status: ro.Status(), DroppedAttributes: ro.DroppedAttributes(), DroppedEvents: ro.DroppedEvents(), DroppedLinks: ro.DroppedLinks(), ChildSpanCount: ro.ChildSpanCount(), Resource: ro.Resource(), InstrumentationLibrary: ro.InstrumentationScope(), } } // Snapshot returns a read-only copy of the SpanStub. func (s SpanStub) Snapshot() tracesdk.ReadOnlySpan { return spanSnapshot{ name: s.Name, spanContext: s.SpanContext, parent: s.Parent, spanKind: s.SpanKind, startTime: s.StartTime, endTime: s.EndTime, attributes: s.Attributes, events: s.Events, links: s.Links, status: s.Status, droppedAttributes: s.DroppedAttributes, droppedEvents: s.DroppedEvents, droppedLinks: s.DroppedLinks, childSpanCount: s.ChildSpanCount, resource: s.Resource, instrumentationScope: s.InstrumentationLibrary, } } type spanSnapshot struct { // Embed the interface to implement the private method. tracesdk.ReadOnlySpan name string spanContext trace.SpanContext parent trace.SpanContext spanKind trace.SpanKind startTime time.Time endTime time.Time attributes []attribute.KeyValue events []tracesdk.Event links []tracesdk.Link status tracesdk.Status droppedAttributes int droppedEvents int droppedLinks int childSpanCount int resource *resource.Resource instrumentationScope instrumentation.Scope } func (s spanSnapshot) Name() string { return s.name } func (s spanSnapshot) SpanContext() trace.SpanContext { return s.spanContext } func (s spanSnapshot) Parent() trace.SpanContext { return s.parent } func (s spanSnapshot) SpanKind() trace.SpanKind { return s.spanKind } func (s spanSnapshot) StartTime() time.Time { return s.startTime } func (s spanSnapshot) EndTime() time.Time { return s.endTime } func (s spanSnapshot) Attributes() []attribute.KeyValue { return s.attributes } func (s spanSnapshot) Links() []tracesdk.Link { return s.links } func (s spanSnapshot) Events() []tracesdk.Event { return s.events } func (s spanSnapshot) Status() tracesdk.Status { return s.status } func (s spanSnapshot) DroppedAttributes() int { return s.droppedAttributes } func (s spanSnapshot) DroppedLinks() int { return s.droppedLinks } func (s spanSnapshot) DroppedEvents() int { return s.droppedEvents } func (s spanSnapshot) ChildSpanCount() int { return s.childSpanCount } func (s spanSnapshot) Resource() *resource.Resource { return s.resource } func (s spanSnapshot) InstrumentationScope() instrumentation.Scope { return s.instrumentationScope } func (s spanSnapshot) InstrumentationLibrary() instrumentation.Library { return s.instrumentationScope } opentelemetry-go-1.21.0/sdk/trace/util_test.go000066400000000000000000000015211452547353200213250ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package trace_test import ( "testing" sdktrace "go.opentelemetry.io/otel/sdk/trace" ) func basicTracerProvider(t *testing.T) *sdktrace.TracerProvider { tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(sdktrace.AlwaysSample())) return tp } opentelemetry-go-1.21.0/sdk/trace/version.go000066400000000000000000000014101452547353200207730ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace // import "go.opentelemetry.io/otel/sdk/trace" // version is the current release version of the metric SDK in use. func version() string { return "1.16.0-rc.1" } opentelemetry-go-1.21.0/sdk/trace/version_test.go000066400000000000000000000017711452547353200220440ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace import ( "regexp" "testing" "github.com/stretchr/testify/assert" ) var versionRegex = regexp.MustCompile(`^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)` + `(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)` + `(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?` + `(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`) func TestVersionSemver(t *testing.T) { v := version() assert.Regexp(t, versionRegex, v) } opentelemetry-go-1.21.0/sdk/version.go000066400000000000000000000014021452547353200176760ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. func Version() string { return "1.21.0" } opentelemetry-go-1.21.0/sdk/version_test.go000066400000000000000000000021221452547353200207350ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package sdk_test import ( "regexp" "testing" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/sdk" ) // regex taken from https://github.com/Masterminds/semver/tree/v3.1.1 var versionRegex = regexp.MustCompile(`^v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?$`) func TestVersionSemver(t *testing.T) { v := sdk.Version() assert.NotNil(t, versionRegex.FindStringSubmatch(v), "version is not semver: %s", v) } opentelemetry-go-1.21.0/semconv/000077500000000000000000000000001452547353200165565ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/internal/000077500000000000000000000000001452547353200203725ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/internal/http.go000066400000000000000000000260601452547353200217040ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal // import "go.opentelemetry.io/otel/semconv/internal" import ( "fmt" "net" "net/http" "strconv" "strings" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" ) // SemanticConventions are the semantic convention values defined for a // version of the OpenTelemetry specification. type SemanticConventions struct { EnduserIDKey attribute.Key HTTPClientIPKey attribute.Key HTTPFlavorKey attribute.Key HTTPHostKey attribute.Key HTTPMethodKey attribute.Key HTTPRequestContentLengthKey attribute.Key HTTPRouteKey attribute.Key HTTPSchemeHTTP attribute.KeyValue HTTPSchemeHTTPS attribute.KeyValue HTTPServerNameKey attribute.Key HTTPStatusCodeKey attribute.Key HTTPTargetKey attribute.Key HTTPURLKey attribute.Key HTTPUserAgentKey attribute.Key NetHostIPKey attribute.Key NetHostNameKey attribute.Key NetHostPortKey attribute.Key NetPeerIPKey attribute.Key NetPeerNameKey attribute.Key NetPeerPortKey attribute.Key NetTransportIP attribute.KeyValue NetTransportOther attribute.KeyValue NetTransportTCP attribute.KeyValue NetTransportUDP attribute.KeyValue NetTransportUnix attribute.KeyValue } // NetAttributesFromHTTPRequest generates attributes of the net // namespace as specified by the OpenTelemetry specification for a // span. The network parameter is a string that net.Dial function // from standard library can understand. 
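// Unrecognized network values are reported with the "other" transport
// attribute. For example (illustrative only, req being an *http.Request):
//
//	attrs := sc.NetAttributesFromHTTPRequest("tcp", req)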
func (sc *SemanticConventions) NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { attrs := []attribute.KeyValue{} switch network { case "tcp", "tcp4", "tcp6": attrs = append(attrs, sc.NetTransportTCP) case "udp", "udp4", "udp6": attrs = append(attrs, sc.NetTransportUDP) case "ip", "ip4", "ip6": attrs = append(attrs, sc.NetTransportIP) case "unix", "unixgram", "unixpacket": attrs = append(attrs, sc.NetTransportUnix) default: attrs = append(attrs, sc.NetTransportOther) } peerIP, peerName, peerPort := hostIPNamePort(request.RemoteAddr) if peerIP != "" { attrs = append(attrs, sc.NetPeerIPKey.String(peerIP)) } if peerName != "" { attrs = append(attrs, sc.NetPeerNameKey.String(peerName)) } if peerPort != 0 { attrs = append(attrs, sc.NetPeerPortKey.Int(peerPort)) } hostIP, hostName, hostPort := "", "", 0 for _, someHost := range []string{request.Host, request.Header.Get("Host"), request.URL.Host} { hostIP, hostName, hostPort = hostIPNamePort(someHost) if hostIP != "" || hostName != "" || hostPort != 0 { break } } if hostIP != "" { attrs = append(attrs, sc.NetHostIPKey.String(hostIP)) } if hostName != "" { attrs = append(attrs, sc.NetHostNameKey.String(hostName)) } if hostPort != 0 { attrs = append(attrs, sc.NetHostPortKey.Int(hostPort)) } return attrs } // hostIPNamePort extracts the IP address, name and (optional) port from hostWithPort. // It handles both IPv4 and IPv6 addresses. If the host portion is not recognized // as a valid IPv4 or IPv6 address, the `ip` result will be empty and the // host portion will instead be returned in `name`. func hostIPNamePort(hostWithPort string) (ip string, name string, port int) { var ( hostPart, portPart string parsedPort uint64 err error ) if hostPart, portPart, err = net.SplitHostPort(hostWithPort); err != nil { hostPart, portPart = hostWithPort, "" } if parsedIP := net.ParseIP(hostPart); parsedIP != nil { ip = parsedIP.String() } else { name = hostPart } if parsedPort, err = strconv.ParseUint(portPart, 10, 16); err == nil { port = int(parsedPort) } return } // EndUserAttributesFromHTTPRequest generates attributes of the // enduser namespace as specified by the OpenTelemetry specification // for a span. func (sc *SemanticConventions) EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { if username, _, ok := request.BasicAuth(); ok { return []attribute.KeyValue{sc.EnduserIDKey.String(username)} } return nil } // HTTPClientAttributesFromHTTPRequest generates attributes of the // http namespace as specified by the OpenTelemetry specification for // a span on the client side. func (sc *SemanticConventions) HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { attrs := []attribute.KeyValue{} // remove any username/password info that may be in the URL // before adding it to the attributes userinfo := request.URL.User request.URL.User = nil attrs = append(attrs, sc.HTTPURLKey.String(request.URL.String())) // restore any username/password info that was removed request.URL.User = userinfo return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...) 
} func (sc *SemanticConventions) httpCommonAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { attrs := []attribute.KeyValue{} if ua := request.UserAgent(); ua != "" { attrs = append(attrs, sc.HTTPUserAgentKey.String(ua)) } if request.ContentLength > 0 { attrs = append(attrs, sc.HTTPRequestContentLengthKey.Int64(request.ContentLength)) } return append(attrs, sc.httpBasicAttributesFromHTTPRequest(request)...) } func (sc *SemanticConventions) httpBasicAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { // as these attributes are used by HTTPServerMetricAttributesFromHTTPRequest, they should be low-cardinality attrs := []attribute.KeyValue{} if request.TLS != nil { attrs = append(attrs, sc.HTTPSchemeHTTPS) } else { attrs = append(attrs, sc.HTTPSchemeHTTP) } if request.Host != "" { attrs = append(attrs, sc.HTTPHostKey.String(request.Host)) } else if request.URL != nil && request.URL.Host != "" { attrs = append(attrs, sc.HTTPHostKey.String(request.URL.Host)) } flavor := "" if request.ProtoMajor == 1 { flavor = fmt.Sprintf("1.%d", request.ProtoMinor) } else if request.ProtoMajor == 2 { flavor = "2" } if flavor != "" { attrs = append(attrs, sc.HTTPFlavorKey.String(flavor)) } if request.Method != "" { attrs = append(attrs, sc.HTTPMethodKey.String(request.Method)) } else { attrs = append(attrs, sc.HTTPMethodKey.String(http.MethodGet)) } return attrs } // HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes // to be used with server-side HTTP metrics. func (sc *SemanticConventions) HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { attrs := []attribute.KeyValue{} if serverName != "" { attrs = append(attrs, sc.HTTPServerNameKey.String(serverName)) } return append(attrs, sc.httpBasicAttributesFromHTTPRequest(request)...) } // HTTPServerAttributesFromHTTPRequest generates attributes of the // http namespace as specified by the OpenTelemetry specification for // a span on the server side. Currently, only basic authentication is // supported. func (sc *SemanticConventions) HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { attrs := []attribute.KeyValue{ sc.HTTPTargetKey.String(request.RequestURI), } if serverName != "" { attrs = append(attrs, sc.HTTPServerNameKey.String(serverName)) } if route != "" { attrs = append(attrs, sc.HTTPRouteKey.String(route)) } if values := request.Header["X-Forwarded-For"]; len(values) > 0 { addr := values[0] if i := strings.Index(addr, ","); i > 0 { addr = addr[:i] } attrs = append(attrs, sc.HTTPClientIPKey.String(addr)) } return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...) } // HTTPAttributesFromHTTPStatusCode generates attributes of the http // namespace as specified by the OpenTelemetry specification for a // span. 
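//
// For example, recording a response status on a span might look like the
// following, where span and sc are placeholders for an existing trace.Span
// and a configured *SemanticConventions:
//
//	span.SetAttributes(sc.HTTPAttributesFromHTTPStatusCode(http.StatusOK)...)
//	code, msg := SpanStatusFromHTTPStatusCodeAndSpanKind(http.StatusOK, trace.SpanKindServer)
//	span.SetStatus(code, msg)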
func (sc *SemanticConventions) HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { attrs := []attribute.KeyValue{ sc.HTTPStatusCodeKey.Int(code), } return attrs } type codeRange struct { fromInclusive int toInclusive int } func (r codeRange) contains(code int) bool { return r.fromInclusive <= code && code <= r.toInclusive } var validRangesPerCategory = map[int][]codeRange{ 1: { {http.StatusContinue, http.StatusEarlyHints}, }, 2: { {http.StatusOK, http.StatusAlreadyReported}, {http.StatusIMUsed, http.StatusIMUsed}, }, 3: { {http.StatusMultipleChoices, http.StatusUseProxy}, {http.StatusTemporaryRedirect, http.StatusPermanentRedirect}, }, 4: { {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful… {http.StatusMisdirectedRequest, http.StatusUpgradeRequired}, {http.StatusPreconditionRequired, http.StatusTooManyRequests}, {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge}, {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons}, }, 5: { {http.StatusInternalServerError, http.StatusLoopDetected}, {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired}, }, } // SpanStatusFromHTTPStatusCode generates a status code and a message // as specified by the OpenTelemetry specification for a span. func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { spanCode, valid := validateHTTPStatusCode(code) if !valid { return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) } return spanCode, "" } // SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message // as specified by the OpenTelemetry specification for a span. // Exclude 4xx for SERVER to set the appropriate status. func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { spanCode, valid := validateHTTPStatusCode(code) if !valid { return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) } category := code / 100 if spanKind == trace.SpanKindServer && category == 4 { return codes.Unset, "" } return spanCode, "" } // validateHTTPStatusCode validates the HTTP status code and returns // corresponding span status code. If the `code` is not a valid HTTP status // code, returns span status Error and false. func validateHTTPStatusCode(code int) (codes.Code, bool) { category := code / 100 ranges, ok := validRangesPerCategory[category] if !ok { return codes.Error, false } ok = false for _, crange := range ranges { ok = crange.contains(code) if ok { break } } if !ok { return codes.Error, false } if category > 0 && category < 4 { return codes.Unset, true } return codes.Error, true } opentelemetry-go-1.21.0/semconv/internal/http_test.go000066400000000000000000001213101452547353200227350ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
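// The tests below exercise the helpers defined above with a
// SemanticConventions value whose keys mirror the OpenTelemetry HTTP semantic
// conventions. As a rough sketch (sc, serverName, route and req are
// placeholders), span instrumentation would typically use the full,
// higher-cardinality attribute set while metric instrumentation uses the
// reduced, low-cardinality set:
//
//	spanAttrs := sc.HTTPServerAttributesFromHTTPRequest(serverName, route, req)
//	metricAttrs := sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, req)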
package internal import ( "crypto/tls" "net/http" "net/url" "strings" "testing" "go.opentelemetry.io/otel/trace" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" ) type tlsOption int const ( noTLS tlsOption = iota withTLS ) var sc = &SemanticConventions{ EnduserIDKey: attribute.Key("enduser.id"), HTTPClientIPKey: attribute.Key("http.client_ip"), HTTPFlavorKey: attribute.Key("http.flavor"), HTTPHostKey: attribute.Key("http.host"), HTTPMethodKey: attribute.Key("http.method"), HTTPRequestContentLengthKey: attribute.Key("http.request_content_length"), HTTPRouteKey: attribute.Key("http.route"), HTTPSchemeHTTP: attribute.String("http.scheme", "http"), HTTPSchemeHTTPS: attribute.String("http.scheme", "https"), HTTPServerNameKey: attribute.Key("http.server_name"), HTTPStatusCodeKey: attribute.Key("http.status_code"), HTTPTargetKey: attribute.Key("http.target"), HTTPURLKey: attribute.Key("http.url"), HTTPUserAgentKey: attribute.Key("http.user_agent"), NetHostIPKey: attribute.Key("net.host.ip"), NetHostNameKey: attribute.Key("net.host.name"), NetHostPortKey: attribute.Key("net.host.port"), NetPeerIPKey: attribute.Key("net.peer.ip"), NetPeerNameKey: attribute.Key("net.peer.name"), NetPeerPortKey: attribute.Key("net.peer.port"), NetTransportIP: attribute.String("net.transport", "ip"), NetTransportOther: attribute.String("net.transport", "other"), NetTransportTCP: attribute.String("net.transport", "ip_tcp"), NetTransportUDP: attribute.String("net.transport", "ip_udp"), NetTransportUnix: attribute.String("net.transport", "unix"), } func TestNetAttributesFromHTTPRequest(t *testing.T) { type testcase struct { name string network string method string requestURI string proto string remoteAddr string host string url *url.URL header http.Header expected []attribute.KeyValue } testcases := []testcase{ { name: "stripped, tcp", network: "tcp", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "", host: "", url: &url.URL{ Path: "/user/123", }, header: nil, expected: []attribute.KeyValue{ attribute.String("net.transport", "ip_tcp"), }, }, { name: "stripped, udp", network: "udp", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "", host: "", url: &url.URL{ Path: "/user/123", }, header: nil, expected: []attribute.KeyValue{ attribute.String("net.transport", "ip_udp"), }, }, { name: "stripped, ip", network: "ip", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "", host: "", url: &url.URL{ Path: "/user/123", }, header: nil, expected: []attribute.KeyValue{ attribute.String("net.transport", "ip"), }, }, { name: "stripped, unix", network: "unix", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "", host: "", url: &url.URL{ Path: "/user/123", }, header: nil, expected: []attribute.KeyValue{ attribute.String("net.transport", "unix"), }, }, { name: "stripped, other", network: "nih", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "", host: "", url: &url.URL{ Path: "/user/123", }, header: nil, expected: []attribute.KeyValue{ attribute.String("net.transport", "other"), }, }, { name: "with remote ipv4 and port", network: "tcp", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "1.2.3.4:56", host: "", url: &url.URL{ Path: "/user/123", }, header: nil, expected: []attribute.KeyValue{ attribute.String("net.transport", "ip_tcp"), attribute.String("net.peer.ip", "1.2.3.4"), attribute.Int("net.peer.port", 56), }, }, { 
name: "with remote ipv6 and port", network: "tcp", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "[fe80::0202:b3ff:fe1e:8329]:56", host: "", url: &url.URL{ Path: "/user/123", }, header: nil, expected: []attribute.KeyValue{ attribute.String("net.transport", "ip_tcp"), attribute.String("net.peer.ip", "fe80::202:b3ff:fe1e:8329"), attribute.Int("net.peer.port", 56), }, }, { name: "with remote ipv4-in-v6 and port", network: "tcp", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "[::ffff:192.168.0.1]:56", host: "", url: &url.URL{ Path: "/user/123", }, header: nil, expected: []attribute.KeyValue{ attribute.String("net.transport", "ip_tcp"), attribute.String("net.peer.ip", "192.168.0.1"), attribute.Int("net.peer.port", 56), }, }, { name: "with remote name and port", network: "tcp", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "example.com:56", host: "", url: &url.URL{ Path: "/user/123", }, header: nil, expected: []attribute.KeyValue{ attribute.String("net.transport", "ip_tcp"), attribute.String("net.peer.name", "example.com"), attribute.Int("net.peer.port", 56), }, }, { name: "with remote ipv4 only", network: "tcp", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "1.2.3.4", host: "", url: &url.URL{ Path: "/user/123", }, header: nil, expected: []attribute.KeyValue{ attribute.String("net.transport", "ip_tcp"), attribute.String("net.peer.ip", "1.2.3.4"), }, }, { name: "with remote ipv6 only", network: "tcp", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "fe80::0202:b3ff:fe1e:8329", host: "", url: &url.URL{ Path: "/user/123", }, header: nil, expected: []attribute.KeyValue{ attribute.String("net.transport", "ip_tcp"), attribute.String("net.peer.ip", "fe80::202:b3ff:fe1e:8329"), }, }, { name: "with remote ipv4_in_v6 only", network: "tcp", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "::ffff:192.168.0.1", // section 2.5.5.2 of RFC4291 host: "", url: &url.URL{ Path: "/user/123", }, header: nil, expected: []attribute.KeyValue{ attribute.String("net.transport", "ip_tcp"), attribute.String("net.peer.ip", "192.168.0.1"), }, }, { name: "with remote name only", network: "tcp", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "example.com", host: "", url: &url.URL{ Path: "/user/123", }, header: nil, expected: []attribute.KeyValue{ attribute.String("net.transport", "ip_tcp"), attribute.String("net.peer.name", "example.com"), }, }, { name: "with remote port only", network: "tcp", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: ":56", host: "", url: &url.URL{ Path: "/user/123", }, header: nil, expected: []attribute.KeyValue{ attribute.String("net.transport", "ip_tcp"), attribute.Int("net.peer.port", 56), }, }, { name: "with host name only", network: "tcp", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "1.2.3.4:56", host: "example.com", url: &url.URL{ Path: "/user/123", }, header: nil, expected: []attribute.KeyValue{ attribute.String("net.transport", "ip_tcp"), attribute.String("net.peer.ip", "1.2.3.4"), attribute.Int("net.peer.port", 56), attribute.String("net.host.name", "example.com"), }, }, { name: "with host ipv4 only", network: "tcp", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "1.2.3.4:56", host: "4.3.2.1", url: &url.URL{ Path: "/user/123", }, header: nil, expected: []attribute.KeyValue{ attribute.String("net.transport", "ip_tcp"), attribute.String("net.peer.ip", 
"1.2.3.4"), attribute.Int("net.peer.port", 56), attribute.String("net.host.ip", "4.3.2.1"), }, }, { name: "with host ipv6 only", network: "tcp", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "1.2.3.4:56", host: "fe80::0202:b3ff:fe1e:8329", url: &url.URL{ Path: "/user/123", }, header: nil, expected: []attribute.KeyValue{ attribute.String("net.transport", "ip_tcp"), attribute.String("net.peer.ip", "1.2.3.4"), attribute.Int("net.peer.port", 56), attribute.String("net.host.ip", "fe80::202:b3ff:fe1e:8329"), }, }, { name: "with host name and port", network: "tcp", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "1.2.3.4:56", host: "example.com:78", url: &url.URL{ Path: "/user/123", }, header: nil, expected: []attribute.KeyValue{ attribute.String("net.transport", "ip_tcp"), attribute.String("net.peer.ip", "1.2.3.4"), attribute.Int("net.peer.port", 56), attribute.String("net.host.name", "example.com"), attribute.Int("net.host.port", 78), }, }, { name: "with host ipv4 and port", network: "tcp", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "1.2.3.4:56", host: "4.3.2.1:78", url: &url.URL{ Path: "/user/123", }, header: nil, expected: []attribute.KeyValue{ attribute.String("net.transport", "ip_tcp"), attribute.String("net.peer.ip", "1.2.3.4"), attribute.Int("net.peer.port", 56), attribute.String("net.host.ip", "4.3.2.1"), attribute.Int("net.host.port", 78), }, }, { name: "with host ipv6 and port", network: "tcp", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "1.2.3.4:56", host: "[fe80::202:b3ff:fe1e:8329]:78", url: &url.URL{ Path: "/user/123", }, header: nil, expected: []attribute.KeyValue{ attribute.String("net.transport", "ip_tcp"), attribute.String("net.peer.ip", "1.2.3.4"), attribute.Int("net.peer.port", 56), attribute.String("net.host.ip", "fe80::202:b3ff:fe1e:8329"), attribute.Int("net.host.port", 78), }, }, { name: "with host name and bogus port", network: "tcp", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "1.2.3.4:56", host: "example.com:qwerty", url: &url.URL{ Path: "/user/123", }, header: nil, expected: []attribute.KeyValue{ attribute.String("net.transport", "ip_tcp"), attribute.String("net.peer.ip", "1.2.3.4"), attribute.Int("net.peer.port", 56), attribute.String("net.host.name", "example.com"), }, }, { name: "with host ipv4 and bogus port", network: "tcp", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "1.2.3.4:56", host: "4.3.2.1:qwerty", url: &url.URL{ Path: "/user/123", }, header: nil, expected: []attribute.KeyValue{ attribute.String("net.transport", "ip_tcp"), attribute.String("net.peer.ip", "1.2.3.4"), attribute.Int("net.peer.port", 56), attribute.String("net.host.ip", "4.3.2.1"), }, }, { name: "with host ipv6 and bogus port", network: "tcp", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "1.2.3.4:56", host: "[fe80::202:b3ff:fe1e:8329]:qwerty", url: &url.URL{ Path: "/user/123", }, header: nil, expected: []attribute.KeyValue{ attribute.String("net.transport", "ip_tcp"), attribute.String("net.peer.ip", "1.2.3.4"), attribute.Int("net.peer.port", 56), attribute.String("net.host.ip", "fe80::202:b3ff:fe1e:8329"), }, }, { name: "with empty host and port", network: "tcp", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "1.2.3.4:56", host: ":80", url: &url.URL{ Path: "/user/123", }, header: nil, expected: []attribute.KeyValue{ attribute.String("net.transport", "ip_tcp"), 
attribute.String("net.peer.ip", "1.2.3.4"), attribute.Int("net.peer.port", 56), attribute.Int("net.host.port", 80), }, }, { name: "with host ip and port in headers", network: "tcp", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "1.2.3.4:56", host: "", url: &url.URL{ Path: "/user/123", }, header: http.Header{ "Host": []string{"4.3.2.1:78"}, }, expected: []attribute.KeyValue{ attribute.String("net.transport", "ip_tcp"), attribute.String("net.peer.ip", "1.2.3.4"), attribute.Int("net.peer.port", 56), attribute.String("net.host.ip", "4.3.2.1"), attribute.Int("net.host.port", 78), }, }, { name: "with host ipv4 and port in url", network: "tcp", method: "GET", requestURI: "http://4.3.2.1:78/user/123", proto: "HTTP/1.0", remoteAddr: "1.2.3.4:56", host: "", url: &url.URL{ Host: "4.3.2.1:78", Path: "/user/123", }, header: nil, expected: []attribute.KeyValue{ attribute.String("net.transport", "ip_tcp"), attribute.String("net.peer.ip", "1.2.3.4"), attribute.Int("net.peer.port", 56), attribute.String("net.host.ip", "4.3.2.1"), attribute.Int("net.host.port", 78), }, }, { name: "with host ipv6 and port in url", network: "tcp", method: "GET", requestURI: "http://4.3.2.1:78/user/123", proto: "HTTP/1.0", remoteAddr: "1.2.3.4:56", host: "", url: &url.URL{ Host: "[fe80::202:b3ff:fe1e:8329]:78", Path: "/user/123", }, header: nil, expected: []attribute.KeyValue{ attribute.String("net.transport", "ip_tcp"), attribute.String("net.peer.ip", "1.2.3.4"), attribute.Int("net.peer.port", 56), attribute.String("net.host.ip", "fe80::202:b3ff:fe1e:8329"), attribute.Int("net.host.port", 78), }, }, } for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { r := testRequest(tc.method, tc.requestURI, tc.proto, tc.remoteAddr, tc.host, tc.url, tc.header, noTLS) got := sc.NetAttributesFromHTTPRequest(tc.network, r) if diff := cmp.Diff( tc.expected, got, cmp.AllowUnexported(attribute.Value{})); diff != "" { t.Fatalf("attributes differ: diff %+v,", diff) } }) } } func TestEndUserAttributesFromHTTPRequest(t *testing.T) { r := testRequest("GET", "/user/123", "HTTP/1.1", "", "", nil, http.Header{}, withTLS) var expected []attribute.KeyValue got := sc.EndUserAttributesFromHTTPRequest(r) assert.ElementsMatch(t, expected, got) r.SetBasicAuth("admin", "password") expected = []attribute.KeyValue{attribute.String("enduser.id", "admin")} got = sc.EndUserAttributesFromHTTPRequest(r) assert.ElementsMatch(t, expected, got) } func TestHTTPServerAttributesFromHTTPRequest(t *testing.T) { type testcase struct { name string serverName string route string method string requestURI string proto string remoteAddr string host string url *url.URL header http.Header tls tlsOption contentLength int64 expected []attribute.KeyValue } testcases := []testcase{ { name: "stripped", serverName: "", route: "", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "", host: "", url: &url.URL{ Path: "/user/123", }, header: nil, tls: noTLS, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.target", "/user/123"), attribute.String("http.scheme", "http"), attribute.String("http.flavor", "1.0"), }, }, { name: "with server name", serverName: "my-server-name", route: "", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "", host: "", url: &url.URL{ Path: "/user/123", }, header: nil, tls: noTLS, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.target", "/user/123"), attribute.String("http.scheme", "http"), 
attribute.String("http.flavor", "1.0"), attribute.String("http.server_name", "my-server-name"), }, }, { name: "with tls", serverName: "my-server-name", route: "", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "", host: "", url: &url.URL{ Path: "/user/123", }, header: nil, tls: withTLS, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.target", "/user/123"), attribute.String("http.scheme", "https"), attribute.String("http.flavor", "1.0"), attribute.String("http.server_name", "my-server-name"), }, }, { name: "with route", serverName: "my-server-name", route: "/user/:id", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "", host: "", url: &url.URL{ Path: "/user/123", }, header: nil, tls: withTLS, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.target", "/user/123"), attribute.String("http.scheme", "https"), attribute.String("http.flavor", "1.0"), attribute.String("http.server_name", "my-server-name"), attribute.String("http.route", "/user/:id"), }, }, { name: "with host", serverName: "my-server-name", route: "/user/:id", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "", host: "example.com", url: &url.URL{ Path: "/user/123", }, header: nil, tls: withTLS, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.target", "/user/123"), attribute.String("http.scheme", "https"), attribute.String("http.flavor", "1.0"), attribute.String("http.server_name", "my-server-name"), attribute.String("http.route", "/user/:id"), attribute.String("http.host", "example.com"), }, }, { name: "with host fallback", serverName: "my-server-name", route: "/user/:id", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "", host: "", url: &url.URL{ Host: "example.com", Path: "/user/123", }, header: nil, tls: withTLS, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.target", "/user/123"), attribute.String("http.scheme", "https"), attribute.String("http.flavor", "1.0"), attribute.String("http.server_name", "my-server-name"), attribute.String("http.route", "/user/:id"), attribute.String("http.host", "example.com"), }, }, { name: "with user agent", serverName: "my-server-name", route: "/user/:id", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "", host: "example.com", url: &url.URL{ Path: "/user/123", }, header: http.Header{ "User-Agent": []string{"foodownloader"}, }, tls: withTLS, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.target", "/user/123"), attribute.String("http.scheme", "https"), attribute.String("http.flavor", "1.0"), attribute.String("http.server_name", "my-server-name"), attribute.String("http.route", "/user/:id"), attribute.String("http.host", "example.com"), attribute.String("http.user_agent", "foodownloader"), }, }, { name: "with proxy info", serverName: "my-server-name", route: "/user/:id", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "", host: "example.com", url: &url.URL{ Path: "/user/123", }, header: http.Header{ "User-Agent": []string{"foodownloader"}, "X-Forwarded-For": []string{"203.0.113.195, 70.41.3.18, 150.172.238.178"}, }, tls: withTLS, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.target", "/user/123"), attribute.String("http.scheme", "https"), attribute.String("http.flavor", "1.0"), 
attribute.String("http.server_name", "my-server-name"), attribute.String("http.route", "/user/:id"), attribute.String("http.host", "example.com"), attribute.String("http.user_agent", "foodownloader"), attribute.String("http.client_ip", "203.0.113.195"), }, }, { name: "with http 1.1", serverName: "my-server-name", route: "/user/:id", method: "GET", requestURI: "/user/123", proto: "HTTP/1.1", remoteAddr: "", host: "example.com", url: &url.URL{ Path: "/user/123", }, header: http.Header{ "User-Agent": []string{"foodownloader"}, "X-Forwarded-For": []string{"1.2.3.4"}, }, tls: withTLS, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.target", "/user/123"), attribute.String("http.scheme", "https"), attribute.String("http.flavor", "1.1"), attribute.String("http.server_name", "my-server-name"), attribute.String("http.route", "/user/:id"), attribute.String("http.host", "example.com"), attribute.String("http.user_agent", "foodownloader"), attribute.String("http.client_ip", "1.2.3.4"), }, }, { name: "with http 2", serverName: "my-server-name", route: "/user/:id", method: "GET", requestURI: "/user/123", proto: "HTTP/2.0", remoteAddr: "", host: "example.com", url: &url.URL{ Path: "/user/123", }, header: http.Header{ "User-Agent": []string{"foodownloader"}, "X-Forwarded-For": []string{"1.2.3.4"}, }, tls: withTLS, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.target", "/user/123"), attribute.String("http.scheme", "https"), attribute.String("http.flavor", "2"), attribute.String("http.server_name", "my-server-name"), attribute.String("http.route", "/user/:id"), attribute.String("http.host", "example.com"), attribute.String("http.user_agent", "foodownloader"), attribute.String("http.client_ip", "1.2.3.4"), }, }, { name: "with content length", method: "GET", requestURI: "/user/123", contentLength: 100, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.target", "/user/123"), attribute.String("http.scheme", "http"), attribute.Int64("http.request_content_length", 100), }, }, } for idx, tc := range testcases { r := testRequest(tc.method, tc.requestURI, tc.proto, tc.remoteAddr, tc.host, tc.url, tc.header, tc.tls) r.ContentLength = tc.contentLength got := sc.HTTPServerAttributesFromHTTPRequest(tc.serverName, tc.route, r) assertElementsMatch(t, tc.expected, got, "testcase %d - %s", idx, tc.name) } } func TestHTTPAttributesFromHTTPStatusCode(t *testing.T) { expected := []attribute.KeyValue{ attribute.Int("http.status_code", 404), } got := sc.HTTPAttributesFromHTTPStatusCode(http.StatusNotFound) assertElementsMatch(t, expected, got, "with valid HTTP status code") assert.ElementsMatch(t, expected, got) expected = []attribute.KeyValue{ attribute.Int("http.status_code", 499), } got = sc.HTTPAttributesFromHTTPStatusCode(499) assertElementsMatch(t, expected, got, "with invalid HTTP status code") } func TestSpanStatusFromHTTPStatusCode(t *testing.T) { for code := 0; code < 1000; code++ { expected := getExpectedCodeForHTTPCode(code, trace.SpanKindClient) got, msg := SpanStatusFromHTTPStatusCode(code) assert.Equalf(t, expected, got, "%s vs %s", expected, got) _, valid := validateHTTPStatusCode(code) if !valid { assert.NotEmpty(t, msg, "message should be set if error cannot be inferred from code") } else { assert.Empty(t, msg, "message should not be set if error can be inferred from code") } } } func TestSpanStatusFromHTTPStatusCodeAndSpanKind(t *testing.T) { for code := 0; code < 1000; 
code++ { expected := getExpectedCodeForHTTPCode(code, trace.SpanKindClient) got, msg := SpanStatusFromHTTPStatusCodeAndSpanKind(code, trace.SpanKindClient) assert.Equalf(t, expected, got, "%s vs %s", expected, got) _, valid := validateHTTPStatusCode(code) if !valid { assert.NotEmpty(t, msg, "message should be set if error cannot be inferred from code") } else { assert.Empty(t, msg, "message should not be set if error can be inferred from code") } } code, _ := SpanStatusFromHTTPStatusCodeAndSpanKind(400, trace.SpanKindServer) assert.Equalf(t, codes.Unset, code, "message should be set if error cannot be inferred from code") } func getExpectedCodeForHTTPCode(code int, spanKind trace.SpanKind) codes.Code { if http.StatusText(code) == "" { return codes.Error } switch code { case http.StatusUnauthorized, http.StatusForbidden, http.StatusNotFound, http.StatusTooManyRequests, http.StatusNotImplemented, http.StatusServiceUnavailable, http.StatusGatewayTimeout: return codes.Error } category := code / 100 if category > 0 && category < 4 { return codes.Unset } if spanKind == trace.SpanKindServer && category == 4 { return codes.Unset } return codes.Error } func assertElementsMatch(t *testing.T, expected, got []attribute.KeyValue, format string, args ...interface{}) { if !assert.ElementsMatchf(t, expected, got, format, args...) { t.Log("expected:", kvStr(expected)) t.Log("got:", kvStr(got)) } } func testRequest(method, requestURI, proto, remoteAddr, host string, u *url.URL, header http.Header, tlsopt tlsOption) *http.Request { major, minor := protoToInts(proto) var tlsConn *tls.ConnectionState switch tlsopt { case noTLS: case withTLS: tlsConn = &tls.ConnectionState{} } return &http.Request{ Method: method, URL: u, Proto: proto, ProtoMajor: major, ProtoMinor: minor, Header: header, Host: host, RemoteAddr: remoteAddr, RequestURI: requestURI, TLS: tlsConn, } } func protoToInts(proto string) (int, int) { switch proto { case "HTTP/1.0": return 1, 0 case "HTTP/1.1": return 1, 1 case "HTTP/2.0": return 2, 0 } // invalid proto return 13, 42 } func kvStr(kvs []attribute.KeyValue) string { sb := strings.Builder{} _, _ = sb.WriteRune('[') for idx, attr := range kvs { if idx > 0 { _, _ = sb.WriteString(", ") } _, _ = sb.WriteString((string)(attr.Key)) _, _ = sb.WriteString(": ") _, _ = sb.WriteString(attr.Value.Emit()) } _, _ = sb.WriteRune(']') return sb.String() } func TestHTTPClientAttributesFromHTTPRequest(t *testing.T) { testCases := []struct { name string method string requestURI string proto string remoteAddr string host string url *url.URL header http.Header tls tlsOption contentLength int64 expected []attribute.KeyValue }{ { name: "stripped", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "", host: "", url: &url.URL{ Path: "/user/123", }, header: nil, tls: noTLS, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.url", "/user/123"), attribute.String("http.scheme", "http"), attribute.String("http.flavor", "1.0"), }, }, { name: "with tls", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "", host: "", url: &url.URL{ Path: "/user/123", }, header: nil, tls: withTLS, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.url", "/user/123"), attribute.String("http.scheme", "https"), attribute.String("http.flavor", "1.0"), }, }, { name: "with host", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "", host: "example.com", url: &url.URL{ Path: "/user/123", }, 
header: nil, tls: withTLS, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.url", "/user/123"), attribute.String("http.scheme", "https"), attribute.String("http.flavor", "1.0"), attribute.String("http.host", "example.com"), }, }, { name: "with host fallback", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "", host: "", url: &url.URL{ Scheme: "https", Host: "example.com", Path: "/user/123", }, header: nil, tls: withTLS, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.url", "https://example.com/user/123"), attribute.String("http.scheme", "https"), attribute.String("http.flavor", "1.0"), attribute.String("http.host", "example.com"), }, }, { name: "with user agent", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "", host: "example.com", url: &url.URL{ Path: "/user/123", }, header: http.Header{ "User-Agent": []string{"foodownloader"}, }, tls: withTLS, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.url", "/user/123"), attribute.String("http.scheme", "https"), attribute.String("http.flavor", "1.0"), attribute.String("http.host", "example.com"), attribute.String("http.user_agent", "foodownloader"), }, }, { name: "with http 1.1", method: "GET", requestURI: "/user/123", proto: "HTTP/1.1", remoteAddr: "", host: "example.com", url: &url.URL{ Path: "/user/123", }, header: http.Header{ "User-Agent": []string{"foodownloader"}, }, tls: withTLS, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.url", "/user/123"), attribute.String("http.scheme", "https"), attribute.String("http.flavor", "1.1"), attribute.String("http.host", "example.com"), attribute.String("http.user_agent", "foodownloader"), }, }, { name: "with http 2", method: "GET", requestURI: "/user/123", proto: "HTTP/2.0", remoteAddr: "", host: "example.com", url: &url.URL{ Path: "/user/123", }, header: http.Header{ "User-Agent": []string{"foodownloader"}, }, tls: withTLS, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.url", "/user/123"), attribute.String("http.scheme", "https"), attribute.String("http.flavor", "2"), attribute.String("http.host", "example.com"), attribute.String("http.user_agent", "foodownloader"), }, }, { name: "with content length", method: "GET", url: &url.URL{ Path: "/user/123", }, contentLength: 100, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.url", "/user/123"), attribute.String("http.scheme", "http"), attribute.Int64("http.request_content_length", 100), }, }, { name: "with empty method (fallback to GET)", method: "", url: &url.URL{ Path: "/user/123", }, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.url", "/user/123"), attribute.String("http.scheme", "http"), }, }, { name: "authentication information is stripped", method: "", url: &url.URL{ Path: "/user/123", User: url.UserPassword("foo", "bar"), }, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.url", "/user/123"), attribute.String("http.scheme", "http"), }, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { r := testRequest(tc.method, tc.requestURI, tc.proto, tc.remoteAddr, tc.host, tc.url, tc.header, tc.tls) r.ContentLength = tc.contentLength got := sc.HTTPClientAttributesFromHTTPRequest(r) assert.ElementsMatch(t, tc.expected, got) 
}) } } func TestHTTPServerMetricAttributesFromHTTPRequest(t *testing.T) { type testcase struct { name string serverName string method string requestURI string proto string remoteAddr string host string url *url.URL header http.Header tls tlsOption contentLength int64 expected []attribute.KeyValue } testcases := []testcase{ { name: "stripped", serverName: "", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "", host: "", url: &url.URL{ Path: "/user/123", }, header: nil, tls: noTLS, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.scheme", "http"), attribute.String("http.flavor", "1.0"), }, }, { name: "with server name", serverName: "my-server-name", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "", host: "", url: &url.URL{ Path: "/user/123", }, header: nil, tls: noTLS, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.scheme", "http"), attribute.String("http.flavor", "1.0"), attribute.String("http.server_name", "my-server-name"), }, }, { name: "with tls", serverName: "my-server-name", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "", host: "", url: &url.URL{ Path: "/user/123", }, header: nil, tls: withTLS, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.scheme", "https"), attribute.String("http.flavor", "1.0"), attribute.String("http.server_name", "my-server-name"), }, }, { name: "with route", serverName: "my-server-name", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "", host: "", url: &url.URL{ Path: "/user/123", }, header: nil, tls: withTLS, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.scheme", "https"), attribute.String("http.flavor", "1.0"), attribute.String("http.server_name", "my-server-name"), }, }, { name: "with host", serverName: "my-server-name", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "", host: "example.com", url: &url.URL{ Path: "/user/123", }, header: nil, tls: withTLS, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.scheme", "https"), attribute.String("http.flavor", "1.0"), attribute.String("http.server_name", "my-server-name"), attribute.String("http.host", "example.com"), }, }, { name: "with host fallback", serverName: "my-server-name", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "", host: "", url: &url.URL{ Host: "example.com", Path: "/user/123", }, header: nil, tls: withTLS, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.scheme", "https"), attribute.String("http.flavor", "1.0"), attribute.String("http.server_name", "my-server-name"), attribute.String("http.host", "example.com"), }, }, { name: "with user agent", serverName: "my-server-name", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "", host: "example.com", url: &url.URL{ Path: "/user/123", }, header: http.Header{ "User-Agent": []string{"foodownloader"}, }, tls: withTLS, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.scheme", "https"), attribute.String("http.flavor", "1.0"), attribute.String("http.server_name", "my-server-name"), attribute.String("http.host", "example.com"), }, }, { name: "with proxy info", serverName: "my-server-name", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "", host: 
"example.com", url: &url.URL{ Path: "/user/123", }, header: http.Header{ "User-Agent": []string{"foodownloader"}, "X-Forwarded-For": []string{"203.0.113.195, 70.41.3.18, 150.172.238.178"}, }, tls: withTLS, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.scheme", "https"), attribute.String("http.flavor", "1.0"), attribute.String("http.server_name", "my-server-name"), attribute.String("http.host", "example.com"), }, }, { name: "with http 1.1", serverName: "my-server-name", method: "GET", requestURI: "/user/123", proto: "HTTP/1.1", remoteAddr: "", host: "example.com", url: &url.URL{ Path: "/user/123", }, header: http.Header{ "User-Agent": []string{"foodownloader"}, "X-Forwarded-For": []string{"1.2.3.4"}, }, tls: withTLS, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.scheme", "https"), attribute.String("http.flavor", "1.1"), attribute.String("http.server_name", "my-server-name"), attribute.String("http.host", "example.com"), }, }, { name: "with http 2", serverName: "my-server-name", method: "GET", requestURI: "/user/123", proto: "HTTP/2.0", remoteAddr: "", host: "example.com", url: &url.URL{ Path: "/user/123", }, header: http.Header{ "User-Agent": []string{"foodownloader"}, "X-Forwarded-For": []string{"1.2.3.4"}, }, tls: withTLS, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.scheme", "https"), attribute.String("http.flavor", "2"), attribute.String("http.server_name", "my-server-name"), attribute.String("http.host", "example.com"), }, }, } for idx, tc := range testcases { r := testRequest(tc.method, tc.requestURI, tc.proto, tc.remoteAddr, tc.host, tc.url, tc.header, tc.tls) r.ContentLength = tc.contentLength got := sc.HTTPServerMetricAttributesFromHTTPRequest(tc.serverName, r) assertElementsMatch(t, tc.expected, got, "testcase %d - %s", idx, tc.name) } } func TestHttpBasicAttributesFromHTTPRequest(t *testing.T) { type testcase struct { name string method string requestURI string proto string remoteAddr string host string url *url.URL header http.Header tls tlsOption contentLength int64 expected []attribute.KeyValue } testcases := []testcase{ { name: "stripped", method: "GET", requestURI: "/user/123", proto: "HTTP/1.0", remoteAddr: "", host: "example.com", url: &url.URL{ Path: "/user/123", }, header: nil, tls: noTLS, expected: []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.scheme", "http"), attribute.String("http.flavor", "1.0"), attribute.String("http.host", "example.com"), }, }, } for idx, tc := range testcases { r := testRequest(tc.method, tc.requestURI, tc.proto, tc.remoteAddr, tc.host, tc.url, tc.header, tc.tls) r.ContentLength = tc.contentLength got := sc.httpBasicAttributesFromHTTPRequest(r) assertElementsMatch(t, tc.expected, got, "testcase %d - %s", idx, tc.name) } } opentelemetry-go-1.21.0/semconv/internal/v2/000077500000000000000000000000001452547353200207215ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/internal/v2/http.go000066400000000000000000000272021452547353200222320ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal // import "go.opentelemetry.io/otel/semconv/internal/v2" import ( "fmt" "net/http" "strings" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" ) // HTTPConv are the HTTP semantic convention attributes defined for a version // of the OpenTelemetry specification. type HTTPConv struct { NetConv *NetConv EnduserIDKey attribute.Key HTTPClientIPKey attribute.Key HTTPFlavorKey attribute.Key HTTPMethodKey attribute.Key HTTPRequestContentLengthKey attribute.Key HTTPResponseContentLengthKey attribute.Key HTTPRouteKey attribute.Key HTTPSchemeHTTP attribute.KeyValue HTTPSchemeHTTPS attribute.KeyValue HTTPStatusCodeKey attribute.Key HTTPTargetKey attribute.Key HTTPURLKey attribute.Key HTTPUserAgentKey attribute.Key } // ClientResponse returns attributes for an HTTP response received by a client // from a server. The following attributes are returned if the related values // are defined in resp: "http.status.code", "http.response_content_length". // // This does not add all OpenTelemetry required attributes for an HTTP event, // it assumes ClientRequest was used to create the span with a complete set of // attributes. If a complete set of attributes can be generated using the // request contained in resp. For example: // // append(ClientResponse(resp), ClientRequest(resp.Request)...) func (c *HTTPConv) ClientResponse(resp *http.Response) []attribute.KeyValue { var n int if resp.StatusCode > 0 { n++ } if resp.ContentLength > 0 { n++ } attrs := make([]attribute.KeyValue, 0, n) if resp.StatusCode > 0 { attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode)) } if resp.ContentLength > 0 { attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength))) } return attrs } // ClientRequest returns attributes for an HTTP request made by a client. The // following attributes are always returned: "http.url", "http.flavor", // "http.method", "net.peer.name". The following attributes are returned if the // related values are defined in req: "net.peer.port", "http.user_agent", // "http.request_content_length", "enduser.id". func (c *HTTPConv) ClientRequest(req *http.Request) []attribute.KeyValue { n := 3 // URL, peer name, proto, and method. var h string if req.URL != nil { h = req.URL.Host } peer, p := firstHostPort(h, req.Header.Get("Host")) port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p) if port > 0 { n++ } useragent := req.UserAgent() if useragent != "" { n++ } if req.ContentLength > 0 { n++ } userID, _, hasUserID := req.BasicAuth() if hasUserID { n++ } attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.method(req.Method)) attrs = append(attrs, c.proto(req.Proto)) var u string if req.URL != nil { // Remove any username/password info that may be in the URL. userinfo := req.URL.User req.URL.User = nil u = req.URL.String() // Restore any username/password info that was removed. 
req.URL.User = userinfo } attrs = append(attrs, c.HTTPURLKey.String(u)) attrs = append(attrs, c.NetConv.PeerName(peer)) if port > 0 { attrs = append(attrs, c.NetConv.PeerPort(port)) } if useragent != "" { attrs = append(attrs, c.HTTPUserAgentKey.String(useragent)) } if l := req.ContentLength; l > 0 { attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l)) } if hasUserID { attrs = append(attrs, c.EnduserIDKey.String(userID)) } return attrs } // ServerRequest returns attributes for an HTTP request received by a server. // // The server must be the primary server name if it is known. For example this // would be the ServerName directive // (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache // server, and the server_name directive // (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an // nginx server. More generically, the primary server name would be the host // header value that matches the default virtual host of an HTTP server. It // should include the host identifier and if a port is used to route to the // server that port identifier should be included as an appropriate port // suffix. // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. // // The following attributes are always returned: "http.method", "http.scheme", // "http.flavor", "http.target", "net.host.name". The following attributes are // returned if they related values are defined in req: "net.host.port", // "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", // "http.client_ip". func (c *HTTPConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue { // TODO: This currently does not add the specification required // `http.target` attribute. It has too high of a cardinality to safely be // added. An alternate should be added, or this comment removed, when it is // addressed by the specification. If it is ultimately decided to continue // not including the attribute, the HTTPTargetKey field of the HTTPConv // should be removed as well. n := 4 // Method, scheme, proto, and host name. var host string var p int if server == "" { host, p = splitHostPort(req.Host) } else { // Prioritize the primary server name. host, p = splitHostPort(server) if p < 0 { _, p = splitHostPort(req.Host) } } hostPort := requiredHTTPPort(req.TLS != nil, p) if hostPort > 0 { n++ } peer, peerPort := splitHostPort(req.RemoteAddr) if peer != "" { n++ if peerPort > 0 { n++ } } useragent := req.UserAgent() if useragent != "" { n++ } userID, _, hasUserID := req.BasicAuth() if hasUserID { n++ } clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) if clientIP != "" { n++ } attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.method(req.Method)) attrs = append(attrs, c.scheme(req.TLS != nil)) attrs = append(attrs, c.proto(req.Proto)) attrs = append(attrs, c.NetConv.HostName(host)) if hostPort > 0 { attrs = append(attrs, c.NetConv.HostPort(hostPort)) } if peer != "" { // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a // file-path that would be interpreted with a sock family. 
attrs = append(attrs, c.NetConv.SockPeerAddr(peer)) if peerPort > 0 { attrs = append(attrs, c.NetConv.SockPeerPort(peerPort)) } } if useragent != "" { attrs = append(attrs, c.HTTPUserAgentKey.String(useragent)) } if hasUserID { attrs = append(attrs, c.EnduserIDKey.String(userID)) } if clientIP != "" { attrs = append(attrs, c.HTTPClientIPKey.String(clientIP)) } return attrs } func (c *HTTPConv) method(method string) attribute.KeyValue { if method == "" { return c.HTTPMethodKey.String(http.MethodGet) } return c.HTTPMethodKey.String(method) } func (c *HTTPConv) scheme(https bool) attribute.KeyValue { // nolint:revive if https { return c.HTTPSchemeHTTPS } return c.HTTPSchemeHTTP } func (c *HTTPConv) proto(proto string) attribute.KeyValue { switch proto { case "HTTP/1.0": return c.HTTPFlavorKey.String("1.0") case "HTTP/1.1": return c.HTTPFlavorKey.String("1.1") case "HTTP/2": return c.HTTPFlavorKey.String("2.0") case "HTTP/3": return c.HTTPFlavorKey.String("3.0") default: return c.HTTPFlavorKey.String(proto) } } func serverClientIP(xForwardedFor string) string { if idx := strings.Index(xForwardedFor, ","); idx >= 0 { xForwardedFor = xForwardedFor[:idx] } return xForwardedFor } func requiredHTTPPort(https bool, port int) int { // nolint:revive if https { if port > 0 && port != 443 { return port } } else { if port > 0 && port != 80 { return port } } return -1 } // Return the request host and port from the first non-empty source. func firstHostPort(source ...string) (host string, port int) { for _, hostport := range source { host, port = splitHostPort(hostport) if host != "" || port > 0 { break } } return } // RequestHeader returns the contents of h as OpenTelemetry attributes. func (c *HTTPConv) RequestHeader(h http.Header) []attribute.KeyValue { return c.header("http.request.header", h) } // ResponseHeader returns the contents of h as OpenTelemetry attributes. func (c *HTTPConv) ResponseHeader(h http.Header) []attribute.KeyValue { return c.header("http.response.header", h) } func (c *HTTPConv) header(prefix string, h http.Header) []attribute.KeyValue { key := func(k string) attribute.Key { k = strings.ToLower(k) k = strings.ReplaceAll(k, "-", "_") k = fmt.Sprintf("%s.%s", prefix, k) return attribute.Key(k) } attrs := make([]attribute.KeyValue, 0, len(h)) for k, v := range h { attrs = append(attrs, key(k).StringSlice(v)) } return attrs } // ClientStatus returns a span status code and message for an HTTP status code // value received by a client. func (c *HTTPConv) ClientStatus(code int) (codes.Code, string) { stat, valid := validateHTTPStatusCode(code) if !valid { return stat, fmt.Sprintf("Invalid HTTP status code %d", code) } return stat, "" } // ServerStatus returns a span status code and message for an HTTP status code // value returned by a server. Status codes in the 400-499 range are not // returned as errors. 
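//
// For example, with hc being a placeholder for a configured *HTTPConv:
//
//	hc.ServerStatus(http.StatusNotFound)            // codes.Unset, ""
//	hc.ServerStatus(http.StatusInternalServerError) // codes.Error, ""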
func (c *HTTPConv) ServerStatus(code int) (codes.Code, string) { stat, valid := validateHTTPStatusCode(code) if !valid { return stat, fmt.Sprintf("Invalid HTTP status code %d", code) } if code/100 == 4 { return codes.Unset, "" } return stat, "" } type codeRange struct { fromInclusive int toInclusive int } func (r codeRange) contains(code int) bool { return r.fromInclusive <= code && code <= r.toInclusive } var validRangesPerCategory = map[int][]codeRange{ 1: { {http.StatusContinue, http.StatusEarlyHints}, }, 2: { {http.StatusOK, http.StatusAlreadyReported}, {http.StatusIMUsed, http.StatusIMUsed}, }, 3: { {http.StatusMultipleChoices, http.StatusUseProxy}, {http.StatusTemporaryRedirect, http.StatusPermanentRedirect}, }, 4: { {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful… {http.StatusMisdirectedRequest, http.StatusUpgradeRequired}, {http.StatusPreconditionRequired, http.StatusTooManyRequests}, {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge}, {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons}, }, 5: { {http.StatusInternalServerError, http.StatusLoopDetected}, {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired}, }, } // validateHTTPStatusCode validates the HTTP status code and returns // corresponding span status code. If the `code` is not a valid HTTP status // code, returns span status Error and false. func validateHTTPStatusCode(code int) (codes.Code, bool) { category := code / 100 ranges, ok := validRangesPerCategory[category] if !ok { return codes.Error, false } ok = false for _, crange := range ranges { ok = crange.contains(code) if ok { break } } if !ok { return codes.Error, false } if category > 0 && category < 4 { return codes.Unset, true } return codes.Error, true } opentelemetry-go-1.21.0/semconv/internal/v2/http_test.go000066400000000000000000000401131452547353200232650ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
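// The tests below drive HTTPConv through a value (hc) wired to concrete
// attribute keys. As a rough client-side sketch, where span, req and client
// are placeholders for an existing trace.Span, *http.Request and *http.Client:
//
//	span.SetAttributes(hc.ClientRequest(req)...)
//	resp, err := client.Do(req)
//	if err == nil {
//		span.SetAttributes(hc.ClientResponse(resp)...)
//		span.SetStatus(hc.ClientStatus(resp.StatusCode))
//	}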
package internal import ( "net/http" "net/http/httptest" "net/url" "strconv" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" ) var hc = &HTTPConv{ NetConv: nc, EnduserIDKey: attribute.Key("enduser.id"), HTTPClientIPKey: attribute.Key("http.client_ip"), HTTPFlavorKey: attribute.Key("http.flavor"), HTTPMethodKey: attribute.Key("http.method"), HTTPRequestContentLengthKey: attribute.Key("http.request_content_length"), HTTPResponseContentLengthKey: attribute.Key("http.response_content_length"), HTTPRouteKey: attribute.Key("http.route"), HTTPSchemeHTTP: attribute.String("http.scheme", "http"), HTTPSchemeHTTPS: attribute.String("http.scheme", "https"), HTTPStatusCodeKey: attribute.Key("http.status_code"), HTTPTargetKey: attribute.Key("http.target"), HTTPURLKey: attribute.Key("http.url"), HTTPUserAgentKey: attribute.Key("http.user_agent"), } func TestHTTPClientResponse(t *testing.T) { const stat, n = 201, 397 resp := &http.Response{ StatusCode: stat, ContentLength: n, } got := hc.ClientResponse(resp) assert.Equal(t, 2, cap(got), "slice capacity") assert.ElementsMatch(t, []attribute.KeyValue{ attribute.Key("http.status_code").Int(stat), attribute.Key("http.response_content_length").Int(n), }, got) } func TestHTTPSClientRequest(t *testing.T) { req := &http.Request{ Method: http.MethodGet, URL: &url.URL{ Scheme: "https", Host: "127.0.0.1:443", Path: "/resource", }, Proto: "HTTP/1.0", ProtoMajor: 1, ProtoMinor: 0, } assert.Equal( t, []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.flavor", "1.0"), attribute.String("http.url", "https://127.0.0.1:443/resource"), attribute.String("net.peer.name", "127.0.0.1"), }, hc.ClientRequest(req), ) } func TestHTTPClientRequest(t *testing.T) { const ( user = "alice" n = 128 agent = "Go-http-client/1.1" ) req := &http.Request{ Method: http.MethodGet, URL: &url.URL{ Scheme: "http", Host: "127.0.0.1:8080", Path: "/resource", }, Proto: "HTTP/1.0", ProtoMajor: 1, ProtoMinor: 0, Header: http.Header{ "User-Agent": []string{agent}, }, ContentLength: n, } req.SetBasicAuth(user, "pswrd") assert.Equal( t, []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.flavor", "1.0"), attribute.String("http.url", "http://127.0.0.1:8080/resource"), attribute.String("net.peer.name", "127.0.0.1"), attribute.Int("net.peer.port", 8080), attribute.String("http.user_agent", agent), attribute.Int("http.request_content_length", n), attribute.String("enduser.id", user), }, hc.ClientRequest(req), ) } func TestHTTPClientRequestRequired(t *testing.T) { req := new(http.Request) var got []attribute.KeyValue assert.NotPanics(t, func() { got = hc.ClientRequest(req) }) want := []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.flavor", ""), attribute.String("http.url", ""), attribute.String("net.peer.name", ""), } assert.Equal(t, want, got) } func TestHTTPServerRequest(t *testing.T) { got := make(chan *http.Request, 1) handler := func(w http.ResponseWriter, r *http.Request) { got <- r w.WriteHeader(http.StatusOK) } srv := httptest.NewServer(http.HandlerFunc(handler)) defer srv.Close() srvURL, err := url.Parse(srv.URL) require.NoError(t, err) srvPort, err := strconv.ParseInt(srvURL.Port(), 10, 32) require.NoError(t, err) resp, err := srv.Client().Get(srv.URL) require.NoError(t, err) require.NoError(t, resp.Body.Close()) req := <-got peer, peerPort := splitHostPort(req.RemoteAddr) const user = "alice" 
req.SetBasicAuth(user, "pswrd") const clientIP = "127.0.0.5" req.Header.Add("X-Forwarded-For", clientIP) assert.ElementsMatch(t, []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.scheme", "http"), attribute.String("http.flavor", "1.1"), attribute.String("net.host.name", srvURL.Hostname()), attribute.Int("net.host.port", int(srvPort)), attribute.String("net.sock.peer.addr", peer), attribute.Int("net.sock.peer.port", peerPort), attribute.String("http.user_agent", "Go-http-client/1.1"), attribute.String("enduser.id", user), attribute.String("http.client_ip", clientIP), }, hc.ServerRequest("", req)) } func TestHTTPServerName(t *testing.T) { req := new(http.Request) var got []attribute.KeyValue const ( host = "test.semconv.server" port = 8080 ) portStr := strconv.Itoa(port) server := host + ":" + portStr assert.NotPanics(t, func() { got = hc.ServerRequest(server, req) }) assert.Contains(t, got, attribute.String("net.host.name", host)) assert.Contains(t, got, attribute.Int("net.host.port", port)) req = &http.Request{Host: "alt.host.name:" + portStr} // The server parameter does not include a port, ServerRequest should use // the port in the request Host field. assert.NotPanics(t, func() { got = hc.ServerRequest(host, req) }) assert.Contains(t, got, attribute.String("net.host.name", host)) assert.Contains(t, got, attribute.Int("net.host.port", port)) } func TestHTTPServerRequestFailsGracefully(t *testing.T) { req := new(http.Request) var got []attribute.KeyValue assert.NotPanics(t, func() { got = hc.ServerRequest("", req) }) want := []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.scheme", "http"), attribute.String("http.flavor", ""), attribute.String("net.host.name", ""), } assert.ElementsMatch(t, want, got) } func TestMethod(t *testing.T) { assert.Equal(t, attribute.String("http.method", "POST"), hc.method("POST")) assert.Equal(t, attribute.String("http.method", "GET"), hc.method("")) assert.Equal(t, attribute.String("http.method", "garbage"), hc.method("garbage")) } func TestScheme(t *testing.T) { assert.Equal(t, attribute.String("http.scheme", "http"), hc.scheme(false)) assert.Equal(t, attribute.String("http.scheme", "https"), hc.scheme(true)) } func TestProto(t *testing.T) { tests := map[string]string{ "HTTP/1.0": "1.0", "HTTP/1.1": "1.1", "HTTP/2": "2.0", "HTTP/3": "3.0", "SPDY": "SPDY", "QUIC": "QUIC", "other": "other", } for proto, want := range tests { expect := attribute.String("http.flavor", want) assert.Equal(t, expect, hc.proto(proto), proto) } } func TestServerClientIP(t *testing.T) { tests := []struct { xForwardedFor string want string }{ {"", ""}, {"127.0.0.1", "127.0.0.1"}, {"127.0.0.1,127.0.0.5", "127.0.0.1"}, } for _, test := range tests { got := serverClientIP(test.xForwardedFor) assert.Equal(t, test.want, got, test.xForwardedFor) } } func TestRequiredHTTPPort(t *testing.T) { tests := []struct { https bool port int want int }{ {true, 443, -1}, {true, 80, 80}, {true, 8081, 8081}, {false, 443, 443}, {false, 80, -1}, {false, 8080, 8080}, } for _, test := range tests { got := requiredHTTPPort(test.https, test.port) assert.Equal(t, test.want, got, test.https, test.port) } } func TestFirstHostPort(t *testing.T) { host, port := "127.0.0.1", 8080 hostport := "127.0.0.1:8080" sources := [][]string{ {hostport}, {"", hostport}, {"", "", hostport}, {"", "", hostport, ""}, {"", "", hostport, "127.0.0.3:80"}, } for _, src := range sources { h, p := firstHostPort(src...) 
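// firstHostPort should return the first non-empty "host:port" entry,
// regardless of how many empty sources precede or follow it.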
assert.Equal(t, host, h, src) assert.Equal(t, port, p, src) } } func TestRequestHeader(t *testing.T) { ips := []string{"127.0.0.5", "127.0.0.9"} user := []string{"alice"} h := http.Header{"ips": ips, "user": user} got := hc.RequestHeader(h) assert.Equal(t, 2, cap(got), "slice capacity") assert.ElementsMatch(t, []attribute.KeyValue{ attribute.StringSlice("http.request.header.ips", ips), attribute.StringSlice("http.request.header.user", user), }, got) } func TestReponseHeader(t *testing.T) { ips := []string{"127.0.0.5", "127.0.0.9"} user := []string{"alice"} h := http.Header{"ips": ips, "user": user} got := hc.ResponseHeader(h) assert.Equal(t, 2, cap(got), "slice capacity") assert.ElementsMatch(t, []attribute.KeyValue{ attribute.StringSlice("http.response.header.ips", ips), attribute.StringSlice("http.response.header.user", user), }, got) } func TestClientStatus(t *testing.T) { tests := []struct { code int stat codes.Code msg bool }{ {0, codes.Error, true}, {http.StatusContinue, codes.Unset, false}, {http.StatusSwitchingProtocols, codes.Unset, false}, {http.StatusProcessing, codes.Unset, false}, {http.StatusEarlyHints, codes.Unset, false}, {http.StatusOK, codes.Unset, false}, {http.StatusCreated, codes.Unset, false}, {http.StatusAccepted, codes.Unset, false}, {http.StatusNonAuthoritativeInfo, codes.Unset, false}, {http.StatusNoContent, codes.Unset, false}, {http.StatusResetContent, codes.Unset, false}, {http.StatusPartialContent, codes.Unset, false}, {http.StatusMultiStatus, codes.Unset, false}, {http.StatusAlreadyReported, codes.Unset, false}, {http.StatusIMUsed, codes.Unset, false}, {http.StatusMultipleChoices, codes.Unset, false}, {http.StatusMovedPermanently, codes.Unset, false}, {http.StatusFound, codes.Unset, false}, {http.StatusSeeOther, codes.Unset, false}, {http.StatusNotModified, codes.Unset, false}, {http.StatusUseProxy, codes.Unset, false}, {306, codes.Error, true}, {http.StatusTemporaryRedirect, codes.Unset, false}, {http.StatusPermanentRedirect, codes.Unset, false}, {http.StatusBadRequest, codes.Error, false}, {http.StatusUnauthorized, codes.Error, false}, {http.StatusPaymentRequired, codes.Error, false}, {http.StatusForbidden, codes.Error, false}, {http.StatusNotFound, codes.Error, false}, {http.StatusMethodNotAllowed, codes.Error, false}, {http.StatusNotAcceptable, codes.Error, false}, {http.StatusProxyAuthRequired, codes.Error, false}, {http.StatusRequestTimeout, codes.Error, false}, {http.StatusConflict, codes.Error, false}, {http.StatusGone, codes.Error, false}, {http.StatusLengthRequired, codes.Error, false}, {http.StatusPreconditionFailed, codes.Error, false}, {http.StatusRequestEntityTooLarge, codes.Error, false}, {http.StatusRequestURITooLong, codes.Error, false}, {http.StatusUnsupportedMediaType, codes.Error, false}, {http.StatusRequestedRangeNotSatisfiable, codes.Error, false}, {http.StatusExpectationFailed, codes.Error, false}, {http.StatusTeapot, codes.Error, false}, {http.StatusMisdirectedRequest, codes.Error, false}, {http.StatusUnprocessableEntity, codes.Error, false}, {http.StatusLocked, codes.Error, false}, {http.StatusFailedDependency, codes.Error, false}, {http.StatusTooEarly, codes.Error, false}, {http.StatusUpgradeRequired, codes.Error, false}, {http.StatusPreconditionRequired, codes.Error, false}, {http.StatusTooManyRequests, codes.Error, false}, {http.StatusRequestHeaderFieldsTooLarge, codes.Error, false}, {http.StatusUnavailableForLegalReasons, codes.Error, false}, {http.StatusInternalServerError, codes.Error, false}, {http.StatusNotImplemented, 
codes.Error, false}, {http.StatusBadGateway, codes.Error, false}, {http.StatusServiceUnavailable, codes.Error, false}, {http.StatusGatewayTimeout, codes.Error, false}, {http.StatusHTTPVersionNotSupported, codes.Error, false}, {http.StatusVariantAlsoNegotiates, codes.Error, false}, {http.StatusInsufficientStorage, codes.Error, false}, {http.StatusLoopDetected, codes.Error, false}, {http.StatusNotExtended, codes.Error, false}, {http.StatusNetworkAuthenticationRequired, codes.Error, false}, {600, codes.Error, true}, } for _, test := range tests { c, msg := hc.ClientStatus(test.code) assert.Equal(t, test.stat, c) if test.msg && msg == "" { t.Errorf("expected non-empty message for %d", test.code) } else if !test.msg && msg != "" { t.Errorf("expected empty message for %d, got: %s", test.code, msg) } } } func TestServerStatus(t *testing.T) { tests := []struct { code int stat codes.Code msg bool }{ {0, codes.Error, true}, {http.StatusContinue, codes.Unset, false}, {http.StatusSwitchingProtocols, codes.Unset, false}, {http.StatusProcessing, codes.Unset, false}, {http.StatusEarlyHints, codes.Unset, false}, {http.StatusOK, codes.Unset, false}, {http.StatusCreated, codes.Unset, false}, {http.StatusAccepted, codes.Unset, false}, {http.StatusNonAuthoritativeInfo, codes.Unset, false}, {http.StatusNoContent, codes.Unset, false}, {http.StatusResetContent, codes.Unset, false}, {http.StatusPartialContent, codes.Unset, false}, {http.StatusMultiStatus, codes.Unset, false}, {http.StatusAlreadyReported, codes.Unset, false}, {http.StatusIMUsed, codes.Unset, false}, {http.StatusMultipleChoices, codes.Unset, false}, {http.StatusMovedPermanently, codes.Unset, false}, {http.StatusFound, codes.Unset, false}, {http.StatusSeeOther, codes.Unset, false}, {http.StatusNotModified, codes.Unset, false}, {http.StatusUseProxy, codes.Unset, false}, {306, codes.Error, true}, {http.StatusTemporaryRedirect, codes.Unset, false}, {http.StatusPermanentRedirect, codes.Unset, false}, {http.StatusBadRequest, codes.Unset, false}, {http.StatusUnauthorized, codes.Unset, false}, {http.StatusPaymentRequired, codes.Unset, false}, {http.StatusForbidden, codes.Unset, false}, {http.StatusNotFound, codes.Unset, false}, {http.StatusMethodNotAllowed, codes.Unset, false}, {http.StatusNotAcceptable, codes.Unset, false}, {http.StatusProxyAuthRequired, codes.Unset, false}, {http.StatusRequestTimeout, codes.Unset, false}, {http.StatusConflict, codes.Unset, false}, {http.StatusGone, codes.Unset, false}, {http.StatusLengthRequired, codes.Unset, false}, {http.StatusPreconditionFailed, codes.Unset, false}, {http.StatusRequestEntityTooLarge, codes.Unset, false}, {http.StatusRequestURITooLong, codes.Unset, false}, {http.StatusUnsupportedMediaType, codes.Unset, false}, {http.StatusRequestedRangeNotSatisfiable, codes.Unset, false}, {http.StatusExpectationFailed, codes.Unset, false}, {http.StatusTeapot, codes.Unset, false}, {http.StatusMisdirectedRequest, codes.Unset, false}, {http.StatusUnprocessableEntity, codes.Unset, false}, {http.StatusLocked, codes.Unset, false}, {http.StatusFailedDependency, codes.Unset, false}, {http.StatusTooEarly, codes.Unset, false}, {http.StatusUpgradeRequired, codes.Unset, false}, {http.StatusPreconditionRequired, codes.Unset, false}, {http.StatusTooManyRequests, codes.Unset, false}, {http.StatusRequestHeaderFieldsTooLarge, codes.Unset, false}, {http.StatusUnavailableForLegalReasons, codes.Unset, false}, {http.StatusInternalServerError, codes.Error, false}, {http.StatusNotImplemented, codes.Error, false}, {http.StatusBadGateway, 
codes.Error, false}, {http.StatusServiceUnavailable, codes.Error, false}, {http.StatusGatewayTimeout, codes.Error, false}, {http.StatusHTTPVersionNotSupported, codes.Error, false}, {http.StatusVariantAlsoNegotiates, codes.Error, false}, {http.StatusInsufficientStorage, codes.Error, false}, {http.StatusLoopDetected, codes.Error, false}, {http.StatusNotExtended, codes.Error, false}, {http.StatusNetworkAuthenticationRequired, codes.Error, false}, {600, codes.Error, true}, } for _, test := range tests { c, msg := hc.ServerStatus(test.code) assert.Equal(t, test.stat, c) if test.msg && msg == "" { t.Errorf("expected non-empty message for %d", test.code) } else if !test.msg && msg != "" { t.Errorf("expected empty message for %d, got: %s", test.code, msg) } } } opentelemetry-go-1.21.0/semconv/internal/v2/net.go000066400000000000000000000202121452547353200220330ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal // import "go.opentelemetry.io/otel/semconv/internal/v2" import ( "net" "strconv" "strings" "go.opentelemetry.io/otel/attribute" ) // NetConv are the network semantic convention attributes defined for a version // of the OpenTelemetry specification. type NetConv struct { NetHostNameKey attribute.Key NetHostPortKey attribute.Key NetPeerNameKey attribute.Key NetPeerPortKey attribute.Key NetSockFamilyKey attribute.Key NetSockPeerAddrKey attribute.Key NetSockPeerPortKey attribute.Key NetSockHostAddrKey attribute.Key NetSockHostPortKey attribute.Key NetTransportOther attribute.KeyValue NetTransportTCP attribute.KeyValue NetTransportUDP attribute.KeyValue NetTransportInProc attribute.KeyValue } func (c *NetConv) Transport(network string) attribute.KeyValue { switch network { case "tcp", "tcp4", "tcp6": return c.NetTransportTCP case "udp", "udp4", "udp6": return c.NetTransportUDP case "unix", "unixgram", "unixpacket": return c.NetTransportInProc default: // "ip:*", "ip4:*", and "ip6:*" all are considered other. return c.NetTransportOther } } // Host returns attributes for a network host address. func (c *NetConv) Host(address string) []attribute.KeyValue { h, p := splitHostPort(address) var n int if h != "" { n++ if p > 0 { n++ } } if n == 0 { return nil } attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.HostName(h)) if p > 0 { attrs = append(attrs, c.HostPort(int(p))) } return attrs } // Server returns attributes for a network listener listening at address. See // net.Listen for information about acceptable address values, address should // be the same as the one used to create ln. If ln is nil, only network host // attributes will be returned that describe address. Otherwise, the socket // level information about ln will also be included. 
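//
// A minimal usage sketch (the listen address and the nc variable, a
// configured *NetConv, are assumptions for illustration):
//
//	ln, err := net.Listen("tcp", "127.0.0.1:0")
//	if err != nil {
//		// handle error
//	}
//	attrs := nc.Server("127.0.0.1:0", ln)
//	// attrs describes net.host.name, net.transport, net.sock.family,
//	// net.sock.host.addr, and net.sock.host.port for the listener.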
func (c *NetConv) Server(address string, ln net.Listener) []attribute.KeyValue { if ln == nil { return c.Host(address) } lAddr := ln.Addr() if lAddr == nil { return c.Host(address) } hostName, hostPort := splitHostPort(address) sockHostAddr, sockHostPort := splitHostPort(lAddr.String()) network := lAddr.Network() sockFamily := family(network, sockHostAddr) n := nonZeroStr(hostName, network, sockHostAddr, sockFamily) n += positiveInt(hostPort, sockHostPort) attr := make([]attribute.KeyValue, 0, n) if hostName != "" { attr = append(attr, c.HostName(hostName)) if hostPort > 0 { // Only if net.host.name is set should net.host.port be. attr = append(attr, c.HostPort(hostPort)) } } if network != "" { attr = append(attr, c.Transport(network)) } if sockFamily != "" { attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) } if sockHostAddr != "" { attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) if sockHostPort > 0 { // Only if net.sock.host.addr is set should net.sock.host.port be. attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) } } return attr } func (c *NetConv) HostName(name string) attribute.KeyValue { return c.NetHostNameKey.String(name) } func (c *NetConv) HostPort(port int) attribute.KeyValue { return c.NetHostPortKey.Int(port) } // Client returns attributes for a client network connection to address. See // net.Dial for information about acceptable address values, address should be // the same as the one used to create conn. If conn is nil, only network peer // attributes will be returned that describe address. Otherwise, the socket // level information about conn will also be included. func (c *NetConv) Client(address string, conn net.Conn) []attribute.KeyValue { if conn == nil { return c.Peer(address) } lAddr, rAddr := conn.LocalAddr(), conn.RemoteAddr() var network string switch { case lAddr != nil: network = lAddr.Network() case rAddr != nil: network = rAddr.Network() default: return c.Peer(address) } peerName, peerPort := splitHostPort(address) var ( sockFamily string sockPeerAddr string sockPeerPort int sockHostAddr string sockHostPort int ) if lAddr != nil { sockHostAddr, sockHostPort = splitHostPort(lAddr.String()) } if rAddr != nil { sockPeerAddr, sockPeerPort = splitHostPort(rAddr.String()) } switch { case sockHostAddr != "": sockFamily = family(network, sockHostAddr) case sockPeerAddr != "": sockFamily = family(network, sockPeerAddr) } n := nonZeroStr(peerName, network, sockPeerAddr, sockHostAddr, sockFamily) n += positiveInt(peerPort, sockPeerPort, sockHostPort) attr := make([]attribute.KeyValue, 0, n) if peerName != "" { attr = append(attr, c.PeerName(peerName)) if peerPort > 0 { // Only if net.peer.name is set should net.peer.port be. attr = append(attr, c.PeerPort(peerPort)) } } if network != "" { attr = append(attr, c.Transport(network)) } if sockFamily != "" { attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) } if sockPeerAddr != "" { attr = append(attr, c.NetSockPeerAddrKey.String(sockPeerAddr)) if sockPeerPort > 0 { // Only if net.sock.peer.addr is set should net.sock.peer.port be. attr = append(attr, c.NetSockPeerPortKey.Int(sockPeerPort)) } } if sockHostAddr != "" { attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) if sockHostPort > 0 { // Only if net.sock.host.addr is set should net.sock.host.port be. 
attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) } } return attr } func family(network, address string) string { switch network { case "unix", "unixgram", "unixpacket": return "unix" default: if ip := net.ParseIP(address); ip != nil { if ip.To4() == nil { return "inet6" } return "inet" } } return "" } func nonZeroStr(strs ...string) int { var n int for _, str := range strs { if str != "" { n++ } } return n } func positiveInt(ints ...int) int { var n int for _, i := range ints { if i > 0 { n++ } } return n } // Peer returns attributes for a network peer address. func (c *NetConv) Peer(address string) []attribute.KeyValue { h, p := splitHostPort(address) var n int if h != "" { n++ if p > 0 { n++ } } if n == 0 { return nil } attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.PeerName(h)) if p > 0 { attrs = append(attrs, c.PeerPort(int(p))) } return attrs } func (c *NetConv) PeerName(name string) attribute.KeyValue { return c.NetPeerNameKey.String(name) } func (c *NetConv) PeerPort(port int) attribute.KeyValue { return c.NetPeerPortKey.Int(port) } func (c *NetConv) SockPeerAddr(addr string) attribute.KeyValue { return c.NetSockPeerAddrKey.String(addr) } func (c *NetConv) SockPeerPort(port int) attribute.KeyValue { return c.NetSockPeerPortKey.Int(port) } // splitHostPort splits a network address hostport of the form "host", // "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", // "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and // port. // // An empty host is returned if it is not provided or unparsable. A negative // port is returned if it is not provided or unparsable. func splitHostPort(hostport string) (host string, port int) { port = -1 if strings.HasPrefix(hostport, "[") { addrEnd := strings.LastIndex(hostport, "]") if addrEnd < 0 { // Invalid hostport. return } if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 { host = hostport[1:addrEnd] return } } else { if i := strings.LastIndex(hostport, ":"); i < 0 { host = hostport return } } host, pStr, err := net.SplitHostPort(hostport) if err != nil { return } p, err := strconv.ParseUint(pStr, 10, 16) if err != nil { return } return host, int(p) } opentelemetry-go-1.21.0/semconv/internal/v2/net_test.go000066400000000000000000000233751452547353200231070ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
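// The tests below exercise the NetConv helpers (Transport, Host, Server,
// Client, Peer, and friends) together with splitHostPort. For reference, a
// sketch of splitHostPort results matching the table-driven cases in this
// file:
//
//	splitHostPort("127.0.0.1")      // "127.0.0.1", -1 (no port provided)
//	splitHostPort(":8080")          // "", 8080
//	splitHostPort("[fe80::1]:8080") // "fe80::1", 8080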
package internal import ( "net" "strconv" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/attribute" ) const ( addr = "127.0.0.1" port = 1834 ) var nc = &NetConv{ NetHostNameKey: attribute.Key("net.host.name"), NetHostPortKey: attribute.Key("net.host.port"), NetPeerNameKey: attribute.Key("net.peer.name"), NetPeerPortKey: attribute.Key("net.peer.port"), NetSockPeerAddrKey: attribute.Key("net.sock.peer.addr"), NetSockPeerPortKey: attribute.Key("net.sock.peer.port"), NetTransportOther: attribute.String("net.transport", "other"), NetTransportTCP: attribute.String("net.transport", "ip_tcp"), NetTransportUDP: attribute.String("net.transport", "ip_udp"), NetTransportInProc: attribute.String("net.transport", "inproc"), } func TestNetTransport(t *testing.T) { transports := map[string]attribute.KeyValue{ "tcp": attribute.String("net.transport", "ip_tcp"), "tcp4": attribute.String("net.transport", "ip_tcp"), "tcp6": attribute.String("net.transport", "ip_tcp"), "udp": attribute.String("net.transport", "ip_udp"), "udp4": attribute.String("net.transport", "ip_udp"), "udp6": attribute.String("net.transport", "ip_udp"), "unix": attribute.String("net.transport", "inproc"), "unixgram": attribute.String("net.transport", "inproc"), "unixpacket": attribute.String("net.transport", "inproc"), "ip:1": attribute.String("net.transport", "other"), "ip:icmp": attribute.String("net.transport", "other"), "ip4:proto": attribute.String("net.transport", "other"), "ip6:proto": attribute.String("net.transport", "other"), } for network, want := range transports { assert.Equal(t, want, nc.Transport(network)) } } func TestNetServerNilListener(t *testing.T) { const addr = "127.0.0.1:8080" got := nc.Server(addr, nil) expected := nc.Host(addr) assert.Equal(t, cap(expected), cap(got), "slice capacity") assert.ElementsMatch(t, expected, got) } type listener struct{ net.Listener } func (listener) Addr() net.Addr { return nil } func TestNetServerNilAddr(t *testing.T) { const addr = "127.0.0.1:8080" got := nc.Server(addr, listener{}) expected := nc.Host(addr) assert.Equal(t, cap(expected), cap(got), "slice capacity") assert.ElementsMatch(t, expected, got) } func newTCPListener() (net.Listener, error) { return net.Listen("tcp4", "127.0.0.1:0") } func TestNetServerTCP(t *testing.T) { ln, err := newTCPListener() require.NoError(t, err) defer func() { require.NoError(t, ln.Close()) }() host, pStr, err := net.SplitHostPort(ln.Addr().String()) require.NoError(t, err) port, err := strconv.Atoi(pStr) require.NoError(t, err) got := nc.Server("example.com:8080", ln) expected := []attribute.KeyValue{ nc.HostName("example.com"), nc.HostPort(8080), nc.NetTransportTCP, nc.NetSockFamilyKey.String("inet"), nc.NetSockHostAddrKey.String(host), nc.NetSockHostPortKey.Int(port), } assert.Equal(t, cap(expected), cap(got), "slice capacity") assert.ElementsMatch(t, expected, got) } func TestNetHost(t *testing.T) { testAddrs(t, []addrTest{ {address: "", expected: nil}, {address: "192.0.0.1", expected: []attribute.KeyValue{ nc.HostName("192.0.0.1"), }}, {address: "192.0.0.1:9090", expected: []attribute.KeyValue{ nc.HostName("192.0.0.1"), nc.HostPort(9090), }}, }, nc.Host) } func TestNetHostName(t *testing.T) { expected := attribute.Key("net.host.name").String(addr) assert.Equal(t, expected, nc.HostName(addr)) } func TestNetHostPort(t *testing.T) { expected := attribute.Key("net.host.port").Int(port) assert.Equal(t, expected, nc.HostPort(port)) } func TestNetClientNilConn(t *testing.T) { const addr 
= "127.0.0.1:8080" got := nc.Client(addr, nil) expected := nc.Peer(addr) assert.Equal(t, cap(expected), cap(got), "slice capacity") assert.ElementsMatch(t, expected, got) } type conn struct{ net.Conn } func (conn) LocalAddr() net.Addr { return nil } func (conn) RemoteAddr() net.Addr { return nil } func TestNetClientNilAddr(t *testing.T) { const addr = "127.0.0.1:8080" got := nc.Client(addr, conn{}) expected := nc.Peer(addr) assert.Equal(t, cap(expected), cap(got), "slice capacity") assert.ElementsMatch(t, expected, got) } func newTCPConn() (net.Conn, net.Listener, error) { ln, err := newTCPListener() if err != nil { return nil, nil, err } conn, err := net.Dial("tcp4", ln.Addr().String()) if err != nil { _ = ln.Close() return nil, nil, err } return conn, ln, nil } func TestNetClientTCP(t *testing.T) { conn, ln, err := newTCPConn() require.NoError(t, err) defer func() { require.NoError(t, ln.Close()) }() defer func() { require.NoError(t, conn.Close()) }() lHost, pStr, err := net.SplitHostPort(conn.LocalAddr().String()) require.NoError(t, err) lPort, err := strconv.Atoi(pStr) require.NoError(t, err) rHost, pStr, err := net.SplitHostPort(conn.RemoteAddr().String()) require.NoError(t, err) rPort, err := strconv.Atoi(pStr) require.NoError(t, err) got := nc.Client("example.com:8080", conn) expected := []attribute.KeyValue{ nc.PeerName("example.com"), nc.PeerPort(8080), nc.NetTransportTCP, nc.NetSockFamilyKey.String("inet"), nc.NetSockPeerAddrKey.String(rHost), nc.NetSockPeerPortKey.Int(rPort), nc.NetSockHostAddrKey.String(lHost), nc.NetSockHostPortKey.Int(lPort), } assert.Equal(t, cap(expected), cap(got), "slice capacity") assert.ElementsMatch(t, expected, got) } type remoteOnlyConn struct{ net.Conn } func (remoteOnlyConn) LocalAddr() net.Addr { return nil } func TestNetClientTCPNilLocal(t *testing.T) { conn, ln, err := newTCPConn() require.NoError(t, err) defer func() { require.NoError(t, ln.Close()) }() defer func() { require.NoError(t, conn.Close()) }() conn = remoteOnlyConn{conn} rHost, pStr, err := net.SplitHostPort(conn.RemoteAddr().String()) require.NoError(t, err) rPort, err := strconv.Atoi(pStr) require.NoError(t, err) got := nc.Client("example.com:8080", conn) expected := []attribute.KeyValue{ nc.PeerName("example.com"), nc.PeerPort(8080), nc.NetTransportTCP, nc.NetSockFamilyKey.String("inet"), nc.NetSockPeerAddrKey.String(rHost), nc.NetSockPeerPortKey.Int(rPort), } assert.Equal(t, cap(expected), cap(got), "slice capacity") assert.ElementsMatch(t, expected, got) } func TestNetPeer(t *testing.T) { testAddrs(t, []addrTest{ {address: "", expected: nil}, {address: "example.com", expected: []attribute.KeyValue{ nc.PeerName("example.com"), }}, {address: "/tmp/file", expected: []attribute.KeyValue{ nc.PeerName("/tmp/file"), }}, {address: "192.0.0.1", expected: []attribute.KeyValue{ nc.PeerName("192.0.0.1"), }}, {address: ":9090", expected: nil}, {address: "192.0.0.1:9090", expected: []attribute.KeyValue{ nc.PeerName("192.0.0.1"), nc.PeerPort(9090), }}, }, nc.Peer) } func TestNetPeerName(t *testing.T) { expected := attribute.Key("net.peer.name").String(addr) assert.Equal(t, expected, nc.PeerName(addr)) } func TestNetPeerPort(t *testing.T) { expected := attribute.Key("net.peer.port").Int(port) assert.Equal(t, expected, nc.PeerPort(port)) } func TestNetSockPeerName(t *testing.T) { expected := attribute.Key("net.sock.peer.addr").String(addr) assert.Equal(t, expected, nc.SockPeerAddr(addr)) } func TestNetSockPeerPort(t *testing.T) { expected := attribute.Key("net.sock.peer.port").Int(port) 
assert.Equal(t, expected, nc.SockPeerPort(port)) } func TestFamily(t *testing.T) { tests := []struct { network string address string expect string }{ {"", "", ""}, {"unix", "", "unix"}, {"unix", "gibberish", "unix"}, {"unixgram", "", "unix"}, {"unixgram", "gibberish", "unix"}, {"unixpacket", "gibberish", "unix"}, {"tcp", "123.0.2.8", "inet"}, {"tcp", "gibberish", ""}, {"", "123.0.2.8", "inet"}, {"", "gibberish", ""}, {"tcp", "fe80::1", "inet6"}, {"", "fe80::1", "inet6"}, } for _, test := range tests { got := family(test.network, test.address) assert.Equal(t, test.expect, got, test.network+"/"+test.address) } } func TestSplitHostPort(t *testing.T) { tests := []struct { hostport string host string port int }{ {"", "", -1}, {":8080", "", 8080}, {"127.0.0.1", "127.0.0.1", -1}, {"www.example.com", "www.example.com", -1}, {"127.0.0.1%25en0", "127.0.0.1%25en0", -1}, {"[]", "", -1}, // Ensure this doesn't panic. {"[fe80::1", "", -1}, {"[fe80::1]", "fe80::1", -1}, {"[fe80::1%25en0]", "fe80::1%25en0", -1}, {"[fe80::1]:8080", "fe80::1", 8080}, {"[fe80::1]::", "", -1}, // Too many colons. {"127.0.0.1:", "127.0.0.1", -1}, {"127.0.0.1:port", "127.0.0.1", -1}, {"127.0.0.1:8080", "127.0.0.1", 8080}, {"www.example.com:8080", "www.example.com", 8080}, {"127.0.0.1%25en0:8080", "127.0.0.1%25en0", 8080}, } for _, test := range tests { h, p := splitHostPort(test.hostport) assert.Equal(t, test.host, h, test.hostport) assert.Equal(t, test.port, p, test.hostport) } } type addrTest struct { address string expected []attribute.KeyValue } func testAddrs(t *testing.T, tests []addrTest, f func(string) []attribute.KeyValue) { t.Helper() for _, test := range tests { got := f(test.address) assert.Equal(t, cap(test.expected), cap(got), "slice capacity") assert.ElementsMatch(t, test.expected, got, test.address) } } opentelemetry-go-1.21.0/semconv/internal/v3/000077500000000000000000000000001452547353200207225ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/internal/v3/http.go000066400000000000000000000272121452547353200222340ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal // import "go.opentelemetry.io/otel/semconv/internal/v3" import ( "fmt" "net/http" "strings" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" ) // HTTPConv are the HTTP semantic convention attributes defined for a version // of the OpenTelemetry specification. type HTTPConv struct { NetConv *NetConv EnduserIDKey attribute.Key HTTPClientIPKey attribute.Key HTTPFlavorKey attribute.Key HTTPMethodKey attribute.Key HTTPRequestContentLengthKey attribute.Key HTTPResponseContentLengthKey attribute.Key HTTPRouteKey attribute.Key HTTPSchemeHTTP attribute.KeyValue HTTPSchemeHTTPS attribute.KeyValue HTTPStatusCodeKey attribute.Key HTTPTargetKey attribute.Key HTTPURLKey attribute.Key UserAgentOriginalKey attribute.Key } // ClientResponse returns attributes for an HTTP response received by a client // from a server. 
The following attributes are returned if the related values // are defined in resp: "http.status.code", "http.response_content_length". // // This does not add all OpenTelemetry required attributes for an HTTP event, // it assumes ClientRequest was used to create the span with a complete set of // attributes. If a complete set of attributes can be generated using the // request contained in resp. For example: // // append(ClientResponse(resp), ClientRequest(resp.Request)...) func (c *HTTPConv) ClientResponse(resp *http.Response) []attribute.KeyValue { var n int if resp.StatusCode > 0 { n++ } if resp.ContentLength > 0 { n++ } attrs := make([]attribute.KeyValue, 0, n) if resp.StatusCode > 0 { attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode)) } if resp.ContentLength > 0 { attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength))) } return attrs } // ClientRequest returns attributes for an HTTP request made by a client. The // following attributes are always returned: "http.url", "http.flavor", // "http.method", "net.peer.name". The following attributes are returned if the // related values are defined in req: "net.peer.port", "http.user_agent", // "http.request_content_length", "enduser.id". func (c *HTTPConv) ClientRequest(req *http.Request) []attribute.KeyValue { n := 3 // URL, peer name, proto, and method. var h string if req.URL != nil { h = req.URL.Host } peer, p := firstHostPort(h, req.Header.Get("Host")) port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p) if port > 0 { n++ } useragent := req.UserAgent() if useragent != "" { n++ } if req.ContentLength > 0 { n++ } userID, _, hasUserID := req.BasicAuth() if hasUserID { n++ } attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.method(req.Method)) attrs = append(attrs, c.proto(req.Proto)) var u string if req.URL != nil { // Remove any username/password info that may be in the URL. userinfo := req.URL.User req.URL.User = nil u = req.URL.String() // Restore any username/password info that was removed. req.URL.User = userinfo } attrs = append(attrs, c.HTTPURLKey.String(u)) attrs = append(attrs, c.NetConv.PeerName(peer)) if port > 0 { attrs = append(attrs, c.NetConv.PeerPort(port)) } if useragent != "" { attrs = append(attrs, c.UserAgentOriginalKey.String(useragent)) } if l := req.ContentLength; l > 0 { attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l)) } if hasUserID { attrs = append(attrs, c.EnduserIDKey.String(userID)) } return attrs } // ServerRequest returns attributes for an HTTP request received by a server. // // The server must be the primary server name if it is known. For example this // would be the ServerName directive // (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache // server, and the server_name directive // (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an // nginx server. More generically, the primary server name would be the host // header value that matches the default virtual host of an HTTP server. It // should include the host identifier and if a port is used to route to the // server that port identifier should be included as an appropriate port // suffix. // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. // // The following attributes are always returned: "http.method", "http.scheme", // "http.flavor", "http.target", "net.host.name". 
The following attributes are // returned if they related values are defined in req: "net.host.port", // "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", // "http.client_ip". func (c *HTTPConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue { // TODO: This currently does not add the specification required // `http.target` attribute. It has too high of a cardinality to safely be // added. An alternate should be added, or this comment removed, when it is // addressed by the specification. If it is ultimately decided to continue // not including the attribute, the HTTPTargetKey field of the HTTPConv // should be removed as well. n := 4 // Method, scheme, proto, and host name. var host string var p int if server == "" { host, p = splitHostPort(req.Host) } else { // Prioritize the primary server name. host, p = splitHostPort(server) if p < 0 { _, p = splitHostPort(req.Host) } } hostPort := requiredHTTPPort(req.TLS != nil, p) if hostPort > 0 { n++ } peer, peerPort := splitHostPort(req.RemoteAddr) if peer != "" { n++ if peerPort > 0 { n++ } } useragent := req.UserAgent() if useragent != "" { n++ } userID, _, hasUserID := req.BasicAuth() if hasUserID { n++ } clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) if clientIP != "" { n++ } attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.method(req.Method)) attrs = append(attrs, c.scheme(req.TLS != nil)) attrs = append(attrs, c.proto(req.Proto)) attrs = append(attrs, c.NetConv.HostName(host)) if hostPort > 0 { attrs = append(attrs, c.NetConv.HostPort(hostPort)) } if peer != "" { // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a // file-path that would be interpreted with a sock family. attrs = append(attrs, c.NetConv.SockPeerAddr(peer)) if peerPort > 0 { attrs = append(attrs, c.NetConv.SockPeerPort(peerPort)) } } if useragent != "" { attrs = append(attrs, c.UserAgentOriginalKey.String(useragent)) } if hasUserID { attrs = append(attrs, c.EnduserIDKey.String(userID)) } if clientIP != "" { attrs = append(attrs, c.HTTPClientIPKey.String(clientIP)) } return attrs } func (c *HTTPConv) method(method string) attribute.KeyValue { if method == "" { return c.HTTPMethodKey.String(http.MethodGet) } return c.HTTPMethodKey.String(method) } func (c *HTTPConv) scheme(https bool) attribute.KeyValue { // nolint:revive if https { return c.HTTPSchemeHTTPS } return c.HTTPSchemeHTTP } func (c *HTTPConv) proto(proto string) attribute.KeyValue { switch proto { case "HTTP/1.0": return c.HTTPFlavorKey.String("1.0") case "HTTP/1.1": return c.HTTPFlavorKey.String("1.1") case "HTTP/2": return c.HTTPFlavorKey.String("2.0") case "HTTP/3": return c.HTTPFlavorKey.String("3.0") default: return c.HTTPFlavorKey.String(proto) } } func serverClientIP(xForwardedFor string) string { if idx := strings.Index(xForwardedFor, ","); idx >= 0 { xForwardedFor = xForwardedFor[:idx] } return xForwardedFor } func requiredHTTPPort(https bool, port int) int { // nolint:revive if https { if port > 0 && port != 443 { return port } } else { if port > 0 && port != 80 { return port } } return -1 } // Return the request host and port from the first non-empty source. func firstHostPort(source ...string) (host string, port int) { for _, hostport := range source { host, port = splitHostPort(hostport) if host != "" || port > 0 { break } } return } // RequestHeader returns the contents of h as OpenTelemetry attributes. 
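//
// For example (a sketch; the header name and value are illustrative only),
// http.Header{"X-Tenant": {"acme"}} would be reported as
// attribute.StringSlice("http.request.header.x_tenant", []string{"acme"}).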
func (c *HTTPConv) RequestHeader(h http.Header) []attribute.KeyValue { return c.header("http.request.header", h) } // ResponseHeader returns the contents of h as OpenTelemetry attributes. func (c *HTTPConv) ResponseHeader(h http.Header) []attribute.KeyValue { return c.header("http.response.header", h) } func (c *HTTPConv) header(prefix string, h http.Header) []attribute.KeyValue { key := func(k string) attribute.Key { k = strings.ToLower(k) k = strings.ReplaceAll(k, "-", "_") k = fmt.Sprintf("%s.%s", prefix, k) return attribute.Key(k) } attrs := make([]attribute.KeyValue, 0, len(h)) for k, v := range h { attrs = append(attrs, key(k).StringSlice(v)) } return attrs } // ClientStatus returns a span status code and message for an HTTP status code // value received by a client. func (c *HTTPConv) ClientStatus(code int) (codes.Code, string) { stat, valid := validateHTTPStatusCode(code) if !valid { return stat, fmt.Sprintf("Invalid HTTP status code %d", code) } return stat, "" } // ServerStatus returns a span status code and message for an HTTP status code // value returned by a server. Status codes in the 400-499 range are not // returned as errors. func (c *HTTPConv) ServerStatus(code int) (codes.Code, string) { stat, valid := validateHTTPStatusCode(code) if !valid { return stat, fmt.Sprintf("Invalid HTTP status code %d", code) } if code/100 == 4 { return codes.Unset, "" } return stat, "" } type codeRange struct { fromInclusive int toInclusive int } func (r codeRange) contains(code int) bool { return r.fromInclusive <= code && code <= r.toInclusive } var validRangesPerCategory = map[int][]codeRange{ 1: { {http.StatusContinue, http.StatusEarlyHints}, }, 2: { {http.StatusOK, http.StatusAlreadyReported}, {http.StatusIMUsed, http.StatusIMUsed}, }, 3: { {http.StatusMultipleChoices, http.StatusUseProxy}, {http.StatusTemporaryRedirect, http.StatusPermanentRedirect}, }, 4: { {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful… {http.StatusMisdirectedRequest, http.StatusUpgradeRequired}, {http.StatusPreconditionRequired, http.StatusTooManyRequests}, {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge}, {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons}, }, 5: { {http.StatusInternalServerError, http.StatusLoopDetected}, {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired}, }, } // validateHTTPStatusCode validates the HTTP status code and returns // corresponding span status code. If the `code` is not a valid HTTP status // code, returns span status Error and false. func validateHTTPStatusCode(code int) (codes.Code, bool) { category := code / 100 ranges, ok := validRangesPerCategory[category] if !ok { return codes.Error, false } ok = false for _, crange := range ranges { ok = crange.contains(code) if ok { break } } if !ok { return codes.Error, false } if category > 0 && category < 4 { return codes.Unset, true } return codes.Error, true } opentelemetry-go-1.21.0/semconv/internal/v3/http_test.go000066400000000000000000000401271452547353200232730ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal import ( "net/http" "net/http/httptest" "net/url" "strconv" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" ) var hc = &HTTPConv{ NetConv: nc, EnduserIDKey: attribute.Key("enduser.id"), HTTPClientIPKey: attribute.Key("http.client_ip"), HTTPFlavorKey: attribute.Key("http.flavor"), HTTPMethodKey: attribute.Key("http.method"), HTTPRequestContentLengthKey: attribute.Key("http.request_content_length"), HTTPResponseContentLengthKey: attribute.Key("http.response_content_length"), HTTPRouteKey: attribute.Key("http.route"), HTTPSchemeHTTP: attribute.String("http.scheme", "http"), HTTPSchemeHTTPS: attribute.String("http.scheme", "https"), HTTPStatusCodeKey: attribute.Key("http.status_code"), HTTPTargetKey: attribute.Key("http.target"), HTTPURLKey: attribute.Key("http.url"), UserAgentOriginalKey: attribute.Key("user_agent.original"), } func TestHTTPClientResponse(t *testing.T) { const stat, n = 201, 397 resp := &http.Response{ StatusCode: stat, ContentLength: n, } got := hc.ClientResponse(resp) assert.Equal(t, 2, cap(got), "slice capacity") assert.ElementsMatch(t, []attribute.KeyValue{ attribute.Key("http.status_code").Int(stat), attribute.Key("http.response_content_length").Int(n), }, got) } func TestHTTPSClientRequest(t *testing.T) { req := &http.Request{ Method: http.MethodGet, URL: &url.URL{ Scheme: "https", Host: "127.0.0.1:443", Path: "/resource", }, Proto: "HTTP/1.0", ProtoMajor: 1, ProtoMinor: 0, } assert.Equal( t, []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.flavor", "1.0"), attribute.String("http.url", "https://127.0.0.1:443/resource"), attribute.String("net.peer.name", "127.0.0.1"), }, hc.ClientRequest(req), ) } func TestHTTPClientRequest(t *testing.T) { const ( user = "alice" n = 128 agent = "Go-http-client/1.1" ) req := &http.Request{ Method: http.MethodGet, URL: &url.URL{ Scheme: "http", Host: "127.0.0.1:8080", Path: "/resource", }, Proto: "HTTP/1.0", ProtoMajor: 1, ProtoMinor: 0, Header: http.Header{ "User-Agent": []string{agent}, }, ContentLength: n, } req.SetBasicAuth(user, "pswrd") assert.Equal( t, []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.flavor", "1.0"), attribute.String("http.url", "http://127.0.0.1:8080/resource"), attribute.String("net.peer.name", "127.0.0.1"), attribute.Int("net.peer.port", 8080), attribute.String("user_agent.original", agent), attribute.Int("http.request_content_length", n), attribute.String("enduser.id", user), }, hc.ClientRequest(req), ) } func TestHTTPClientRequestRequired(t *testing.T) { req := new(http.Request) var got []attribute.KeyValue assert.NotPanics(t, func() { got = hc.ClientRequest(req) }) want := []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.flavor", ""), attribute.String("http.url", ""), attribute.String("net.peer.name", ""), } assert.Equal(t, want, got) } func TestHTTPServerRequest(t *testing.T) { got := make(chan *http.Request, 1) handler := func(w http.ResponseWriter, r *http.Request) { got 
<- r w.WriteHeader(http.StatusOK) } srv := httptest.NewServer(http.HandlerFunc(handler)) defer srv.Close() srvURL, err := url.Parse(srv.URL) require.NoError(t, err) srvPort, err := strconv.ParseInt(srvURL.Port(), 10, 32) require.NoError(t, err) resp, err := srv.Client().Get(srv.URL) require.NoError(t, err) require.NoError(t, resp.Body.Close()) req := <-got peer, peerPort := splitHostPort(req.RemoteAddr) const user = "alice" req.SetBasicAuth(user, "pswrd") const clientIP = "127.0.0.5" req.Header.Add("X-Forwarded-For", clientIP) assert.ElementsMatch(t, []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.scheme", "http"), attribute.String("http.flavor", "1.1"), attribute.String("net.host.name", srvURL.Hostname()), attribute.Int("net.host.port", int(srvPort)), attribute.String("net.sock.peer.addr", peer), attribute.Int("net.sock.peer.port", peerPort), attribute.String("user_agent.original", "Go-http-client/1.1"), attribute.String("enduser.id", user), attribute.String("http.client_ip", clientIP), }, hc.ServerRequest("", req)) } func TestHTTPServerName(t *testing.T) { req := new(http.Request) var got []attribute.KeyValue const ( host = "test.semconv.server" port = 8080 ) portStr := strconv.Itoa(port) server := host + ":" + portStr assert.NotPanics(t, func() { got = hc.ServerRequest(server, req) }) assert.Contains(t, got, attribute.String("net.host.name", host)) assert.Contains(t, got, attribute.Int("net.host.port", port)) req = &http.Request{Host: "alt.host.name:" + portStr} // The server parameter does not include a port, ServerRequest should use // the port in the request Host field. assert.NotPanics(t, func() { got = hc.ServerRequest(host, req) }) assert.Contains(t, got, attribute.String("net.host.name", host)) assert.Contains(t, got, attribute.Int("net.host.port", port)) } func TestHTTPServerRequestFailsGracefully(t *testing.T) { req := new(http.Request) var got []attribute.KeyValue assert.NotPanics(t, func() { got = hc.ServerRequest("", req) }) want := []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.scheme", "http"), attribute.String("http.flavor", ""), attribute.String("net.host.name", ""), } assert.ElementsMatch(t, want, got) } func TestMethod(t *testing.T) { assert.Equal(t, attribute.String("http.method", "POST"), hc.method("POST")) assert.Equal(t, attribute.String("http.method", "GET"), hc.method("")) assert.Equal(t, attribute.String("http.method", "garbage"), hc.method("garbage")) } func TestScheme(t *testing.T) { assert.Equal(t, attribute.String("http.scheme", "http"), hc.scheme(false)) assert.Equal(t, attribute.String("http.scheme", "https"), hc.scheme(true)) } func TestProto(t *testing.T) { tests := map[string]string{ "HTTP/1.0": "1.0", "HTTP/1.1": "1.1", "HTTP/2": "2.0", "HTTP/3": "3.0", "SPDY": "SPDY", "QUIC": "QUIC", "other": "other", } for proto, want := range tests { expect := attribute.String("http.flavor", want) assert.Equal(t, expect, hc.proto(proto), proto) } } func TestServerClientIP(t *testing.T) { tests := []struct { xForwardedFor string want string }{ {"", ""}, {"127.0.0.1", "127.0.0.1"}, {"127.0.0.1,127.0.0.5", "127.0.0.1"}, } for _, test := range tests { got := serverClientIP(test.xForwardedFor) assert.Equal(t, test.want, got, test.xForwardedFor) } } func TestRequiredHTTPPort(t *testing.T) { tests := []struct { https bool port int want int }{ {true, 443, -1}, {true, 80, 80}, {true, 8081, 8081}, {false, 443, 443}, {false, 80, -1}, {false, 8080, 8080}, } for _, test := range tests { got := 
requiredHTTPPort(test.https, test.port) assert.Equal(t, test.want, got, test.https, test.port) } } func TestFirstHostPort(t *testing.T) { host, port := "127.0.0.1", 8080 hostport := "127.0.0.1:8080" sources := [][]string{ {hostport}, {"", hostport}, {"", "", hostport}, {"", "", hostport, ""}, {"", "", hostport, "127.0.0.3:80"}, } for _, src := range sources { h, p := firstHostPort(src...) assert.Equal(t, host, h, src) assert.Equal(t, port, p, src) } } func TestRequestHeader(t *testing.T) { ips := []string{"127.0.0.5", "127.0.0.9"} user := []string{"alice"} h := http.Header{"ips": ips, "user": user} got := hc.RequestHeader(h) assert.Equal(t, 2, cap(got), "slice capacity") assert.ElementsMatch(t, []attribute.KeyValue{ attribute.StringSlice("http.request.header.ips", ips), attribute.StringSlice("http.request.header.user", user), }, got) } func TestReponseHeader(t *testing.T) { ips := []string{"127.0.0.5", "127.0.0.9"} user := []string{"alice"} h := http.Header{"ips": ips, "user": user} got := hc.ResponseHeader(h) assert.Equal(t, 2, cap(got), "slice capacity") assert.ElementsMatch(t, []attribute.KeyValue{ attribute.StringSlice("http.response.header.ips", ips), attribute.StringSlice("http.response.header.user", user), }, got) } func TestClientStatus(t *testing.T) { tests := []struct { code int stat codes.Code msg bool }{ {0, codes.Error, true}, {http.StatusContinue, codes.Unset, false}, {http.StatusSwitchingProtocols, codes.Unset, false}, {http.StatusProcessing, codes.Unset, false}, {http.StatusEarlyHints, codes.Unset, false}, {http.StatusOK, codes.Unset, false}, {http.StatusCreated, codes.Unset, false}, {http.StatusAccepted, codes.Unset, false}, {http.StatusNonAuthoritativeInfo, codes.Unset, false}, {http.StatusNoContent, codes.Unset, false}, {http.StatusResetContent, codes.Unset, false}, {http.StatusPartialContent, codes.Unset, false}, {http.StatusMultiStatus, codes.Unset, false}, {http.StatusAlreadyReported, codes.Unset, false}, {http.StatusIMUsed, codes.Unset, false}, {http.StatusMultipleChoices, codes.Unset, false}, {http.StatusMovedPermanently, codes.Unset, false}, {http.StatusFound, codes.Unset, false}, {http.StatusSeeOther, codes.Unset, false}, {http.StatusNotModified, codes.Unset, false}, {http.StatusUseProxy, codes.Unset, false}, {306, codes.Error, true}, {http.StatusTemporaryRedirect, codes.Unset, false}, {http.StatusPermanentRedirect, codes.Unset, false}, {http.StatusBadRequest, codes.Error, false}, {http.StatusUnauthorized, codes.Error, false}, {http.StatusPaymentRequired, codes.Error, false}, {http.StatusForbidden, codes.Error, false}, {http.StatusNotFound, codes.Error, false}, {http.StatusMethodNotAllowed, codes.Error, false}, {http.StatusNotAcceptable, codes.Error, false}, {http.StatusProxyAuthRequired, codes.Error, false}, {http.StatusRequestTimeout, codes.Error, false}, {http.StatusConflict, codes.Error, false}, {http.StatusGone, codes.Error, false}, {http.StatusLengthRequired, codes.Error, false}, {http.StatusPreconditionFailed, codes.Error, false}, {http.StatusRequestEntityTooLarge, codes.Error, false}, {http.StatusRequestURITooLong, codes.Error, false}, {http.StatusUnsupportedMediaType, codes.Error, false}, {http.StatusRequestedRangeNotSatisfiable, codes.Error, false}, {http.StatusExpectationFailed, codes.Error, false}, {http.StatusTeapot, codes.Error, false}, {http.StatusMisdirectedRequest, codes.Error, false}, {http.StatusUnprocessableEntity, codes.Error, false}, {http.StatusLocked, codes.Error, false}, {http.StatusFailedDependency, codes.Error, false}, 
{http.StatusTooEarly, codes.Error, false}, {http.StatusUpgradeRequired, codes.Error, false}, {http.StatusPreconditionRequired, codes.Error, false}, {http.StatusTooManyRequests, codes.Error, false}, {http.StatusRequestHeaderFieldsTooLarge, codes.Error, false}, {http.StatusUnavailableForLegalReasons, codes.Error, false}, {http.StatusInternalServerError, codes.Error, false}, {http.StatusNotImplemented, codes.Error, false}, {http.StatusBadGateway, codes.Error, false}, {http.StatusServiceUnavailable, codes.Error, false}, {http.StatusGatewayTimeout, codes.Error, false}, {http.StatusHTTPVersionNotSupported, codes.Error, false}, {http.StatusVariantAlsoNegotiates, codes.Error, false}, {http.StatusInsufficientStorage, codes.Error, false}, {http.StatusLoopDetected, codes.Error, false}, {http.StatusNotExtended, codes.Error, false}, {http.StatusNetworkAuthenticationRequired, codes.Error, false}, {600, codes.Error, true}, } for _, test := range tests { c, msg := hc.ClientStatus(test.code) assert.Equal(t, test.stat, c) if test.msg && msg == "" { t.Errorf("expected non-empty message for %d", test.code) } else if !test.msg && msg != "" { t.Errorf("expected empty message for %d, got: %s", test.code, msg) } } } func TestServerStatus(t *testing.T) { tests := []struct { code int stat codes.Code msg bool }{ {0, codes.Error, true}, {http.StatusContinue, codes.Unset, false}, {http.StatusSwitchingProtocols, codes.Unset, false}, {http.StatusProcessing, codes.Unset, false}, {http.StatusEarlyHints, codes.Unset, false}, {http.StatusOK, codes.Unset, false}, {http.StatusCreated, codes.Unset, false}, {http.StatusAccepted, codes.Unset, false}, {http.StatusNonAuthoritativeInfo, codes.Unset, false}, {http.StatusNoContent, codes.Unset, false}, {http.StatusResetContent, codes.Unset, false}, {http.StatusPartialContent, codes.Unset, false}, {http.StatusMultiStatus, codes.Unset, false}, {http.StatusAlreadyReported, codes.Unset, false}, {http.StatusIMUsed, codes.Unset, false}, {http.StatusMultipleChoices, codes.Unset, false}, {http.StatusMovedPermanently, codes.Unset, false}, {http.StatusFound, codes.Unset, false}, {http.StatusSeeOther, codes.Unset, false}, {http.StatusNotModified, codes.Unset, false}, {http.StatusUseProxy, codes.Unset, false}, {306, codes.Error, true}, {http.StatusTemporaryRedirect, codes.Unset, false}, {http.StatusPermanentRedirect, codes.Unset, false}, {http.StatusBadRequest, codes.Unset, false}, {http.StatusUnauthorized, codes.Unset, false}, {http.StatusPaymentRequired, codes.Unset, false}, {http.StatusForbidden, codes.Unset, false}, {http.StatusNotFound, codes.Unset, false}, {http.StatusMethodNotAllowed, codes.Unset, false}, {http.StatusNotAcceptable, codes.Unset, false}, {http.StatusProxyAuthRequired, codes.Unset, false}, {http.StatusRequestTimeout, codes.Unset, false}, {http.StatusConflict, codes.Unset, false}, {http.StatusGone, codes.Unset, false}, {http.StatusLengthRequired, codes.Unset, false}, {http.StatusPreconditionFailed, codes.Unset, false}, {http.StatusRequestEntityTooLarge, codes.Unset, false}, {http.StatusRequestURITooLong, codes.Unset, false}, {http.StatusUnsupportedMediaType, codes.Unset, false}, {http.StatusRequestedRangeNotSatisfiable, codes.Unset, false}, {http.StatusExpectationFailed, codes.Unset, false}, {http.StatusTeapot, codes.Unset, false}, {http.StatusMisdirectedRequest, codes.Unset, false}, {http.StatusUnprocessableEntity, codes.Unset, false}, {http.StatusLocked, codes.Unset, false}, {http.StatusFailedDependency, codes.Unset, false}, {http.StatusTooEarly, codes.Unset, false}, 
{http.StatusUpgradeRequired, codes.Unset, false}, {http.StatusPreconditionRequired, codes.Unset, false}, {http.StatusTooManyRequests, codes.Unset, false}, {http.StatusRequestHeaderFieldsTooLarge, codes.Unset, false}, {http.StatusUnavailableForLegalReasons, codes.Unset, false}, {http.StatusInternalServerError, codes.Error, false}, {http.StatusNotImplemented, codes.Error, false}, {http.StatusBadGateway, codes.Error, false}, {http.StatusServiceUnavailable, codes.Error, false}, {http.StatusGatewayTimeout, codes.Error, false}, {http.StatusHTTPVersionNotSupported, codes.Error, false}, {http.StatusVariantAlsoNegotiates, codes.Error, false}, {http.StatusInsufficientStorage, codes.Error, false}, {http.StatusLoopDetected, codes.Error, false}, {http.StatusNotExtended, codes.Error, false}, {http.StatusNetworkAuthenticationRequired, codes.Error, false}, {600, codes.Error, true}, } for _, test := range tests { c, msg := hc.ServerStatus(test.code) assert.Equal(t, test.stat, c) if test.msg && msg == "" { t.Errorf("expected non-empty message for %d", test.code) } else if !test.msg && msg != "" { t.Errorf("expected empty message for %d, got: %s", test.code, msg) } } } opentelemetry-go-1.21.0/semconv/internal/v3/net.go000066400000000000000000000202121452547353200220340ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal // import "go.opentelemetry.io/otel/semconv/internal/v3" import ( "net" "strconv" "strings" "go.opentelemetry.io/otel/attribute" ) // NetConv are the network semantic convention attributes defined for a version // of the OpenTelemetry specification. type NetConv struct { NetHostNameKey attribute.Key NetHostPortKey attribute.Key NetPeerNameKey attribute.Key NetPeerPortKey attribute.Key NetSockFamilyKey attribute.Key NetSockPeerAddrKey attribute.Key NetSockPeerPortKey attribute.Key NetSockHostAddrKey attribute.Key NetSockHostPortKey attribute.Key NetTransportOther attribute.KeyValue NetTransportTCP attribute.KeyValue NetTransportUDP attribute.KeyValue NetTransportInProc attribute.KeyValue } func (c *NetConv) Transport(network string) attribute.KeyValue { switch network { case "tcp", "tcp4", "tcp6": return c.NetTransportTCP case "udp", "udp4", "udp6": return c.NetTransportUDP case "unix", "unixgram", "unixpacket": return c.NetTransportInProc default: // "ip:*", "ip4:*", and "ip6:*" all are considered other. return c.NetTransportOther } } // Host returns attributes for a network host address. func (c *NetConv) Host(address string) []attribute.KeyValue { h, p := splitHostPort(address) var n int if h != "" { n++ if p > 0 { n++ } } if n == 0 { return nil } attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.HostName(h)) if p > 0 { attrs = append(attrs, c.HostPort(int(p))) } return attrs } // Server returns attributes for a network listener listening at address. See // net.Listen for information about acceptable address values, address should // be the same as the one used to create ln. 
If ln is nil, only network host // attributes will be returned that describe address. Otherwise, the socket // level information about ln will also be included. func (c *NetConv) Server(address string, ln net.Listener) []attribute.KeyValue { if ln == nil { return c.Host(address) } lAddr := ln.Addr() if lAddr == nil { return c.Host(address) } hostName, hostPort := splitHostPort(address) sockHostAddr, sockHostPort := splitHostPort(lAddr.String()) network := lAddr.Network() sockFamily := family(network, sockHostAddr) n := nonZeroStr(hostName, network, sockHostAddr, sockFamily) n += positiveInt(hostPort, sockHostPort) attr := make([]attribute.KeyValue, 0, n) if hostName != "" { attr = append(attr, c.HostName(hostName)) if hostPort > 0 { // Only if net.host.name is set should net.host.port be. attr = append(attr, c.HostPort(hostPort)) } } if network != "" { attr = append(attr, c.Transport(network)) } if sockFamily != "" { attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) } if sockHostAddr != "" { attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) if sockHostPort > 0 { // Only if net.sock.host.addr is set should net.sock.host.port be. attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) } } return attr } func (c *NetConv) HostName(name string) attribute.KeyValue { return c.NetHostNameKey.String(name) } func (c *NetConv) HostPort(port int) attribute.KeyValue { return c.NetHostPortKey.Int(port) } // Client returns attributes for a client network connection to address. See // net.Dial for information about acceptable address values, address should be // the same as the one used to create conn. If conn is nil, only network peer // attributes will be returned that describe address. Otherwise, the socket // level information about conn will also be included. func (c *NetConv) Client(address string, conn net.Conn) []attribute.KeyValue { if conn == nil { return c.Peer(address) } lAddr, rAddr := conn.LocalAddr(), conn.RemoteAddr() var network string switch { case lAddr != nil: network = lAddr.Network() case rAddr != nil: network = rAddr.Network() default: return c.Peer(address) } peerName, peerPort := splitHostPort(address) var ( sockFamily string sockPeerAddr string sockPeerPort int sockHostAddr string sockHostPort int ) if lAddr != nil { sockHostAddr, sockHostPort = splitHostPort(lAddr.String()) } if rAddr != nil { sockPeerAddr, sockPeerPort = splitHostPort(rAddr.String()) } switch { case sockHostAddr != "": sockFamily = family(network, sockHostAddr) case sockPeerAddr != "": sockFamily = family(network, sockPeerAddr) } n := nonZeroStr(peerName, network, sockPeerAddr, sockHostAddr, sockFamily) n += positiveInt(peerPort, sockPeerPort, sockHostPort) attr := make([]attribute.KeyValue, 0, n) if peerName != "" { attr = append(attr, c.PeerName(peerName)) if peerPort > 0 { // Only if net.peer.name is set should net.peer.port be. attr = append(attr, c.PeerPort(peerPort)) } } if network != "" { attr = append(attr, c.Transport(network)) } if sockFamily != "" { attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) } if sockPeerAddr != "" { attr = append(attr, c.NetSockPeerAddrKey.String(sockPeerAddr)) if sockPeerPort > 0 { // Only if net.sock.peer.addr is set should net.sock.peer.port be. attr = append(attr, c.NetSockPeerPortKey.Int(sockPeerPort)) } } if sockHostAddr != "" { attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) if sockHostPort > 0 { // Only if net.sock.host.addr is set should net.sock.host.port be. 
attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) } } return attr } func family(network, address string) string { switch network { case "unix", "unixgram", "unixpacket": return "unix" default: if ip := net.ParseIP(address); ip != nil { if ip.To4() == nil { return "inet6" } return "inet" } } return "" } func nonZeroStr(strs ...string) int { var n int for _, str := range strs { if str != "" { n++ } } return n } func positiveInt(ints ...int) int { var n int for _, i := range ints { if i > 0 { n++ } } return n } // Peer returns attributes for a network peer address. func (c *NetConv) Peer(address string) []attribute.KeyValue { h, p := splitHostPort(address) var n int if h != "" { n++ if p > 0 { n++ } } if n == 0 { return nil } attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.PeerName(h)) if p > 0 { attrs = append(attrs, c.PeerPort(int(p))) } return attrs } func (c *NetConv) PeerName(name string) attribute.KeyValue { return c.NetPeerNameKey.String(name) } func (c *NetConv) PeerPort(port int) attribute.KeyValue { return c.NetPeerPortKey.Int(port) } func (c *NetConv) SockPeerAddr(addr string) attribute.KeyValue { return c.NetSockPeerAddrKey.String(addr) } func (c *NetConv) SockPeerPort(port int) attribute.KeyValue { return c.NetSockPeerPortKey.Int(port) } // splitHostPort splits a network address hostport of the form "host", // "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", // "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and // port. // // An empty host is returned if it is not provided or unparsable. A negative // port is returned if it is not provided or unparsable. func splitHostPort(hostport string) (host string, port int) { port = -1 if strings.HasPrefix(hostport, "[") { addrEnd := strings.LastIndex(hostport, "]") if addrEnd < 0 { // Invalid hostport. return } if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 { host = hostport[1:addrEnd] return } } else { if i := strings.LastIndex(hostport, ":"); i < 0 { host = hostport return } } host, pStr, err := net.SplitHostPort(hostport) if err != nil { return } p, err := strconv.ParseUint(pStr, 10, 16) if err != nil { return } return host, int(p) } opentelemetry-go-1.21.0/semconv/internal/v3/net_test.go000066400000000000000000000233751452547353200231100ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
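// A quick sketch of splitHostPort's contract, based on the TestSplitHostPort
// cases below (an illustrative sketch, not normative):
//
//	splitHostPort("127.0.0.1:8080")  // ("127.0.0.1", 8080)
//	splitHostPort("[fe80::1]:8080")  // ("fe80::1", 8080)
//	splitHostPort("www.example.com") // ("www.example.com", -1): no port
//	splitHostPort(":8080")           // ("", 8080): no host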
package internal import ( "net" "strconv" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/attribute" ) const ( addr = "127.0.0.1" port = 1834 ) var nc = &NetConv{ NetHostNameKey: attribute.Key("net.host.name"), NetHostPortKey: attribute.Key("net.host.port"), NetPeerNameKey: attribute.Key("net.peer.name"), NetPeerPortKey: attribute.Key("net.peer.port"), NetSockPeerAddrKey: attribute.Key("net.sock.peer.addr"), NetSockPeerPortKey: attribute.Key("net.sock.peer.port"), NetTransportOther: attribute.String("net.transport", "other"), NetTransportTCP: attribute.String("net.transport", "ip_tcp"), NetTransportUDP: attribute.String("net.transport", "ip_udp"), NetTransportInProc: attribute.String("net.transport", "inproc"), } func TestNetTransport(t *testing.T) { transports := map[string]attribute.KeyValue{ "tcp": attribute.String("net.transport", "ip_tcp"), "tcp4": attribute.String("net.transport", "ip_tcp"), "tcp6": attribute.String("net.transport", "ip_tcp"), "udp": attribute.String("net.transport", "ip_udp"), "udp4": attribute.String("net.transport", "ip_udp"), "udp6": attribute.String("net.transport", "ip_udp"), "unix": attribute.String("net.transport", "inproc"), "unixgram": attribute.String("net.transport", "inproc"), "unixpacket": attribute.String("net.transport", "inproc"), "ip:1": attribute.String("net.transport", "other"), "ip:icmp": attribute.String("net.transport", "other"), "ip4:proto": attribute.String("net.transport", "other"), "ip6:proto": attribute.String("net.transport", "other"), } for network, want := range transports { assert.Equal(t, want, nc.Transport(network)) } } func TestNetServerNilListener(t *testing.T) { const addr = "127.0.0.1:8080" got := nc.Server(addr, nil) expected := nc.Host(addr) assert.Equal(t, cap(expected), cap(got), "slice capacity") assert.ElementsMatch(t, expected, got) } type listener struct{ net.Listener } func (listener) Addr() net.Addr { return nil } func TestNetServerNilAddr(t *testing.T) { const addr = "127.0.0.1:8080" got := nc.Server(addr, listener{}) expected := nc.Host(addr) assert.Equal(t, cap(expected), cap(got), "slice capacity") assert.ElementsMatch(t, expected, got) } func newTCPListener() (net.Listener, error) { return net.Listen("tcp4", "127.0.0.1:0") } func TestNetServerTCP(t *testing.T) { ln, err := newTCPListener() require.NoError(t, err) defer func() { require.NoError(t, ln.Close()) }() host, pStr, err := net.SplitHostPort(ln.Addr().String()) require.NoError(t, err) port, err := strconv.Atoi(pStr) require.NoError(t, err) got := nc.Server("example.com:8080", ln) expected := []attribute.KeyValue{ nc.HostName("example.com"), nc.HostPort(8080), nc.NetTransportTCP, nc.NetSockFamilyKey.String("inet"), nc.NetSockHostAddrKey.String(host), nc.NetSockHostPortKey.Int(port), } assert.Equal(t, cap(expected), cap(got), "slice capacity") assert.ElementsMatch(t, expected, got) } func TestNetHost(t *testing.T) { testAddrs(t, []addrTest{ {address: "", expected: nil}, {address: "192.0.0.1", expected: []attribute.KeyValue{ nc.HostName("192.0.0.1"), }}, {address: "192.0.0.1:9090", expected: []attribute.KeyValue{ nc.HostName("192.0.0.1"), nc.HostPort(9090), }}, }, nc.Host) } func TestNetHostName(t *testing.T) { expected := attribute.Key("net.host.name").String(addr) assert.Equal(t, expected, nc.HostName(addr)) } func TestNetHostPort(t *testing.T) { expected := attribute.Key("net.host.port").Int(port) assert.Equal(t, expected, nc.HostPort(port)) } func TestNetClientNilConn(t *testing.T) { const addr 
= "127.0.0.1:8080" got := nc.Client(addr, nil) expected := nc.Peer(addr) assert.Equal(t, cap(expected), cap(got), "slice capacity") assert.ElementsMatch(t, expected, got) } type conn struct{ net.Conn } func (conn) LocalAddr() net.Addr { return nil } func (conn) RemoteAddr() net.Addr { return nil } func TestNetClientNilAddr(t *testing.T) { const addr = "127.0.0.1:8080" got := nc.Client(addr, conn{}) expected := nc.Peer(addr) assert.Equal(t, cap(expected), cap(got), "slice capacity") assert.ElementsMatch(t, expected, got) } func newTCPConn() (net.Conn, net.Listener, error) { ln, err := newTCPListener() if err != nil { return nil, nil, err } conn, err := net.Dial("tcp4", ln.Addr().String()) if err != nil { _ = ln.Close() return nil, nil, err } return conn, ln, nil } func TestNetClientTCP(t *testing.T) { conn, ln, err := newTCPConn() require.NoError(t, err) defer func() { require.NoError(t, ln.Close()) }() defer func() { require.NoError(t, conn.Close()) }() lHost, pStr, err := net.SplitHostPort(conn.LocalAddr().String()) require.NoError(t, err) lPort, err := strconv.Atoi(pStr) require.NoError(t, err) rHost, pStr, err := net.SplitHostPort(conn.RemoteAddr().String()) require.NoError(t, err) rPort, err := strconv.Atoi(pStr) require.NoError(t, err) got := nc.Client("example.com:8080", conn) expected := []attribute.KeyValue{ nc.PeerName("example.com"), nc.PeerPort(8080), nc.NetTransportTCP, nc.NetSockFamilyKey.String("inet"), nc.NetSockPeerAddrKey.String(rHost), nc.NetSockPeerPortKey.Int(rPort), nc.NetSockHostAddrKey.String(lHost), nc.NetSockHostPortKey.Int(lPort), } assert.Equal(t, cap(expected), cap(got), "slice capacity") assert.ElementsMatch(t, expected, got) } type remoteOnlyConn struct{ net.Conn } func (remoteOnlyConn) LocalAddr() net.Addr { return nil } func TestNetClientTCPNilLocal(t *testing.T) { conn, ln, err := newTCPConn() require.NoError(t, err) defer func() { require.NoError(t, ln.Close()) }() defer func() { require.NoError(t, conn.Close()) }() conn = remoteOnlyConn{conn} rHost, pStr, err := net.SplitHostPort(conn.RemoteAddr().String()) require.NoError(t, err) rPort, err := strconv.Atoi(pStr) require.NoError(t, err) got := nc.Client("example.com:8080", conn) expected := []attribute.KeyValue{ nc.PeerName("example.com"), nc.PeerPort(8080), nc.NetTransportTCP, nc.NetSockFamilyKey.String("inet"), nc.NetSockPeerAddrKey.String(rHost), nc.NetSockPeerPortKey.Int(rPort), } assert.Equal(t, cap(expected), cap(got), "slice capacity") assert.ElementsMatch(t, expected, got) } func TestNetPeer(t *testing.T) { testAddrs(t, []addrTest{ {address: "", expected: nil}, {address: "example.com", expected: []attribute.KeyValue{ nc.PeerName("example.com"), }}, {address: "/tmp/file", expected: []attribute.KeyValue{ nc.PeerName("/tmp/file"), }}, {address: "192.0.0.1", expected: []attribute.KeyValue{ nc.PeerName("192.0.0.1"), }}, {address: ":9090", expected: nil}, {address: "192.0.0.1:9090", expected: []attribute.KeyValue{ nc.PeerName("192.0.0.1"), nc.PeerPort(9090), }}, }, nc.Peer) } func TestNetPeerName(t *testing.T) { expected := attribute.Key("net.peer.name").String(addr) assert.Equal(t, expected, nc.PeerName(addr)) } func TestNetPeerPort(t *testing.T) { expected := attribute.Key("net.peer.port").Int(port) assert.Equal(t, expected, nc.PeerPort(port)) } func TestNetSockPeerName(t *testing.T) { expected := attribute.Key("net.sock.peer.addr").String(addr) assert.Equal(t, expected, nc.SockPeerAddr(addr)) } func TestNetSockPeerPort(t *testing.T) { expected := attribute.Key("net.sock.peer.port").Int(port) 
assert.Equal(t, expected, nc.SockPeerPort(port)) } func TestFamily(t *testing.T) { tests := []struct { network string address string expect string }{ {"", "", ""}, {"unix", "", "unix"}, {"unix", "gibberish", "unix"}, {"unixgram", "", "unix"}, {"unixgram", "gibberish", "unix"}, {"unixpacket", "gibberish", "unix"}, {"tcp", "123.0.2.8", "inet"}, {"tcp", "gibberish", ""}, {"", "123.0.2.8", "inet"}, {"", "gibberish", ""}, {"tcp", "fe80::1", "inet6"}, {"", "fe80::1", "inet6"}, } for _, test := range tests { got := family(test.network, test.address) assert.Equal(t, test.expect, got, test.network+"/"+test.address) } } func TestSplitHostPort(t *testing.T) { tests := []struct { hostport string host string port int }{ {"", "", -1}, {":8080", "", 8080}, {"127.0.0.1", "127.0.0.1", -1}, {"www.example.com", "www.example.com", -1}, {"127.0.0.1%25en0", "127.0.0.1%25en0", -1}, {"[]", "", -1}, // Ensure this doesn't panic. {"[fe80::1", "", -1}, {"[fe80::1]", "fe80::1", -1}, {"[fe80::1%25en0]", "fe80::1%25en0", -1}, {"[fe80::1]:8080", "fe80::1", 8080}, {"[fe80::1]::", "", -1}, // Too many colons. {"127.0.0.1:", "127.0.0.1", -1}, {"127.0.0.1:port", "127.0.0.1", -1}, {"127.0.0.1:8080", "127.0.0.1", 8080}, {"www.example.com:8080", "www.example.com", 8080}, {"127.0.0.1%25en0:8080", "127.0.0.1%25en0", 8080}, } for _, test := range tests { h, p := splitHostPort(test.hostport) assert.Equal(t, test.host, h, test.hostport) assert.Equal(t, test.port, p, test.hostport) } } type addrTest struct { address string expected []attribute.KeyValue } func testAddrs(t *testing.T, tests []addrTest, f func(string) []attribute.KeyValue) { t.Helper() for _, test := range tests { got := f(test.address) assert.Equal(t, cap(test.expected), cap(got), "slice capacity") assert.ElementsMatch(t, test.expected, got, test.address) } } opentelemetry-go-1.21.0/semconv/internal/v4/000077500000000000000000000000001452547353200207235ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/internal/v4/http.go000066400000000000000000000273331452547353200222410ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal // import "go.opentelemetry.io/otel/semconv/internal/v4" import ( "fmt" "net/http" "strings" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" ) // HTTPConv are the HTTP semantic convention attributes defined for a version // of the OpenTelemetry specification. type HTTPConv struct { NetConv *NetConv EnduserIDKey attribute.Key HTTPClientIPKey attribute.Key NetProtocolNameKey attribute.Key NetProtocolVersionKey attribute.Key HTTPMethodKey attribute.Key HTTPRequestContentLengthKey attribute.Key HTTPResponseContentLengthKey attribute.Key HTTPRouteKey attribute.Key HTTPSchemeHTTP attribute.KeyValue HTTPSchemeHTTPS attribute.KeyValue HTTPStatusCodeKey attribute.Key HTTPTargetKey attribute.Key HTTPURLKey attribute.Key UserAgentOriginalKey attribute.Key } // ClientResponse returns attributes for an HTTP response received by a client // from a server. 
The following attributes are returned if the related values // are defined in resp: "http.status.code", "http.response_content_length". // // This does not add all OpenTelemetry required attributes for an HTTP event, // it assumes ClientRequest was used to create the span with a complete set of // attributes. If a complete set of attributes can be generated using the // request contained in resp. For example: // // append(ClientResponse(resp), ClientRequest(resp.Request)...) func (c *HTTPConv) ClientResponse(resp *http.Response) []attribute.KeyValue { var n int if resp.StatusCode > 0 { n++ } if resp.ContentLength > 0 { n++ } attrs := make([]attribute.KeyValue, 0, n) if resp.StatusCode > 0 { attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode)) } if resp.ContentLength > 0 { attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength))) } return attrs } // ClientRequest returns attributes for an HTTP request made by a client. The // following attributes are always returned: "http.url", "http.flavor", // "http.method", "net.peer.name". The following attributes are returned if the // related values are defined in req: "net.peer.port", "http.user_agent", // "http.request_content_length", "enduser.id". func (c *HTTPConv) ClientRequest(req *http.Request) []attribute.KeyValue { n := 3 // URL, peer name, proto, and method. var h string if req.URL != nil { h = req.URL.Host } peer, p := firstHostPort(h, req.Header.Get("Host")) port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p) if port > 0 { n++ } useragent := req.UserAgent() if useragent != "" { n++ } if req.ContentLength > 0 { n++ } userID, _, hasUserID := req.BasicAuth() if hasUserID { n++ } attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.method(req.Method)) attrs = append(attrs, c.proto(req.Proto)) var u string if req.URL != nil { // Remove any username/password info that may be in the URL. userinfo := req.URL.User req.URL.User = nil u = req.URL.String() // Restore any username/password info that was removed. req.URL.User = userinfo } attrs = append(attrs, c.HTTPURLKey.String(u)) attrs = append(attrs, c.NetConv.PeerName(peer)) if port > 0 { attrs = append(attrs, c.NetConv.PeerPort(port)) } if useragent != "" { attrs = append(attrs, c.UserAgentOriginalKey.String(useragent)) } if l := req.ContentLength; l > 0 { attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l)) } if hasUserID { attrs = append(attrs, c.EnduserIDKey.String(userID)) } return attrs } // ServerRequest returns attributes for an HTTP request received by a server. // // The server must be the primary server name if it is known. For example this // would be the ServerName directive // (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache // server, and the server_name directive // (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an // nginx server. More generically, the primary server name would be the host // header value that matches the default virtual host of an HTTP server. It // should include the host identifier and if a port is used to route to the // server that port identifier should be included as an appropriate port // suffix. // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. // // The following attributes are always returned: "http.method", "http.scheme", // "http.flavor", "http.target", "net.host.name". 
The following attributes are // returned if they related values are defined in req: "net.host.port", // "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", // "http.client_ip". func (c *HTTPConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue { // TODO: This currently does not add the specification required // `http.target` attribute. It has too high of a cardinality to safely be // added. An alternate should be added, or this comment removed, when it is // addressed by the specification. If it is ultimately decided to continue // not including the attribute, the HTTPTargetKey field of the HTTPConv // should be removed as well. n := 4 // Method, scheme, proto, and host name. var host string var p int if server == "" { host, p = splitHostPort(req.Host) } else { // Prioritize the primary server name. host, p = splitHostPort(server) if p < 0 { _, p = splitHostPort(req.Host) } } hostPort := requiredHTTPPort(req.TLS != nil, p) if hostPort > 0 { n++ } peer, peerPort := splitHostPort(req.RemoteAddr) if peer != "" { n++ if peerPort > 0 { n++ } } useragent := req.UserAgent() if useragent != "" { n++ } userID, _, hasUserID := req.BasicAuth() if hasUserID { n++ } clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) if clientIP != "" { n++ } attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.method(req.Method)) attrs = append(attrs, c.scheme(req.TLS != nil)) attrs = append(attrs, c.proto(req.Proto)) attrs = append(attrs, c.NetConv.HostName(host)) if hostPort > 0 { attrs = append(attrs, c.NetConv.HostPort(hostPort)) } if peer != "" { // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a // file-path that would be interpreted with a sock family. attrs = append(attrs, c.NetConv.SockPeerAddr(peer)) if peerPort > 0 { attrs = append(attrs, c.NetConv.SockPeerPort(peerPort)) } } if useragent != "" { attrs = append(attrs, c.UserAgentOriginalKey.String(useragent)) } if hasUserID { attrs = append(attrs, c.EnduserIDKey.String(userID)) } if clientIP != "" { attrs = append(attrs, c.HTTPClientIPKey.String(clientIP)) } return attrs } func (c *HTTPConv) method(method string) attribute.KeyValue { if method == "" { return c.HTTPMethodKey.String(http.MethodGet) } return c.HTTPMethodKey.String(method) } func (c *HTTPConv) scheme(https bool) attribute.KeyValue { // nolint:revive if https { return c.HTTPSchemeHTTPS } return c.HTTPSchemeHTTP } func (c *HTTPConv) proto(proto string) attribute.KeyValue { switch proto { case "HTTP/1.0": return c.NetProtocolVersionKey.String("1.0") case "HTTP/1.1": return c.NetProtocolVersionKey.String("1.1") case "HTTP/2": return c.NetProtocolVersionKey.String("2.0") case "HTTP/3": return c.NetProtocolVersionKey.String("3.0") default: return c.NetProtocolNameKey.String(proto) } } func serverClientIP(xForwardedFor string) string { if idx := strings.Index(xForwardedFor, ","); idx >= 0 { xForwardedFor = xForwardedFor[:idx] } return xForwardedFor } func requiredHTTPPort(https bool, port int) int { // nolint:revive if https { if port > 0 && port != 443 { return port } } else { if port > 0 && port != 80 { return port } } return -1 } // Return the request host and port from the first non-empty source. func firstHostPort(source ...string) (host string, port int) { for _, hostport := range source { host, port = splitHostPort(hostport) if host != "" || port > 0 { break } } return } // RequestHeader returns the contents of h as OpenTelemetry attributes. 
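// For example (a non-normative sketch of the mapping performed by header
// below, where names are lower-cased and "-" becomes "_"):
//
//	c.RequestHeader(http.Header{"Content-Type": {"text/plain"}})
//	// yields attribute.StringSlice("http.request.header.content_type", []string{"text/plain"})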
func (c *HTTPConv) RequestHeader(h http.Header) []attribute.KeyValue { return c.header("http.request.header", h) } // ResponseHeader returns the contents of h as OpenTelemetry attributes. func (c *HTTPConv) ResponseHeader(h http.Header) []attribute.KeyValue { return c.header("http.response.header", h) } func (c *HTTPConv) header(prefix string, h http.Header) []attribute.KeyValue { key := func(k string) attribute.Key { k = strings.ToLower(k) k = strings.ReplaceAll(k, "-", "_") k = fmt.Sprintf("%s.%s", prefix, k) return attribute.Key(k) } attrs := make([]attribute.KeyValue, 0, len(h)) for k, v := range h { attrs = append(attrs, key(k).StringSlice(v)) } return attrs } // ClientStatus returns a span status code and message for an HTTP status code // value received by a client. func (c *HTTPConv) ClientStatus(code int) (codes.Code, string) { stat, valid := validateHTTPStatusCode(code) if !valid { return stat, fmt.Sprintf("Invalid HTTP status code %d", code) } return stat, "" } // ServerStatus returns a span status code and message for an HTTP status code // value returned by a server. Status codes in the 400-499 range are not // returned as errors. func (c *HTTPConv) ServerStatus(code int) (codes.Code, string) { stat, valid := validateHTTPStatusCode(code) if !valid { return stat, fmt.Sprintf("Invalid HTTP status code %d", code) } if code/100 == 4 { return codes.Unset, "" } return stat, "" } type codeRange struct { fromInclusive int toInclusive int } func (r codeRange) contains(code int) bool { return r.fromInclusive <= code && code <= r.toInclusive } var validRangesPerCategory = map[int][]codeRange{ 1: { {http.StatusContinue, http.StatusEarlyHints}, }, 2: { {http.StatusOK, http.StatusAlreadyReported}, {http.StatusIMUsed, http.StatusIMUsed}, }, 3: { {http.StatusMultipleChoices, http.StatusUseProxy}, {http.StatusTemporaryRedirect, http.StatusPermanentRedirect}, }, 4: { {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful… {http.StatusMisdirectedRequest, http.StatusUpgradeRequired}, {http.StatusPreconditionRequired, http.StatusTooManyRequests}, {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge}, {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons}, }, 5: { {http.StatusInternalServerError, http.StatusLoopDetected}, {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired}, }, } // validateHTTPStatusCode validates the HTTP status code and returns // corresponding span status code. If the `code` is not a valid HTTP status // code, returns span status Error and false. func validateHTTPStatusCode(code int) (codes.Code, bool) { category := code / 100 ranges, ok := validRangesPerCategory[category] if !ok { return codes.Error, false } ok = false for _, crange := range ranges { ok = crange.contains(code) if ok { break } } if !ok { return codes.Error, false } if category > 0 && category < 4 { return codes.Unset, true } return codes.Error, true } opentelemetry-go-1.21.0/semconv/internal/v4/http_test.go000066400000000000000000000412541452547353200232760ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal import ( "net/http" "net/http/httptest" "net/url" "strconv" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" ) var hc = &HTTPConv{ NetConv: nc, EnduserIDKey: attribute.Key("enduser.id"), HTTPClientIPKey: attribute.Key("http.client_ip"), NetProtocolNameKey: attribute.Key("net.protocol.name"), NetProtocolVersionKey: attribute.Key("net.protocol.version"), HTTPMethodKey: attribute.Key("http.method"), HTTPRequestContentLengthKey: attribute.Key("http.request_content_length"), HTTPResponseContentLengthKey: attribute.Key("http.response_content_length"), HTTPRouteKey: attribute.Key("http.route"), HTTPSchemeHTTP: attribute.String("http.scheme", "http"), HTTPSchemeHTTPS: attribute.String("http.scheme", "https"), HTTPStatusCodeKey: attribute.Key("http.status_code"), HTTPTargetKey: attribute.Key("http.target"), HTTPURLKey: attribute.Key("http.url"), UserAgentOriginalKey: attribute.Key("user_agent.original"), } func TestHTTPClientResponse(t *testing.T) { const stat, n = 201, 397 resp := &http.Response{ StatusCode: stat, ContentLength: n, } got := hc.ClientResponse(resp) assert.Equal(t, 2, cap(got), "slice capacity") assert.ElementsMatch(t, []attribute.KeyValue{ attribute.Key("http.status_code").Int(stat), attribute.Key("http.response_content_length").Int(n), }, got) } func TestHTTPSClientRequest(t *testing.T) { req := &http.Request{ Method: http.MethodGet, URL: &url.URL{ Scheme: "https", Host: "127.0.0.1:443", Path: "/resource", }, Proto: "HTTP/1.0", ProtoMajor: 1, ProtoMinor: 0, } assert.Equal( t, []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("net.protocol.version", "1.0"), attribute.String("http.url", "https://127.0.0.1:443/resource"), attribute.String("net.peer.name", "127.0.0.1"), }, hc.ClientRequest(req), ) } func TestHTTPClientRequest(t *testing.T) { const ( user = "alice" n = 128 agent = "Go-http-client/1.1" ) req := &http.Request{ Method: http.MethodGet, URL: &url.URL{ Scheme: "http", Host: "127.0.0.1:8080", Path: "/resource", }, Proto: "HTTP/1.0", ProtoMajor: 1, ProtoMinor: 0, Header: http.Header{ "User-Agent": []string{agent}, }, ContentLength: n, } req.SetBasicAuth(user, "pswrd") assert.Equal( t, []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("net.protocol.version", "1.0"), attribute.String("http.url", "http://127.0.0.1:8080/resource"), attribute.String("net.peer.name", "127.0.0.1"), attribute.Int("net.peer.port", 8080), attribute.String("user_agent.original", agent), attribute.Int("http.request_content_length", n), attribute.String("enduser.id", user), }, hc.ClientRequest(req), ) } func TestHTTPClientRequestRequired(t *testing.T) { req := new(http.Request) var got []attribute.KeyValue assert.NotPanics(t, func() { got = hc.ClientRequest(req) }) want := []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("net.protocol.name", ""), attribute.String("http.url", ""), attribute.String("net.peer.name", ""), } assert.Equal(t, want, got) } func TestHTTPServerRequest(t *testing.T) { 
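	// Test flow: start a real httptest server, capture the inbound
	// *http.Request through a buffered channel, then decorate that request
	// (basic auth, X-Forwarded-For) and assert on the attribute set that
	// ServerRequest produces for it.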
got := make(chan *http.Request, 1) handler := func(w http.ResponseWriter, r *http.Request) { got <- r w.WriteHeader(http.StatusOK) } srv := httptest.NewServer(http.HandlerFunc(handler)) defer srv.Close() srvURL, err := url.Parse(srv.URL) require.NoError(t, err) srvPort, err := strconv.ParseInt(srvURL.Port(), 10, 32) require.NoError(t, err) resp, err := srv.Client().Get(srv.URL) require.NoError(t, err) require.NoError(t, resp.Body.Close()) req := <-got peer, peerPort := splitHostPort(req.RemoteAddr) const user = "alice" req.SetBasicAuth(user, "pswrd") const clientIP = "127.0.0.5" req.Header.Add("X-Forwarded-For", clientIP) assert.ElementsMatch(t, []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.scheme", "http"), attribute.String("net.protocol.version", "1.1"), attribute.String("net.host.name", srvURL.Hostname()), attribute.Int("net.host.port", int(srvPort)), attribute.String("net.sock.peer.addr", peer), attribute.Int("net.sock.peer.port", peerPort), attribute.String("user_agent.original", "Go-http-client/1.1"), attribute.String("enduser.id", user), attribute.String("http.client_ip", clientIP), }, hc.ServerRequest("", req)) } func TestHTTPServerName(t *testing.T) { req := new(http.Request) var got []attribute.KeyValue const ( host = "test.semconv.server" port = 8080 ) portStr := strconv.Itoa(port) server := host + ":" + portStr assert.NotPanics(t, func() { got = hc.ServerRequest(server, req) }) assert.Contains(t, got, attribute.String("net.host.name", host)) assert.Contains(t, got, attribute.Int("net.host.port", port)) req = &http.Request{Host: "alt.host.name:" + portStr} // The server parameter does not include a port, ServerRequest should use // the port in the request Host field. assert.NotPanics(t, func() { got = hc.ServerRequest(host, req) }) assert.Contains(t, got, attribute.String("net.host.name", host)) assert.Contains(t, got, attribute.Int("net.host.port", port)) } func TestHTTPServerRequestFailsGracefully(t *testing.T) { req := new(http.Request) var got []attribute.KeyValue assert.NotPanics(t, func() { got = hc.ServerRequest("", req) }) want := []attribute.KeyValue{ attribute.String("http.method", "GET"), attribute.String("http.scheme", "http"), attribute.String("net.protocol.name", ""), attribute.String("net.host.name", ""), } assert.ElementsMatch(t, want, got) } func TestMethod(t *testing.T) { assert.Equal(t, attribute.String("http.method", "POST"), hc.method("POST")) assert.Equal(t, attribute.String("http.method", "GET"), hc.method("")) assert.Equal(t, attribute.String("http.method", "garbage"), hc.method("garbage")) } func TestScheme(t *testing.T) { assert.Equal(t, attribute.String("http.scheme", "http"), hc.scheme(false)) assert.Equal(t, attribute.String("http.scheme", "https"), hc.scheme(true)) } func TestProto(t *testing.T) { testCases := []struct { in string want attribute.KeyValue }{ { in: "HTTP/1.0", want: attribute.String("net.protocol.version", "1.0"), }, { in: "HTTP/1.1", want: attribute.String("net.protocol.version", "1.1"), }, { in: "HTTP/2", want: attribute.String("net.protocol.version", "2.0"), }, { in: "HTTP/3", want: attribute.String("net.protocol.version", "3.0"), }, { in: "SPDY", want: attribute.String("net.protocol.name", "SPDY"), }, { in: "QUIC", want: attribute.String("net.protocol.name", "QUIC"), }, { in: "other", want: attribute.String("net.protocol.name", "other"), }, } for _, tc := range testCases { t.Run(tc.in, func(t *testing.T) { got := hc.proto(tc.in) assert.Equal(t, tc.want, got) }) } } func TestServerClientIP(t 
*testing.T) { tests := []struct { xForwardedFor string want string }{ {"", ""}, {"127.0.0.1", "127.0.0.1"}, {"127.0.0.1,127.0.0.5", "127.0.0.1"}, } for _, test := range tests { got := serverClientIP(test.xForwardedFor) assert.Equal(t, test.want, got, test.xForwardedFor) } } func TestRequiredHTTPPort(t *testing.T) { tests := []struct { https bool port int want int }{ {true, 443, -1}, {true, 80, 80}, {true, 8081, 8081}, {false, 443, 443}, {false, 80, -1}, {false, 8080, 8080}, } for _, test := range tests { got := requiredHTTPPort(test.https, test.port) assert.Equal(t, test.want, got, test.https, test.port) } } func TestFirstHostPort(t *testing.T) { host, port := "127.0.0.1", 8080 hostport := "127.0.0.1:8080" sources := [][]string{ {hostport}, {"", hostport}, {"", "", hostport}, {"", "", hostport, ""}, {"", "", hostport, "127.0.0.3:80"}, } for _, src := range sources { h, p := firstHostPort(src...) assert.Equal(t, host, h, src) assert.Equal(t, port, p, src) } } func TestRequestHeader(t *testing.T) { ips := []string{"127.0.0.5", "127.0.0.9"} user := []string{"alice"} h := http.Header{"ips": ips, "user": user} got := hc.RequestHeader(h) assert.Equal(t, 2, cap(got), "slice capacity") assert.ElementsMatch(t, []attribute.KeyValue{ attribute.StringSlice("http.request.header.ips", ips), attribute.StringSlice("http.request.header.user", user), }, got) } func TestReponseHeader(t *testing.T) { ips := []string{"127.0.0.5", "127.0.0.9"} user := []string{"alice"} h := http.Header{"ips": ips, "user": user} got := hc.ResponseHeader(h) assert.Equal(t, 2, cap(got), "slice capacity") assert.ElementsMatch(t, []attribute.KeyValue{ attribute.StringSlice("http.response.header.ips", ips), attribute.StringSlice("http.response.header.user", user), }, got) } func TestClientStatus(t *testing.T) { tests := []struct { code int stat codes.Code msg bool }{ {0, codes.Error, true}, {http.StatusContinue, codes.Unset, false}, {http.StatusSwitchingProtocols, codes.Unset, false}, {http.StatusProcessing, codes.Unset, false}, {http.StatusEarlyHints, codes.Unset, false}, {http.StatusOK, codes.Unset, false}, {http.StatusCreated, codes.Unset, false}, {http.StatusAccepted, codes.Unset, false}, {http.StatusNonAuthoritativeInfo, codes.Unset, false}, {http.StatusNoContent, codes.Unset, false}, {http.StatusResetContent, codes.Unset, false}, {http.StatusPartialContent, codes.Unset, false}, {http.StatusMultiStatus, codes.Unset, false}, {http.StatusAlreadyReported, codes.Unset, false}, {http.StatusIMUsed, codes.Unset, false}, {http.StatusMultipleChoices, codes.Unset, false}, {http.StatusMovedPermanently, codes.Unset, false}, {http.StatusFound, codes.Unset, false}, {http.StatusSeeOther, codes.Unset, false}, {http.StatusNotModified, codes.Unset, false}, {http.StatusUseProxy, codes.Unset, false}, {306, codes.Error, true}, {http.StatusTemporaryRedirect, codes.Unset, false}, {http.StatusPermanentRedirect, codes.Unset, false}, {http.StatusBadRequest, codes.Error, false}, {http.StatusUnauthorized, codes.Error, false}, {http.StatusPaymentRequired, codes.Error, false}, {http.StatusForbidden, codes.Error, false}, {http.StatusNotFound, codes.Error, false}, {http.StatusMethodNotAllowed, codes.Error, false}, {http.StatusNotAcceptable, codes.Error, false}, {http.StatusProxyAuthRequired, codes.Error, false}, {http.StatusRequestTimeout, codes.Error, false}, {http.StatusConflict, codes.Error, false}, {http.StatusGone, codes.Error, false}, {http.StatusLengthRequired, codes.Error, false}, {http.StatusPreconditionFailed, codes.Error, false}, 
{http.StatusRequestEntityTooLarge, codes.Error, false}, {http.StatusRequestURITooLong, codes.Error, false}, {http.StatusUnsupportedMediaType, codes.Error, false}, {http.StatusRequestedRangeNotSatisfiable, codes.Error, false}, {http.StatusExpectationFailed, codes.Error, false}, {http.StatusTeapot, codes.Error, false}, {http.StatusMisdirectedRequest, codes.Error, false}, {http.StatusUnprocessableEntity, codes.Error, false}, {http.StatusLocked, codes.Error, false}, {http.StatusFailedDependency, codes.Error, false}, {http.StatusTooEarly, codes.Error, false}, {http.StatusUpgradeRequired, codes.Error, false}, {http.StatusPreconditionRequired, codes.Error, false}, {http.StatusTooManyRequests, codes.Error, false}, {http.StatusRequestHeaderFieldsTooLarge, codes.Error, false}, {http.StatusUnavailableForLegalReasons, codes.Error, false}, {http.StatusInternalServerError, codes.Error, false}, {http.StatusNotImplemented, codes.Error, false}, {http.StatusBadGateway, codes.Error, false}, {http.StatusServiceUnavailable, codes.Error, false}, {http.StatusGatewayTimeout, codes.Error, false}, {http.StatusHTTPVersionNotSupported, codes.Error, false}, {http.StatusVariantAlsoNegotiates, codes.Error, false}, {http.StatusInsufficientStorage, codes.Error, false}, {http.StatusLoopDetected, codes.Error, false}, {http.StatusNotExtended, codes.Error, false}, {http.StatusNetworkAuthenticationRequired, codes.Error, false}, {600, codes.Error, true}, } for _, test := range tests { c, msg := hc.ClientStatus(test.code) assert.Equal(t, test.stat, c) if test.msg && msg == "" { t.Errorf("expected non-empty message for %d", test.code) } else if !test.msg && msg != "" { t.Errorf("expected empty message for %d, got: %s", test.code, msg) } } } func TestServerStatus(t *testing.T) { tests := []struct { code int stat codes.Code msg bool }{ {0, codes.Error, true}, {http.StatusContinue, codes.Unset, false}, {http.StatusSwitchingProtocols, codes.Unset, false}, {http.StatusProcessing, codes.Unset, false}, {http.StatusEarlyHints, codes.Unset, false}, {http.StatusOK, codes.Unset, false}, {http.StatusCreated, codes.Unset, false}, {http.StatusAccepted, codes.Unset, false}, {http.StatusNonAuthoritativeInfo, codes.Unset, false}, {http.StatusNoContent, codes.Unset, false}, {http.StatusResetContent, codes.Unset, false}, {http.StatusPartialContent, codes.Unset, false}, {http.StatusMultiStatus, codes.Unset, false}, {http.StatusAlreadyReported, codes.Unset, false}, {http.StatusIMUsed, codes.Unset, false}, {http.StatusMultipleChoices, codes.Unset, false}, {http.StatusMovedPermanently, codes.Unset, false}, {http.StatusFound, codes.Unset, false}, {http.StatusSeeOther, codes.Unset, false}, {http.StatusNotModified, codes.Unset, false}, {http.StatusUseProxy, codes.Unset, false}, {306, codes.Error, true}, {http.StatusTemporaryRedirect, codes.Unset, false}, {http.StatusPermanentRedirect, codes.Unset, false}, {http.StatusBadRequest, codes.Unset, false}, {http.StatusUnauthorized, codes.Unset, false}, {http.StatusPaymentRequired, codes.Unset, false}, {http.StatusForbidden, codes.Unset, false}, {http.StatusNotFound, codes.Unset, false}, {http.StatusMethodNotAllowed, codes.Unset, false}, {http.StatusNotAcceptable, codes.Unset, false}, {http.StatusProxyAuthRequired, codes.Unset, false}, {http.StatusRequestTimeout, codes.Unset, false}, {http.StatusConflict, codes.Unset, false}, {http.StatusGone, codes.Unset, false}, {http.StatusLengthRequired, codes.Unset, false}, {http.StatusPreconditionFailed, codes.Unset, false}, {http.StatusRequestEntityTooLarge, codes.Unset, 
false}, {http.StatusRequestURITooLong, codes.Unset, false}, {http.StatusUnsupportedMediaType, codes.Unset, false}, {http.StatusRequestedRangeNotSatisfiable, codes.Unset, false}, {http.StatusExpectationFailed, codes.Unset, false}, {http.StatusTeapot, codes.Unset, false}, {http.StatusMisdirectedRequest, codes.Unset, false}, {http.StatusUnprocessableEntity, codes.Unset, false}, {http.StatusLocked, codes.Unset, false}, {http.StatusFailedDependency, codes.Unset, false}, {http.StatusTooEarly, codes.Unset, false}, {http.StatusUpgradeRequired, codes.Unset, false}, {http.StatusPreconditionRequired, codes.Unset, false}, {http.StatusTooManyRequests, codes.Unset, false}, {http.StatusRequestHeaderFieldsTooLarge, codes.Unset, false}, {http.StatusUnavailableForLegalReasons, codes.Unset, false}, {http.StatusInternalServerError, codes.Error, false}, {http.StatusNotImplemented, codes.Error, false}, {http.StatusBadGateway, codes.Error, false}, {http.StatusServiceUnavailable, codes.Error, false}, {http.StatusGatewayTimeout, codes.Error, false}, {http.StatusHTTPVersionNotSupported, codes.Error, false}, {http.StatusVariantAlsoNegotiates, codes.Error, false}, {http.StatusInsufficientStorage, codes.Error, false}, {http.StatusLoopDetected, codes.Error, false}, {http.StatusNotExtended, codes.Error, false}, {http.StatusNetworkAuthenticationRequired, codes.Error, false}, {600, codes.Error, true}, } for _, test := range tests { c, msg := hc.ServerStatus(test.code) assert.Equal(t, test.stat, c) if test.msg && msg == "" { t.Errorf("expected non-empty message for %d", test.code) } else if !test.msg && msg != "" { t.Errorf("expected empty message for %d, got: %s", test.code, msg) } } } opentelemetry-go-1.21.0/semconv/internal/v4/net.go000066400000000000000000000202121452547353200220350ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal // import "go.opentelemetry.io/otel/semconv/internal/v4" import ( "net" "strconv" "strings" "go.opentelemetry.io/otel/attribute" ) // NetConv are the network semantic convention attributes defined for a version // of the OpenTelemetry specification. type NetConv struct { NetHostNameKey attribute.Key NetHostPortKey attribute.Key NetPeerNameKey attribute.Key NetPeerPortKey attribute.Key NetSockFamilyKey attribute.Key NetSockPeerAddrKey attribute.Key NetSockPeerPortKey attribute.Key NetSockHostAddrKey attribute.Key NetSockHostPortKey attribute.Key NetTransportOther attribute.KeyValue NetTransportTCP attribute.KeyValue NetTransportUDP attribute.KeyValue NetTransportInProc attribute.KeyValue } func (c *NetConv) Transport(network string) attribute.KeyValue { switch network { case "tcp", "tcp4", "tcp6": return c.NetTransportTCP case "udp", "udp4", "udp6": return c.NetTransportUDP case "unix", "unixgram", "unixpacket": return c.NetTransportInProc default: // "ip:*", "ip4:*", and "ip6:*" all are considered other. return c.NetTransportOther } } // Host returns attributes for a network host address. 
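// For example (a non-normative sketch, assuming the net.host.* key names
// configured by the tests in this package):
//
//	c.Host("example.com:8080")
//	// yields [net.host.name=example.com net.host.port=8080]
//
//	c.Host("")
//	// yields nil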
func (c *NetConv) Host(address string) []attribute.KeyValue { h, p := splitHostPort(address) var n int if h != "" { n++ if p > 0 { n++ } } if n == 0 { return nil } attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.HostName(h)) if p > 0 { attrs = append(attrs, c.HostPort(int(p))) } return attrs } // Server returns attributes for a network listener listening at address. See // net.Listen for information about acceptable address values, address should // be the same as the one used to create ln. If ln is nil, only network host // attributes will be returned that describe address. Otherwise, the socket // level information about ln will also be included. func (c *NetConv) Server(address string, ln net.Listener) []attribute.KeyValue { if ln == nil { return c.Host(address) } lAddr := ln.Addr() if lAddr == nil { return c.Host(address) } hostName, hostPort := splitHostPort(address) sockHostAddr, sockHostPort := splitHostPort(lAddr.String()) network := lAddr.Network() sockFamily := family(network, sockHostAddr) n := nonZeroStr(hostName, network, sockHostAddr, sockFamily) n += positiveInt(hostPort, sockHostPort) attr := make([]attribute.KeyValue, 0, n) if hostName != "" { attr = append(attr, c.HostName(hostName)) if hostPort > 0 { // Only if net.host.name is set should net.host.port be. attr = append(attr, c.HostPort(hostPort)) } } if network != "" { attr = append(attr, c.Transport(network)) } if sockFamily != "" { attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) } if sockHostAddr != "" { attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) if sockHostPort > 0 { // Only if net.sock.host.addr is set should net.sock.host.port be. attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) } } return attr } func (c *NetConv) HostName(name string) attribute.KeyValue { return c.NetHostNameKey.String(name) } func (c *NetConv) HostPort(port int) attribute.KeyValue { return c.NetHostPortKey.Int(port) } // Client returns attributes for a client network connection to address. See // net.Dial for information about acceptable address values, address should be // the same as the one used to create conn. If conn is nil, only network peer // attributes will be returned that describe address. Otherwise, the socket // level information about conn will also be included. func (c *NetConv) Client(address string, conn net.Conn) []attribute.KeyValue { if conn == nil { return c.Peer(address) } lAddr, rAddr := conn.LocalAddr(), conn.RemoteAddr() var network string switch { case lAddr != nil: network = lAddr.Network() case rAddr != nil: network = rAddr.Network() default: return c.Peer(address) } peerName, peerPort := splitHostPort(address) var ( sockFamily string sockPeerAddr string sockPeerPort int sockHostAddr string sockHostPort int ) if lAddr != nil { sockHostAddr, sockHostPort = splitHostPort(lAddr.String()) } if rAddr != nil { sockPeerAddr, sockPeerPort = splitHostPort(rAddr.String()) } switch { case sockHostAddr != "": sockFamily = family(network, sockHostAddr) case sockPeerAddr != "": sockFamily = family(network, sockPeerAddr) } n := nonZeroStr(peerName, network, sockPeerAddr, sockHostAddr, sockFamily) n += positiveInt(peerPort, sockPeerPort, sockHostPort) attr := make([]attribute.KeyValue, 0, n) if peerName != "" { attr = append(attr, c.PeerName(peerName)) if peerPort > 0 { // Only if net.peer.name is set should net.peer.port be. 
attr = append(attr, c.PeerPort(peerPort)) } } if network != "" { attr = append(attr, c.Transport(network)) } if sockFamily != "" { attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) } if sockPeerAddr != "" { attr = append(attr, c.NetSockPeerAddrKey.String(sockPeerAddr)) if sockPeerPort > 0 { // Only if net.sock.peer.addr is set should net.sock.peer.port be. attr = append(attr, c.NetSockPeerPortKey.Int(sockPeerPort)) } } if sockHostAddr != "" { attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) if sockHostPort > 0 { // Only if net.sock.host.addr is set should net.sock.host.port be. attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) } } return attr } func family(network, address string) string { switch network { case "unix", "unixgram", "unixpacket": return "unix" default: if ip := net.ParseIP(address); ip != nil { if ip.To4() == nil { return "inet6" } return "inet" } } return "" } func nonZeroStr(strs ...string) int { var n int for _, str := range strs { if str != "" { n++ } } return n } func positiveInt(ints ...int) int { var n int for _, i := range ints { if i > 0 { n++ } } return n } // Peer returns attributes for a network peer address. func (c *NetConv) Peer(address string) []attribute.KeyValue { h, p := splitHostPort(address) var n int if h != "" { n++ if p > 0 { n++ } } if n == 0 { return nil } attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.PeerName(h)) if p > 0 { attrs = append(attrs, c.PeerPort(int(p))) } return attrs } func (c *NetConv) PeerName(name string) attribute.KeyValue { return c.NetPeerNameKey.String(name) } func (c *NetConv) PeerPort(port int) attribute.KeyValue { return c.NetPeerPortKey.Int(port) } func (c *NetConv) SockPeerAddr(addr string) attribute.KeyValue { return c.NetSockPeerAddrKey.String(addr) } func (c *NetConv) SockPeerPort(port int) attribute.KeyValue { return c.NetSockPeerPortKey.Int(port) } // splitHostPort splits a network address hostport of the form "host", // "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", // "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and // port. // // An empty host is returned if it is not provided or unparsable. A negative // port is returned if it is not provided or unparsable. func splitHostPort(hostport string) (host string, port int) { port = -1 if strings.HasPrefix(hostport, "[") { addrEnd := strings.LastIndex(hostport, "]") if addrEnd < 0 { // Invalid hostport. return } if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 { host = hostport[1:addrEnd] return } } else { if i := strings.LastIndex(hostport, ":"); i < 0 { host = hostport return } } host, pStr, err := net.SplitHostPort(hostport) if err != nil { return } p, err := strconv.ParseUint(pStr, 10, 16) if err != nil { return } return host, int(p) } opentelemetry-go-1.21.0/semconv/internal/v4/net_test.go000066400000000000000000000233751452547353200231110ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
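// A quick summary of the family helper exercised below, based on the
// TestFamily cases: unix-domain networks always map to "unix"; otherwise a
// parseable IPv4 address maps to "inet", an IPv6 address to "inet6", and an
// unparseable address to "".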
package internal import ( "net" "strconv" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/attribute" ) const ( addr = "127.0.0.1" port = 1834 ) var nc = &NetConv{ NetHostNameKey: attribute.Key("net.host.name"), NetHostPortKey: attribute.Key("net.host.port"), NetPeerNameKey: attribute.Key("net.peer.name"), NetPeerPortKey: attribute.Key("net.peer.port"), NetSockPeerAddrKey: attribute.Key("net.sock.peer.addr"), NetSockPeerPortKey: attribute.Key("net.sock.peer.port"), NetTransportOther: attribute.String("net.transport", "other"), NetTransportTCP: attribute.String("net.transport", "ip_tcp"), NetTransportUDP: attribute.String("net.transport", "ip_udp"), NetTransportInProc: attribute.String("net.transport", "inproc"), } func TestNetTransport(t *testing.T) { transports := map[string]attribute.KeyValue{ "tcp": attribute.String("net.transport", "ip_tcp"), "tcp4": attribute.String("net.transport", "ip_tcp"), "tcp6": attribute.String("net.transport", "ip_tcp"), "udp": attribute.String("net.transport", "ip_udp"), "udp4": attribute.String("net.transport", "ip_udp"), "udp6": attribute.String("net.transport", "ip_udp"), "unix": attribute.String("net.transport", "inproc"), "unixgram": attribute.String("net.transport", "inproc"), "unixpacket": attribute.String("net.transport", "inproc"), "ip:1": attribute.String("net.transport", "other"), "ip:icmp": attribute.String("net.transport", "other"), "ip4:proto": attribute.String("net.transport", "other"), "ip6:proto": attribute.String("net.transport", "other"), } for network, want := range transports { assert.Equal(t, want, nc.Transport(network)) } } func TestNetServerNilListener(t *testing.T) { const addr = "127.0.0.1:8080" got := nc.Server(addr, nil) expected := nc.Host(addr) assert.Equal(t, cap(expected), cap(got), "slice capacity") assert.ElementsMatch(t, expected, got) } type listener struct{ net.Listener } func (listener) Addr() net.Addr { return nil } func TestNetServerNilAddr(t *testing.T) { const addr = "127.0.0.1:8080" got := nc.Server(addr, listener{}) expected := nc.Host(addr) assert.Equal(t, cap(expected), cap(got), "slice capacity") assert.ElementsMatch(t, expected, got) } func newTCPListener() (net.Listener, error) { return net.Listen("tcp4", "127.0.0.1:0") } func TestNetServerTCP(t *testing.T) { ln, err := newTCPListener() require.NoError(t, err) defer func() { require.NoError(t, ln.Close()) }() host, pStr, err := net.SplitHostPort(ln.Addr().String()) require.NoError(t, err) port, err := strconv.Atoi(pStr) require.NoError(t, err) got := nc.Server("example.com:8080", ln) expected := []attribute.KeyValue{ nc.HostName("example.com"), nc.HostPort(8080), nc.NetTransportTCP, nc.NetSockFamilyKey.String("inet"), nc.NetSockHostAddrKey.String(host), nc.NetSockHostPortKey.Int(port), } assert.Equal(t, cap(expected), cap(got), "slice capacity") assert.ElementsMatch(t, expected, got) } func TestNetHost(t *testing.T) { testAddrs(t, []addrTest{ {address: "", expected: nil}, {address: "192.0.0.1", expected: []attribute.KeyValue{ nc.HostName("192.0.0.1"), }}, {address: "192.0.0.1:9090", expected: []attribute.KeyValue{ nc.HostName("192.0.0.1"), nc.HostPort(9090), }}, }, nc.Host) } func TestNetHostName(t *testing.T) { expected := attribute.Key("net.host.name").String(addr) assert.Equal(t, expected, nc.HostName(addr)) } func TestNetHostPort(t *testing.T) { expected := attribute.Key("net.host.port").Int(port) assert.Equal(t, expected, nc.HostPort(port)) } func TestNetClientNilConn(t *testing.T) { const addr 
= "127.0.0.1:8080" got := nc.Client(addr, nil) expected := nc.Peer(addr) assert.Equal(t, cap(expected), cap(got), "slice capacity") assert.ElementsMatch(t, expected, got) } type conn struct{ net.Conn } func (conn) LocalAddr() net.Addr { return nil } func (conn) RemoteAddr() net.Addr { return nil } func TestNetClientNilAddr(t *testing.T) { const addr = "127.0.0.1:8080" got := nc.Client(addr, conn{}) expected := nc.Peer(addr) assert.Equal(t, cap(expected), cap(got), "slice capacity") assert.ElementsMatch(t, expected, got) } func newTCPConn() (net.Conn, net.Listener, error) { ln, err := newTCPListener() if err != nil { return nil, nil, err } conn, err := net.Dial("tcp4", ln.Addr().String()) if err != nil { _ = ln.Close() return nil, nil, err } return conn, ln, nil } func TestNetClientTCP(t *testing.T) { conn, ln, err := newTCPConn() require.NoError(t, err) defer func() { require.NoError(t, ln.Close()) }() defer func() { require.NoError(t, conn.Close()) }() lHost, pStr, err := net.SplitHostPort(conn.LocalAddr().String()) require.NoError(t, err) lPort, err := strconv.Atoi(pStr) require.NoError(t, err) rHost, pStr, err := net.SplitHostPort(conn.RemoteAddr().String()) require.NoError(t, err) rPort, err := strconv.Atoi(pStr) require.NoError(t, err) got := nc.Client("example.com:8080", conn) expected := []attribute.KeyValue{ nc.PeerName("example.com"), nc.PeerPort(8080), nc.NetTransportTCP, nc.NetSockFamilyKey.String("inet"), nc.NetSockPeerAddrKey.String(rHost), nc.NetSockPeerPortKey.Int(rPort), nc.NetSockHostAddrKey.String(lHost), nc.NetSockHostPortKey.Int(lPort), } assert.Equal(t, cap(expected), cap(got), "slice capacity") assert.ElementsMatch(t, expected, got) } type remoteOnlyConn struct{ net.Conn } func (remoteOnlyConn) LocalAddr() net.Addr { return nil } func TestNetClientTCPNilLocal(t *testing.T) { conn, ln, err := newTCPConn() require.NoError(t, err) defer func() { require.NoError(t, ln.Close()) }() defer func() { require.NoError(t, conn.Close()) }() conn = remoteOnlyConn{conn} rHost, pStr, err := net.SplitHostPort(conn.RemoteAddr().String()) require.NoError(t, err) rPort, err := strconv.Atoi(pStr) require.NoError(t, err) got := nc.Client("example.com:8080", conn) expected := []attribute.KeyValue{ nc.PeerName("example.com"), nc.PeerPort(8080), nc.NetTransportTCP, nc.NetSockFamilyKey.String("inet"), nc.NetSockPeerAddrKey.String(rHost), nc.NetSockPeerPortKey.Int(rPort), } assert.Equal(t, cap(expected), cap(got), "slice capacity") assert.ElementsMatch(t, expected, got) } func TestNetPeer(t *testing.T) { testAddrs(t, []addrTest{ {address: "", expected: nil}, {address: "example.com", expected: []attribute.KeyValue{ nc.PeerName("example.com"), }}, {address: "/tmp/file", expected: []attribute.KeyValue{ nc.PeerName("/tmp/file"), }}, {address: "192.0.0.1", expected: []attribute.KeyValue{ nc.PeerName("192.0.0.1"), }}, {address: ":9090", expected: nil}, {address: "192.0.0.1:9090", expected: []attribute.KeyValue{ nc.PeerName("192.0.0.1"), nc.PeerPort(9090), }}, }, nc.Peer) } func TestNetPeerName(t *testing.T) { expected := attribute.Key("net.peer.name").String(addr) assert.Equal(t, expected, nc.PeerName(addr)) } func TestNetPeerPort(t *testing.T) { expected := attribute.Key("net.peer.port").Int(port) assert.Equal(t, expected, nc.PeerPort(port)) } func TestNetSockPeerName(t *testing.T) { expected := attribute.Key("net.sock.peer.addr").String(addr) assert.Equal(t, expected, nc.SockPeerAddr(addr)) } func TestNetSockPeerPort(t *testing.T) { expected := attribute.Key("net.sock.peer.port").Int(port) 
assert.Equal(t, expected, nc.SockPeerPort(port)) } func TestFamily(t *testing.T) { tests := []struct { network string address string expect string }{ {"", "", ""}, {"unix", "", "unix"}, {"unix", "gibberish", "unix"}, {"unixgram", "", "unix"}, {"unixgram", "gibberish", "unix"}, {"unixpacket", "gibberish", "unix"}, {"tcp", "123.0.2.8", "inet"}, {"tcp", "gibberish", ""}, {"", "123.0.2.8", "inet"}, {"", "gibberish", ""}, {"tcp", "fe80::1", "inet6"}, {"", "fe80::1", "inet6"}, } for _, test := range tests { got := family(test.network, test.address) assert.Equal(t, test.expect, got, test.network+"/"+test.address) } } func TestSplitHostPort(t *testing.T) { tests := []struct { hostport string host string port int }{ {"", "", -1}, {":8080", "", 8080}, {"127.0.0.1", "127.0.0.1", -1}, {"www.example.com", "www.example.com", -1}, {"127.0.0.1%25en0", "127.0.0.1%25en0", -1}, {"[]", "", -1}, // Ensure this doesn't panic. {"[fe80::1", "", -1}, {"[fe80::1]", "fe80::1", -1}, {"[fe80::1%25en0]", "fe80::1%25en0", -1}, {"[fe80::1]:8080", "fe80::1", 8080}, {"[fe80::1]::", "", -1}, // Too many colons. {"127.0.0.1:", "127.0.0.1", -1}, {"127.0.0.1:port", "127.0.0.1", -1}, {"127.0.0.1:8080", "127.0.0.1", 8080}, {"www.example.com:8080", "www.example.com", 8080}, {"127.0.0.1%25en0:8080", "127.0.0.1%25en0", 8080}, } for _, test := range tests { h, p := splitHostPort(test.hostport) assert.Equal(t, test.host, h, test.hostport) assert.Equal(t, test.port, p, test.hostport) } } type addrTest struct { address string expected []attribute.KeyValue } func testAddrs(t *testing.T, tests []addrTest, f func(string) []attribute.KeyValue) { t.Helper() for _, test := range tests { got := f(test.address) assert.Equal(t, cap(test.expected), cap(got), "slice capacity") assert.ElementsMatch(t, test.expected, got, test.address) } } opentelemetry-go-1.21.0/semconv/template.j2000066400000000000000000000122711452547353200206310ustar00rootroot00000000000000{%- macro keyval_method(type) -%} {%- if type == "string" -%} String {%- elif type == "string[]" -%} StringSlice {%- elif type == "int" -%} Int {%- elif type == "int[]" -%} IntSlice {%- elif type == "double" -%} Float64 {%- elif type == "double[]" -%} Float64Slice {%- elif type == "boolean" -%} Bool {%- elif type == "boolean[]" -%} BoolSlice {%- endif -%} {%- endmacro -%} {%- macro to_go_attr_type(type, val) -%} {{keyval_method(type)}}({% if type == "string" %}"{{val}}"{% else %}{{val}}{% endif %}) {%- endmacro -%} {%- macro to_go_name(fqn) -%} {{fqn | replace(".", " ") | replace("_", " ") | title | replace(" ", "")}} {%- endmacro -%} {%- macro it_reps(brief) -%} It represents {% if brief[:2] == "A " or brief[:3] == "An " or brief[:4] == "The " -%} {{ brief[0]|lower }}{{ brief[1:] }} {%- else -%} the {{ brief[0]|lower }}{{ brief[1:] }} {%- endif -%} {%- endmacro -%} {%- macro keydoc(attr) -%} {{ to_go_name(attr.fqn) }}Key is the attribute Key conforming to the "{{ attr.fqn }}" semantic conventions. 
{{ it_reps(attr.brief) }} {%- endmacro -%} {%- macro keydetails(attr) -%} {%- if attr.attr_type is string %} Type: {{ attr.attr_type }} {%- else %} Type: Enum {%- endif %} {%- if attr.requirement_level == RequirementLevel.REQUIRED %} RequirementLevel: Required {%- elif attr.requirement_level == RequirementLevel.CONDITIONALLY_REQUIRED %} RequirementLevel: ConditionallyRequired {%- if attr.requirement_level_msg != "" %} ({{ attr.requirement_level_msg }}){%- endif %} {%- elif attr.requirement_level == RequirementLevel.RECOMMENDED %} RequirementLevel: Recommended {%- if attr.requirement_level_msg != "" %} ({{ attr.requirement_level_msg }}){%- endif %} {%- else %} RequirementLevel: Optional {%- endif %} {{ attr.stability | replace("Level.", ": ") | capitalize }} {%- if attr.deprecated != None %} Deprecated: {{ attr.deprecated }} {%- endif %} {%- if attr.examples is iterable %} Examples: {{ attr.examples | pprint | trim("[]") }} {%- endif %} {%- if attr.note %} Note: {{ attr.note }} {%- endif %} {%- endmacro -%} {%- macro fndoc(attr) -%} // {{ to_go_name(attr.fqn) }} returns an attribute KeyValue conforming to the "{{ attr.fqn }}" semantic conventions. {{ it_reps(attr.brief) }} {%- endmacro -%} {%- macro to_go_func(type, name) -%} {%- if type == "string" -%} func {{name}}(val string) attribute.KeyValue { {%- elif type == "string[]" -%} func {{name}}(val ...string) attribute.KeyValue { {%- elif type == "int" -%} func {{name}}(val int) attribute.KeyValue { {%- elif type == "int[]" -%} func {{name}}(val ...int) attribute.KeyValue { {%- elif type == "double" -%} func {{name}}(val float64) attribute.KeyValue { {%- elif type == "double[]" -%} func {{name}}(val ...float64) attribute.KeyValue { {%- elif type == "boolean" -%} func {{name}}(val bool) attribute.KeyValue { {%- elif type == "boolean[]" -%} func {{name}}(val ...bool) attribute.KeyValue { {%- endif -%} return {{name}}Key.{{keyval_method(type)}}(val) } {%- endmacro -%} {%- macro sentence_case(text) -%} {{ text[0]|upper}}{{text[1:] }} {%- endmacro -%} // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. 
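{# The remainder of the template renders the generated Go file body: the package clause (with [[IMPORTPATH]] substituted by the generator), the attribute import, and, for each semantic convention group, the attribute Key constants with their documentation, the enum members as vars, and typed constructor functions for the non-enum attributes. #}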
package semconv // import [[IMPORTPATH]] import "go.opentelemetry.io/otel/attribute" {% for semconv in semconvs -%} {%- if semconvs[semconv].attributes | rejectattr("ref") | selectattr("is_local") | sort(attribute=fqn) | length > 0 -%} // {{ sentence_case(semconvs[semconv].brief | replace("This document defines ", "")) | wordwrap(76, break_long_words=false, break_on_hyphens=false, wrapstring="\n// ") }} const ( {%- for attr in semconvs[semconv].attributes if attr.is_local and not attr.ref %} // {{ keydoc(attr) | wordwrap(72, break_long_words=false, break_on_hyphens=false, wrapstring="\n\t// ") }} // {{ keydetails(attr) | wordwrap(72, break_long_words=false, break_on_hyphens=false, wrapstring="\n\t// ") }} {{to_go_name(attr.fqn)}}Key = attribute.Key("{{attr.fqn}}") {% endfor -%} ) {%- for attr in semconvs[semconv].attributes if attr.is_local and not attr.ref -%} {%- if attr.attr_type is not string %} var ( {%- for val in attr.attr_type.members %} // {{ val.brief | to_doc_brief }} {{to_go_name("{}.{}".format(attr.fqn, val.member_id))}} = {{to_go_name(attr.fqn)}}Key.{{to_go_attr_type(attr.attr_type.enum_type, val.value)}} {%- endfor %} ) {%- endif -%} {%- endfor %} {%- for attr in semconvs[semconv].attributes if attr.is_local and not attr.ref -%} {%- if attr.attr_type is string %} {{ fndoc(attr) | wordwrap(76, break_long_words=false, break_on_hyphens=false, wrapstring="\n// ") }} {{to_go_func(attr.attr_type, to_go_name(attr.fqn))}} {%- endif -%} {%- endfor %} {% endif %} {% endfor -%} opentelemetry-go-1.21.0/semconv/v1.10.0/000077500000000000000000000000001452547353200174615ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.10.0/doc.go000066400000000000000000000016641452547353200205640ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package semconv implements OpenTelemetry semantic conventions. // // OpenTelemetry semantic conventions are agreed standardized naming // patterns for OpenTelemetry things. This package represents the conventions // as of the v1.10.0 version of the OpenTelemetry specification. package semconv // import "go.opentelemetry.io/otel/semconv/v1.10.0" opentelemetry-go-1.21.0/semconv/v1.10.0/exception.go000066400000000000000000000014301452547353200220040ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
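// A minimal usage sketch, assuming span is an active
// go.opentelemetry.io/otel/trace.Span and the string values are illustrative:
//
//	span.AddEvent(semconv.ExceptionEventName, trace.WithAttributes(
//		semconv.ExceptionTypeKey.String("*os.PathError"),
//		semconv.ExceptionMessageKey.String("file does not exist"),
//	))
//
// The ExceptionEventName constant below and the exception.* attribute keys
// defined elsewhere in this package keep such events aligned with the
// semantic conventions.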
package semconv // import "go.opentelemetry.io/otel/semconv/v1.10.0" const ( // ExceptionEventName is the name of the Span event representing an exception. ExceptionEventName = "exception" ) opentelemetry-go-1.21.0/semconv/v1.10.0/http.go000066400000000000000000000113121452547353200207650ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.10.0" import ( "net/http" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/semconv/internal" "go.opentelemetry.io/otel/trace" ) // HTTP scheme attributes. var ( HTTPSchemeHTTP = HTTPSchemeKey.String("http") HTTPSchemeHTTPS = HTTPSchemeKey.String("https") ) var sc = &internal.SemanticConventions{ EnduserIDKey: EnduserIDKey, HTTPClientIPKey: HTTPClientIPKey, HTTPFlavorKey: HTTPFlavorKey, HTTPHostKey: HTTPHostKey, HTTPMethodKey: HTTPMethodKey, HTTPRequestContentLengthKey: HTTPRequestContentLengthKey, HTTPRouteKey: HTTPRouteKey, HTTPSchemeHTTP: HTTPSchemeHTTP, HTTPSchemeHTTPS: HTTPSchemeHTTPS, HTTPServerNameKey: HTTPServerNameKey, HTTPStatusCodeKey: HTTPStatusCodeKey, HTTPTargetKey: HTTPTargetKey, HTTPURLKey: HTTPURLKey, HTTPUserAgentKey: HTTPUserAgentKey, NetHostIPKey: NetHostIPKey, NetHostNameKey: NetHostNameKey, NetHostPortKey: NetHostPortKey, NetPeerIPKey: NetPeerIPKey, NetPeerNameKey: NetPeerNameKey, NetPeerPortKey: NetPeerPortKey, NetTransportIP: NetTransportIP, NetTransportOther: NetTransportOther, NetTransportTCP: NetTransportTCP, NetTransportUDP: NetTransportUDP, NetTransportUnix: NetTransportUnix, } // NetAttributesFromHTTPRequest generates attributes of the net // namespace as specified by the OpenTelemetry specification for a // span. The network parameter is a string that net.Dial function // from standard library can understand. func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { return sc.NetAttributesFromHTTPRequest(network, request) } // EndUserAttributesFromHTTPRequest generates attributes of the // enduser namespace as specified by the OpenTelemetry specification // for a span. func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { return sc.EndUserAttributesFromHTTPRequest(request) } // HTTPClientAttributesFromHTTPRequest generates attributes of the // http namespace as specified by the OpenTelemetry specification for // a span on the client side. func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { return sc.HTTPClientAttributesFromHTTPRequest(request) } // HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes // to be used with server-side HTTP metrics. 
func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { return sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, request) } // HTTPServerAttributesFromHTTPRequest generates attributes of the // http namespace as specified by the OpenTelemetry specification for // a span on the server side. Currently, only basic authentication is // supported. func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { return sc.HTTPServerAttributesFromHTTPRequest(serverName, route, request) } // HTTPAttributesFromHTTPStatusCode generates attributes of the http // namespace as specified by the OpenTelemetry specification for a // span. func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { return sc.HTTPAttributesFromHTTPStatusCode(code) } // SpanStatusFromHTTPStatusCode generates a status code and a message // as specified by the OpenTelemetry specification for a span. func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { return internal.SpanStatusFromHTTPStatusCode(code) } // SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message // as specified by the OpenTelemetry specification for a span. // Exclude 4xx for SERVER to set the appropriate status. func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { return internal.SpanStatusFromHTTPStatusCodeAndSpanKind(code, spanKind) } opentelemetry-go-1.21.0/semconv/v1.10.0/resource.go000066400000000000000000001013201452547353200216340ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.10.0" import "go.opentelemetry.io/otel/attribute" // A cloud environment (e.g. GCP, Azure, AWS) const ( // Name of the cloud provider. // // Type: Enum // Required: No // Stability: stable CloudProviderKey = attribute.Key("cloud.provider") // The cloud account ID the resource is assigned to. // // Type: string // Required: No // Stability: stable // Examples: '111111111111', 'opentelemetry' CloudAccountIDKey = attribute.Key("cloud.account.id") // The geographical region the resource is running. // // Type: string // Required: No // Stability: stable // Examples: 'us-central1', 'us-east-1' // Note: Refer to your provider's docs to see the available regions, for example // [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc- // detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global- // infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en- // us/global-infrastructure/geographies/), [Google Cloud // regions](https://cloud.google.com/about/locations), or [Tencent Cloud // regions](https://intl.cloud.tencent.com/document/product/213/6091). 
CloudRegionKey = attribute.Key("cloud.region") // Cloud regions often have multiple, isolated locations known as zones to // increase availability. Availability zone represents the zone where the resource // is running. // // Type: string // Required: No // Stability: stable // Examples: 'us-east-1c' // Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud. CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") // The cloud platform in use. // // Type: Enum // Required: No // Stability: stable // Note: The prefix of the service SHOULD match the one specified in // `cloud.provider`. CloudPlatformKey = attribute.Key("cloud.platform") ) var ( // Alibaba Cloud CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") // Amazon Web Services CloudProviderAWS = CloudProviderKey.String("aws") // Microsoft Azure CloudProviderAzure = CloudProviderKey.String("azure") // Google Cloud Platform CloudProviderGCP = CloudProviderKey.String("gcp") // Tencent Cloud CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") ) var ( // Alibaba Cloud Elastic Compute Service CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") // Alibaba Cloud Function Compute CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") // AWS Elastic Compute Cloud CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") // AWS Elastic Container Service CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") // AWS Elastic Kubernetes Service CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") // AWS Lambda CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") // AWS Elastic Beanstalk CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") // AWS App Runner CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") // Azure Virtual Machines CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") // Azure Container Instances CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") // Azure Kubernetes Service CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") // Azure Functions CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") // Azure App Service CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") // Google Cloud Compute Engine (GCE) CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") // Google Cloud Run CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") // Google Cloud Kubernetes Engine (GKE) CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") // Google Cloud Functions (GCF) CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") // Google Cloud App Engine (GAE) CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") // Tencent Cloud Cloud Virtual Machine (CVM) CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") // Tencent Cloud Elastic Kubernetes Service (EKS) CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") // Tencent Cloud Serverless Cloud Function (SCF) CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") ) // Resources used by AWS Elastic Container Service (ECS). const ( // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws. // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). 
// // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us- // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo // perguide/clusters.html). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l // aunch_types.html) for an ECS task. // // Type: Enum // Required: No // Stability: stable AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates // t/developerguide/task_definitions.html). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us- // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") // The task definition family this task definition is a member of. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-family' AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") // The revision for this task definition. // // Type: string // Required: No // Stability: stable // Examples: '8', '26' AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") ) var ( // ec2 AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") // fargate AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") ) // Resources used by AWS Elastic Kubernetes Service (EKS). const ( // The ARN of an EKS cluster. // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") ) // Resources specific to Amazon Web Services. const ( // The name(s) of the AWS log group(s) an application is writing to. // // Type: string[] // Required: No // Stability: stable // Examples: '/aws/lambda/my-function', 'opentelemetry-service' // Note: Multiple log groups must be supported for cases like multi-container // applications, where a single application has sidecar containers, and each write // to their own log group. AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") // The Amazon Resource Name(s) (ARN) of the AWS log group(s). // // Type: string[] // Required: No // Stability: stable // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' // Note: See the [log group ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- // access-control-overview-cwl.html#CWL_ARN_Format). AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") // The name(s) of the AWS log stream(s) an application is writing to. // // Type: string[] // Required: No // Stability: stable // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") // The ARN(s) of the AWS log stream(s). // // Type: string[] // Required: No // Stability: stable // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' // Note: See the [log stream ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- // access-control-overview-cwl.html#CWL_ARN_Format). 
One log group can contain // several log streams, so these ARNs necessarily identify both a log group and a // log stream. AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") ) // A container instance. const ( // Container name used by container runtime. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-autoconf' ContainerNameKey = attribute.Key("container.name") // Container ID. Usually a UUID, as for example used to [identify Docker // containers](https://docs.docker.com/engine/reference/run/#container- // identification). The UUID might be abbreviated. // // Type: string // Required: No // Stability: stable // Examples: 'a3bf90e006b2' ContainerIDKey = attribute.Key("container.id") // The container runtime managing this container. // // Type: string // Required: No // Stability: stable // Examples: 'docker', 'containerd', 'rkt' ContainerRuntimeKey = attribute.Key("container.runtime") // Name of the image the container was built on. // // Type: string // Required: No // Stability: stable // Examples: 'gcr.io/opentelemetry/operator' ContainerImageNameKey = attribute.Key("container.image.name") // Container image tag. // // Type: string // Required: No // Stability: stable // Examples: '0.1' ContainerImageTagKey = attribute.Key("container.image.tag") ) // The software deployment. const ( // Name of the [deployment // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka // deployment tier). // // Type: string // Required: No // Stability: stable // Examples: 'staging', 'production' DeploymentEnvironmentKey = attribute.Key("deployment.environment") ) // The device on which the process represented by this resource is running. const ( // A unique identifier representing the device // // Type: string // Required: No // Stability: stable // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' // Note: The device identifier MUST only be defined using the values outlined // below. This value is not an advertising identifier and MUST NOT be used as // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the // Firebase Installation ID or a globally unique UUID which is persisted across // sessions in your application. More information can be found // [here](https://developer.android.com/training/articles/user-data-ids) on best // practices and exact implementation details. Caution should be taken when // storing personal data or anything which can identify a user. GDPR and data // protection laws may apply, ensure you do your own due diligence. DeviceIDKey = attribute.Key("device.id") // The model identifier for the device // // Type: string // Required: No // Stability: stable // Examples: 'iPhone3,4', 'SM-G920F' // Note: It's recommended this value represents a machine readable version of the // model identifier rather than the market or consumer-friendly name of the // device. DeviceModelIdentifierKey = attribute.Key("device.model.identifier") // The marketing name for the device model // // Type: string // Required: No // Stability: stable // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' // Note: It's recommended this value represents a human readable version of the // device model rather than a machine readable alternative. 
DeviceModelNameKey = attribute.Key("device.model.name") // The name of the device manufacturer // // Type: string // Required: No // Stability: stable // Examples: 'Apple', 'Samsung' // Note: The Android OS provides this field via // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). // iOS apps SHOULD hardcode the value `Apple`. DeviceManufacturerKey = attribute.Key("device.manufacturer") ) // A serverless instance. const ( // The name of the single function that this runtime instance executes. // // Type: string // Required: Always // Stability: stable // Examples: 'my-function' // Note: This is the name of the function as configured/deployed on the FaaS // platform and is usually different from the name of the callback function (which // may be stored in the // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span- // general.md#source-code-attributes) span attributes). FaaSNameKey = attribute.Key("faas.name") // The unique ID of the single function that this runtime instance executes. // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' // Note: Depending on the cloud provider, use: // * **AWS Lambda:** The function // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and- // namespaces.html). // Take care not to use the "invoked ARN" directly but replace any // [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration- // aliases.html) with the resolved function version, as the same runtime instance // may be invokable with multiple // different aliases. // * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full- // resource-names) // * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en- // us/rest/api/resources/resources/get-by-id). // On some providers, it may not be possible to determine the full ID at startup, // which is why this field cannot be made required. For example, on AWS the // account ID // part of the ARN is not available without calling another AWS API // which may be deemed too slow for a short-running lambda function. // As an alternative, consider setting `faas.id` as a span attribute instead. FaaSIDKey = attribute.Key("faas.id") // The immutable version of the function being executed. // // Type: string // Required: No // Stability: stable // Examples: '26', 'pinkfroid-00002' // Note: Depending on the cloud provider and platform, use: // * **AWS Lambda:** The [function // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration- // versions.html) // (an integer represented as a decimal string). // * **Google Cloud Run:** The // [revision](https://cloud.google.com/run/docs/managing/revisions) // (i.e., the function name plus the revision suffix). // * **Google Cloud Functions:** The value of the // [`K_REVISION` environment // variable](https://cloud.google.com/functions/docs/env- // var#runtime_environment_variables_set_automatically). // * **Azure Functions:** Not applicable. Do not set this attribute. FaaSVersionKey = attribute.Key("faas.version") // The execution environment ID as a string, that will be potentially reused for // other invocations to the same function/function version. // // Type: string // Required: No // Stability: stable // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' // Note: * **AWS Lambda:** Use the (full) log stream name. 
FaaSInstanceKey = attribute.Key("faas.instance") // The amount of memory available to the serverless function in MiB. // // Type: int // Required: No // Stability: stable // Examples: 128 // Note: It's recommended to set this attribute since e.g. too little memory can // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this // information. FaaSMaxMemoryKey = attribute.Key("faas.max_memory") ) // A host is defined as a general computing instance. const ( // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud // provider. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-test' HostIDKey = attribute.Key("host.id") // Name of the host. On Unix systems, it may contain what the hostname command // returns, or the fully qualified hostname, or another name specified by the // user. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-test' HostNameKey = attribute.Key("host.name") // Type of host. For Cloud, this must be the machine type. // // Type: string // Required: No // Stability: stable // Examples: 'n1-standard-1' HostTypeKey = attribute.Key("host.type") // The CPU architecture the host system is running on. // // Type: Enum // Required: No // Stability: stable HostArchKey = attribute.Key("host.arch") // Name of the VM image or OS install the host was instantiated from. // // Type: string // Required: No // Stability: stable // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' HostImageNameKey = attribute.Key("host.image.name") // VM image ID. For Cloud, this value is from the provider. // // Type: string // Required: No // Stability: stable // Examples: 'ami-07b06b442921831e5' HostImageIDKey = attribute.Key("host.image.id") // The version string of the VM image as defined in [Version // Attributes](README.md#version-attributes). // // Type: string // Required: No // Stability: stable // Examples: '0.1' HostImageVersionKey = attribute.Key("host.image.version") ) var ( // AMD64 HostArchAMD64 = HostArchKey.String("amd64") // ARM32 HostArchARM32 = HostArchKey.String("arm32") // ARM64 HostArchARM64 = HostArchKey.String("arm64") // Itanium HostArchIA64 = HostArchKey.String("ia64") // 32-bit PowerPC HostArchPPC32 = HostArchKey.String("ppc32") // 64-bit PowerPC HostArchPPC64 = HostArchKey.String("ppc64") // IBM z/Architecture HostArchS390x = HostArchKey.String("s390x") // 32-bit x86 HostArchX86 = HostArchKey.String("x86") ) // A Kubernetes Cluster. const ( // The name of the cluster. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-cluster' K8SClusterNameKey = attribute.Key("k8s.cluster.name") ) // A Kubernetes Node object. const ( // The name of the Node. // // Type: string // Required: No // Stability: stable // Examples: 'node-1' K8SNodeNameKey = attribute.Key("k8s.node.name") // The UID of the Node. // // Type: string // Required: No // Stability: stable // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' K8SNodeUIDKey = attribute.Key("k8s.node.uid") ) // A Kubernetes Namespace. const ( // The name of the namespace that the pod is running in. // // Type: string // Required: No // Stability: stable // Examples: 'default' K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") ) // A Kubernetes Pod object. const ( // The UID of the Pod. 
// // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SPodUIDKey = attribute.Key("k8s.pod.uid") // The name of the Pod. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-pod-autoconf' K8SPodNameKey = attribute.Key("k8s.pod.name") ) // A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). const ( // The name of the Container from Pod specification, must be unique within a Pod. // Container runtime usually uses different globally unique name // (`container.name`). // // Type: string // Required: No // Stability: stable // Examples: 'redis' K8SContainerNameKey = attribute.Key("k8s.container.name") // Number of times the container was restarted. This attribute can be used to // identify a particular container (running or stopped) within a container spec. // // Type: int // Required: No // Stability: stable // Examples: 0, 2 K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") ) // A Kubernetes ReplicaSet object. const ( // The UID of the ReplicaSet. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") // The name of the ReplicaSet. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") ) // A Kubernetes Deployment object. const ( // The UID of the Deployment. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") // The name of the Deployment. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") ) // A Kubernetes StatefulSet object. const ( // The UID of the StatefulSet. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") // The name of the StatefulSet. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") ) // A Kubernetes DaemonSet object. const ( // The UID of the DaemonSet. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") // The name of the DaemonSet. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") ) // A Kubernetes Job object. const ( // The UID of the Job. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SJobUIDKey = attribute.Key("k8s.job.uid") // The name of the Job. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SJobNameKey = attribute.Key("k8s.job.name") ) // A Kubernetes CronJob object. const ( // The UID of the CronJob. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") // The name of the CronJob. 
// // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") ) // The operating system (OS) on which the process represented by this resource is running. const ( // The operating system type. // // Type: Enum // Required: Always // Stability: stable OSTypeKey = attribute.Key("os.type") // Human readable (not intended to be parsed) OS version information, like e.g. // reported by `ver` or `lsb_release -a` commands. // // Type: string // Required: No // Stability: stable // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' OSDescriptionKey = attribute.Key("os.description") // Human readable operating system name. // // Type: string // Required: No // Stability: stable // Examples: 'iOS', 'Android', 'Ubuntu' OSNameKey = attribute.Key("os.name") // The version string of the operating system as defined in [Version // Attributes](../../resource/semantic_conventions/README.md#version-attributes). // // Type: string // Required: No // Stability: stable // Examples: '14.2.1', '18.04.1' OSVersionKey = attribute.Key("os.version") ) var ( // Microsoft Windows OSTypeWindows = OSTypeKey.String("windows") // Linux OSTypeLinux = OSTypeKey.String("linux") // Apple Darwin OSTypeDarwin = OSTypeKey.String("darwin") // FreeBSD OSTypeFreeBSD = OSTypeKey.String("freebsd") // NetBSD OSTypeNetBSD = OSTypeKey.String("netbsd") // OpenBSD OSTypeOpenBSD = OSTypeKey.String("openbsd") // DragonFly BSD OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") // HP-UX (Hewlett Packard Unix) OSTypeHPUX = OSTypeKey.String("hpux") // AIX (Advanced Interactive eXecutive) OSTypeAIX = OSTypeKey.String("aix") // Oracle Solaris OSTypeSolaris = OSTypeKey.String("solaris") // IBM z/OS OSTypeZOS = OSTypeKey.String("z_os") ) // An operating system process. const ( // Process identifier (PID). // // Type: int // Required: No // Stability: stable // Examples: 1234 ProcessPIDKey = attribute.Key("process.pid") // The name of the process executable. On Linux based systems, can be set to the // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of // `GetProcessImageFileNameW`. // // Type: string // Required: See below // Stability: stable // Examples: 'otelcol' ProcessExecutableNameKey = attribute.Key("process.executable.name") // The full path to the process executable. On Linux based systems, can be set to // the target of `proc/[pid]/exe`. On Windows, can be set to the result of // `GetProcessImageFileNameW`. // // Type: string // Required: See below // Stability: stable // Examples: '/usr/bin/cmd/otelcol' ProcessExecutablePathKey = attribute.Key("process.executable.path") // The command used to launch the process (i.e. the command name). On Linux based // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, // can be set to the first parameter extracted from `GetCommandLineW`. // // Type: string // Required: See below // Stability: stable // Examples: 'cmd/otelcol' ProcessCommandKey = attribute.Key("process.command") // The full command used to launch the process as a single string representing the // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not // set this if you have to assemble it just for monitoring; use // `process.command_args` instead. 
// // Type: string // Required: See below // Stability: stable // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' ProcessCommandLineKey = attribute.Key("process.command_line") // All the command arguments (including the command/executable itself) as received // by the process. On Linux-based systems (and some other Unixoid systems // supporting procfs), can be set according to the list of null-delimited strings // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be // the full argv vector passed to `main`. // // Type: string[] // Required: See below // Stability: stable // Examples: 'cmd/otecol', '--config=config.yaml' ProcessCommandArgsKey = attribute.Key("process.command_args") // The username of the user that owns the process. // // Type: string // Required: No // Stability: stable // Examples: 'root' ProcessOwnerKey = attribute.Key("process.owner") ) // The single (language) runtime instance which is monitored. const ( // The name of the runtime of this process. For compiled native binaries, this // SHOULD be the name of the compiler. // // Type: string // Required: No // Stability: stable // Examples: 'OpenJDK Runtime Environment' ProcessRuntimeNameKey = attribute.Key("process.runtime.name") // The version of the runtime of this process, as returned by the runtime without // modification. // // Type: string // Required: No // Stability: stable // Examples: '14.0.2' ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") // An additional description about the runtime of the process, for example a // specific vendor customization of the runtime environment. // // Type: string // Required: No // Stability: stable // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") ) // A service instance. const ( // Logical name of the service. // // Type: string // Required: Always // Stability: stable // Examples: 'shoppingcart' // Note: MUST be the same for all instances of horizontally scaled services. If // the value was not specified, SDKs MUST fallback to `unknown_service:` // concatenated with [`process.executable.name`](process.md#process), e.g. // `unknown_service:bash`. If `process.executable.name` is not available, the // value MUST be set to `unknown_service`. ServiceNameKey = attribute.Key("service.name") // A namespace for `service.name`. // // Type: string // Required: No // Stability: stable // Examples: 'Shop' // Note: A string value having a meaning that helps to distinguish a group of // services, for example the team name that owns a group of services. // `service.name` is expected to be unique within the same namespace. If // `service.namespace` is not specified in the Resource then `service.name` is // expected to be unique for all services that have no explicit namespace defined // (so the empty/unspecified namespace is simply one more valid namespace). Zero- // length namespace string is assumed equal to unspecified namespace. ServiceNamespaceKey = attribute.Key("service.namespace") // The string ID of the service instance. // // Type: string // Required: No // Stability: stable // Examples: '627cc493-f310-47de-96bd-71410b7dec09' // Note: MUST be unique for each instance of the same // `service.namespace,service.name` pair (in other words // `service.namespace,service.name,service.instance.id` triplet MUST be globally // unique). The ID helps to distinguish instances of the same service that exist // at the same time (e.g. 
instances of a horizontally scaled service). It is // preferable for the ID to be persistent and stay the same for the lifetime of // the service instance, however it is acceptable that the ID is ephemeral and // changes during important lifetime events for the service (e.g. service // restarts). If the service has no inherent unique ID that can be used as the // value of this attribute it is recommended to generate a random Version 1 or // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use // Version 5, see RFC 4122 for more recommendations). ServiceInstanceIDKey = attribute.Key("service.instance.id") // The version string of the service API or implementation. // // Type: string // Required: No // Stability: stable // Examples: '2.0.0' ServiceVersionKey = attribute.Key("service.version") ) // The telemetry SDK used to capture data recorded by the instrumentation libraries. const ( // The name of the telemetry SDK as defined above. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") // The language of the telemetry SDK. // // Type: Enum // Required: No // Stability: stable TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") // The version string of the telemetry SDK. // // Type: string // Required: No // Stability: stable // Examples: '1.2.3' TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") // The version string of the auto instrumentation agent, if used. // // Type: string // Required: No // Stability: stable // Examples: '1.2.3' TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") ) var ( // cpp TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") // dotnet TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") // erlang TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") // go TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") // java TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") // nodejs TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") // php TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") // python TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") // ruby TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") // webjs TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") // swift TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") ) // Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. const ( // The name of the web engine. // // Type: string // Required: Always // Stability: stable // Examples: 'WildFly' WebEngineNameKey = attribute.Key("webengine.name") // The version of the web engine. // // Type: string // Required: No // Stability: stable // Examples: '21.0.0' WebEngineVersionKey = attribute.Key("webengine.version") // Additional description of the web engine (e.g. detailed version and edition // information). 
// // Type: string // Required: No // Stability: stable // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' WebEngineDescriptionKey = attribute.Key("webengine.description") ) opentelemetry-go-1.21.0/semconv/v1.10.0/schema.go000066400000000000000000000017141452547353200212530ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.10.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ const SchemaURL = "https://opentelemetry.io/schemas/1.10.0" opentelemetry-go-1.21.0/semconv/v1.10.0/trace.go000066400000000000000000001671221452547353200211170ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.10.0" import "go.opentelemetry.io/otel/attribute" // Span attributes used by AWS Lambda (in addition to general `faas` attributes). const ( // The full invoked ARN as provided on the `Context` passed to the function // (`Lambda-Runtime-Invoked-Function-ARN` header on the `/runtime/invocation/next` // applicable). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' // Note: This may be different from `faas.id` if an alias is involved. AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") ) // This document defines attributes for CloudEvents. CloudEvents is a specification on how to define event data in a standard way. These attributes can be attached to spans when performing operations with CloudEvents, regardless of the protocol being used. const ( // The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec // .md#id) uniquely identifies the event. // // Type: string // Required: Always // Stability: stable // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") // The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.m // d#source-1) identifies the context in which an event happened. 
// // Type: string // Required: Always // Stability: stable // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my- // service' CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") // The [version of the CloudEvents specification](https://github.com/cloudevents/s // pec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses. // // Type: string // Required: Always // Stability: stable // Examples: '1.0' CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") // The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/sp // ec.md#type) contains a value describing the type of event related to the // originating occurrence. // // Type: string // Required: Always // Stability: stable // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2' CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") // The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec. // md#subject) of the event in the context of the event producer (identified by // source). // // Type: string // Required: No // Stability: stable // Examples: 'mynewfile.jpg' CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") ) // This document defines semantic conventions for the OpenTracing Shim const ( // Parent-child Reference type // // Type: Enum // Required: No // Stability: stable // Note: The causal relationship between a child Span and a parent Span. OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") ) var ( // The parent Span depends on the child Span in some capacity OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") // The parent Span does not depend in any way on the result of the child Span OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") ) // This document defines the attributes used to perform database client calls. const ( // An identifier for the database management system (DBMS) product being used. See // below for a list of well-known identifiers. // // Type: Enum // Required: Always // Stability: stable DBSystemKey = attribute.Key("db.system") // The connection string used to connect to the database. It is recommended to // remove embedded credentials. // // Type: string // Required: No // Stability: stable // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' DBConnectionStringKey = attribute.Key("db.connection_string") // Username for accessing the database. // // Type: string // Required: No // Stability: stable // Examples: 'readonly_user', 'reporting_user' DBUserKey = attribute.Key("db.user") // The fully-qualified class name of the [Java Database Connectivity // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver // used to connect. // // Type: string // Required: No // Stability: stable // Examples: 'org.postgresql.Driver', // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") // This attribute is used to report the name of the database being accessed. For // commands that switch the database, this should be set to the target database // (even if the command fails). // // Type: string // Required: Required, if applicable. // Stability: stable // Examples: 'customers', 'main' // Note: In some SQL databases, the database name to be used is called "schema // name". In case there are multiple layers that could be considered for database // name (e.g. 
Oracle instance name and schema name), the database name to be used // is the more specific layer (e.g. Oracle schema name). DBNameKey = attribute.Key("db.name") // The database statement being executed. // // Type: string // Required: Required if applicable and not explicitly disabled via // instrumentation configuration. // Stability: stable // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' // Note: The value may be sanitized to exclude sensitive information. DBStatementKey = attribute.Key("db.statement") // The name of the operation being executed, e.g. the [MongoDB command // name](https://docs.mongodb.com/manual/reference/command/#database-operations) // such as `findAndModify`, or the SQL keyword. // // Type: string // Required: Required, if `db.statement` is not applicable. // Stability: stable // Examples: 'findAndModify', 'HMSET', 'SELECT' // Note: When setting this to an SQL keyword, it is not recommended to attempt any // client-side parsing of `db.statement` just to get this property, but it should // be set if the operation name is provided by the library being instrumented. If // the SQL statement has an ambiguous operation, or performs more than one // operation, this value may be omitted. DBOperationKey = attribute.Key("db.operation") ) var ( // Some other SQL database. Fallback only. See notes DBSystemOtherSQL = DBSystemKey.String("other_sql") // Microsoft SQL Server DBSystemMSSQL = DBSystemKey.String("mssql") // MySQL DBSystemMySQL = DBSystemKey.String("mysql") // Oracle Database DBSystemOracle = DBSystemKey.String("oracle") // IBM DB2 DBSystemDB2 = DBSystemKey.String("db2") // PostgreSQL DBSystemPostgreSQL = DBSystemKey.String("postgresql") // Amazon Redshift DBSystemRedshift = DBSystemKey.String("redshift") // Apache Hive DBSystemHive = DBSystemKey.String("hive") // Cloudscape DBSystemCloudscape = DBSystemKey.String("cloudscape") // HyperSQL DataBase DBSystemHSQLDB = DBSystemKey.String("hsqldb") // Progress Database DBSystemProgress = DBSystemKey.String("progress") // SAP MaxDB DBSystemMaxDB = DBSystemKey.String("maxdb") // SAP HANA DBSystemHanaDB = DBSystemKey.String("hanadb") // Ingres DBSystemIngres = DBSystemKey.String("ingres") // FirstSQL DBSystemFirstSQL = DBSystemKey.String("firstsql") // EnterpriseDB DBSystemEDB = DBSystemKey.String("edb") // InterSystems Caché DBSystemCache = DBSystemKey.String("cache") // Adabas (Adaptable Database System) DBSystemAdabas = DBSystemKey.String("adabas") // Firebird DBSystemFirebird = DBSystemKey.String("firebird") // Apache Derby DBSystemDerby = DBSystemKey.String("derby") // FileMaker DBSystemFilemaker = DBSystemKey.String("filemaker") // Informix DBSystemInformix = DBSystemKey.String("informix") // InstantDB DBSystemInstantDB = DBSystemKey.String("instantdb") // InterBase DBSystemInterbase = DBSystemKey.String("interbase") // MariaDB DBSystemMariaDB = DBSystemKey.String("mariadb") // Netezza DBSystemNetezza = DBSystemKey.String("netezza") // Pervasive PSQL DBSystemPervasive = DBSystemKey.String("pervasive") // PointBase DBSystemPointbase = DBSystemKey.String("pointbase") // SQLite DBSystemSqlite = DBSystemKey.String("sqlite") // Sybase DBSystemSybase = DBSystemKey.String("sybase") // Teradata DBSystemTeradata = DBSystemKey.String("teradata") // Vertica DBSystemVertica = DBSystemKey.String("vertica") // H2 DBSystemH2 = DBSystemKey.String("h2") // ColdFusion IMQ DBSystemColdfusion = DBSystemKey.String("coldfusion") // Apache Cassandra DBSystemCassandra = DBSystemKey.String("cassandra") // Apache HBase 
DBSystemHBase = DBSystemKey.String("hbase") // MongoDB DBSystemMongoDB = DBSystemKey.String("mongodb") // Redis DBSystemRedis = DBSystemKey.String("redis") // Couchbase DBSystemCouchbase = DBSystemKey.String("couchbase") // CouchDB DBSystemCouchDB = DBSystemKey.String("couchdb") // Microsoft Azure Cosmos DB DBSystemCosmosDB = DBSystemKey.String("cosmosdb") // Amazon DynamoDB DBSystemDynamoDB = DBSystemKey.String("dynamodb") // Neo4j DBSystemNeo4j = DBSystemKey.String("neo4j") // Apache Geode DBSystemGeode = DBSystemKey.String("geode") // Elasticsearch DBSystemElasticsearch = DBSystemKey.String("elasticsearch") // Memcached DBSystemMemcached = DBSystemKey.String("memcached") // CockroachDB DBSystemCockroachdb = DBSystemKey.String("cockroachdb") ) // Connection-level attributes for Microsoft SQL Server const ( // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en- // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) // connecting to. This name is used to determine the port of a named instance. // // Type: string // Required: No // Stability: stable // Examples: 'MSSQLSERVER' // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer // required (but still recommended if non-standard). DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") ) // Call-level attributes for Cassandra const ( // The fetch size used for paging, i.e. how many rows will be returned at once. // // Type: int // Required: No // Stability: stable // Examples: 5000 DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") // The consistency level of the query. Based on consistency values from // [CQL](https://docs.datastax.com/en/cassandra- // oss/3.0/cassandra/dml/dmlConfigConsistency.html). // // Type: Enum // Required: No // Stability: stable DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") // The name of the primary table that the operation is acting upon, including the // keyspace name (if applicable). // // Type: string // Required: Recommended if available. // Stability: stable // Examples: 'mytable' // Note: This mirrors the db.sql.table attribute but references cassandra rather // than sql. It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting upon an // anonymous table, or more than one table, this value MUST NOT be set. DBCassandraTableKey = attribute.Key("db.cassandra.table") // Whether or not the query is idempotent. // // Type: boolean // Required: No // Stability: stable DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") // The number of times a query was speculatively executed. Not set or `0` if the // query was not executed speculatively. // // Type: int // Required: No // Stability: stable // Examples: 0, 2 DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") // The ID of the coordinating node for a query. // // Type: string // Required: No // Stability: stable // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") // The data center of the coordinating node for a query. 
// // Type: string // Required: No // Stability: stable // Examples: 'us-west-2' DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") ) var ( // all DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") // each_quorum DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") // quorum DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") // local_quorum DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") // one DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") // two DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") // three DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") // local_one DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") // any DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") // serial DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") // local_serial DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") ) // Call-level attributes for Redis const ( // The index of the database being accessed as used in the [`SELECT` // command](https://redis.io/commands/select), provided as an integer. To be used // instead of the generic `db.name` attribute. // // Type: int // Required: Required, if other than the default database (`0`). // Stability: stable // Examples: 0, 1, 15 DBRedisDBIndexKey = attribute.Key("db.redis.database_index") ) // Call-level attributes for MongoDB const ( // The collection being accessed within the database stated in `db.name`. // // Type: string // Required: Always // Stability: stable // Examples: 'customers', 'products' DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") ) // Call-level attributes for SQL databases const ( // The name of the primary table that the operation is acting upon, including the // database name (if applicable). // // Type: string // Required: Recommended if available. // Stability: stable // Examples: 'public.users', 'customers' // Note: It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting upon an // anonymous table, or more than one table, this value MUST NOT be set. DBSQLTableKey = attribute.Key("db.sql.table") ) // This document defines the attributes used to report a single exception associated with a span. const ( // The type of the exception (its fully-qualified class name, if applicable). The // dynamic type of the exception should be preferred over the static type in // languages that support it. // // Type: string // Required: No // Stability: stable // Examples: 'java.net.ConnectException', 'OSError' ExceptionTypeKey = attribute.Key("exception.type") // The exception message. // // Type: string // Required: No // Stability: stable // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" ExceptionMessageKey = attribute.Key("exception.message") // A stacktrace as a string in the natural representation for the language // runtime. The representation is to be determined and documented by each language // SIG. 
// // Type: string // Required: No // Stability: stable // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test // exception\\n at ' // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' ExceptionStacktraceKey = attribute.Key("exception.stacktrace") // SHOULD be set to true if the exception event is recorded at a point where it is // known that the exception is escaping the scope of the span. // // Type: boolean // Required: No // Stability: stable // Note: An exception is considered to have escaped (or left) the scope of a span, // if that span is ended while the exception is still logically "in flight". // This may be actually "in flight" in some languages (e.g. if the exception // is passed to a Context manager's `__exit__` method in Python) but will // usually be caught at the point of recording the exception in most languages. // It is usually not possible to determine at the point where an exception is // thrown // whether it will escape the scope of a span. // However, it is trivial to know that an exception // will escape, if one checks for an active exception just before ending the span, // as done in the [example above](#recording-an-exception). // It follows that an exception may still escape the scope of the span // even if the `exception.escaped` attribute was not set or set to false, // since the event might have been recorded at a time where it was not // clear whether the exception will escape. ExceptionEscapedKey = attribute.Key("exception.escaped") ) // This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. const ( // Type of the trigger which caused this function execution. // // Type: Enum // Required: No // Stability: stable // Note: For the server/consumer span on the incoming side, // `faas.trigger` MUST be set. // Clients invoking FaaS instances usually cannot set `faas.trigger`, // since they would typically need to look in the payload to determine // the event type. If clients set it, it should be the same as the // trigger that corresponding incoming would have (i.e., this has // nothing to do with the underlying transport used to make the API // call to invoke the lambda, which is often HTTP). FaaSTriggerKey = attribute.Key("faas.trigger") // The execution ID of the current function execution. // // Type: string // Required: No // Stability: stable // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' FaaSExecutionKey = attribute.Key("faas.execution") ) var ( // A response to some data source operation such as a database or filesystem read/write FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") // To provide an answer to an inbound HTTP request FaaSTriggerHTTP = FaaSTriggerKey.String("http") // A function is set to be executed when messages are sent to a messaging system FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") // A function is scheduled to be executed regularly FaaSTriggerTimer = FaaSTriggerKey.String("timer") // If none of the others apply FaaSTriggerOther = FaaSTriggerKey.String("other") ) // Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. const ( // The name of the source on which the triggering operation was performed. 
For // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos // DB to the database name. // // Type: string // Required: Always // Stability: stable // Examples: 'myBucketName', 'myDBName' FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") // Describes the type of the operation that was performed on the data. // // Type: Enum // Required: Always // Stability: stable FaaSDocumentOperationKey = attribute.Key("faas.document.operation") // A string containing the time when the data was accessed in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed // in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // Required: Always // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSDocumentTimeKey = attribute.Key("faas.document.time") // The document name/table subjected to the operation. For example, in Cloud // Storage or S3 is the name of the file, and in Cosmos DB the table name. // // Type: string // Required: No // Stability: stable // Examples: 'myFile.txt', 'myTableName' FaaSDocumentNameKey = attribute.Key("faas.document.name") ) var ( // When a new object is created FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") // When an object is modified FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") // When an object is deleted FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") ) // Semantic Convention for FaaS scheduled to be executed regularly. const ( // A string containing the function invocation time in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed // in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // Required: Always // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSTimeKey = attribute.Key("faas.time") // A string containing the schedule period as [Cron Expression](https://docs.oracl // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). // // Type: string // Required: No // Stability: stable // Examples: '0/5 * * * ? *' FaaSCronKey = attribute.Key("faas.cron") ) // Contains additional attributes for incoming FaaS spans. const ( // A boolean that is true if the serverless function is executed for the first // time (aka cold-start). // // Type: boolean // Required: No // Stability: stable FaaSColdstartKey = attribute.Key("faas.coldstart") ) // Contains additional attributes for outgoing FaaS spans. const ( // The name of the invoked function. // // Type: string // Required: Always // Stability: stable // Examples: 'my-function' // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked // function. FaaSInvokedNameKey = attribute.Key("faas.invoked_name") // The cloud provider of the invoked function. // // Type: Enum // Required: Always // Stability: stable // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked // function. FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") // The cloud region of the invoked function. // // Type: string // Required: For some cloud providers, like AWS or GCP, the region in which a // function is hosted is essential to uniquely identify the function and also part // of its endpoint. Since it's part of the endpoint being called, the region is // always known to clients. In these cases, `faas.invoked_region` MUST be set // accordingly. 
If the region is unknown to the client or not required for // identifying the invoked function, setting `faas.invoked_region` is optional. // Stability: stable // Examples: 'eu-central-1' // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked // function. FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") ) var ( // Alibaba Cloud FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") // Amazon Web Services FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") // Microsoft Azure FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") // Google Cloud Platform FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") // Tencent Cloud FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") ) // These attributes may be used for any network related operation. const ( // Transport protocol used. See note below. // // Type: Enum // Required: No // Stability: stable NetTransportKey = attribute.Key("net.transport") // Remote address of the peer (dotted decimal for IPv4 or // [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6) // // Type: string // Required: No // Stability: stable // Examples: '127.0.0.1' NetPeerIPKey = attribute.Key("net.peer.ip") // Remote port number. // // Type: int // Required: No // Stability: stable // Examples: 80, 8080, 443 NetPeerPortKey = attribute.Key("net.peer.port") // Remote hostname or similar, see note below. // // Type: string // Required: No // Stability: stable // Examples: 'example.com' NetPeerNameKey = attribute.Key("net.peer.name") // Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host. // // Type: string // Required: No // Stability: stable // Examples: '192.168.0.1' NetHostIPKey = attribute.Key("net.host.ip") // Like `net.peer.port` but for the host port. // // Type: int // Required: No // Stability: stable // Examples: 35555 NetHostPortKey = attribute.Key("net.host.port") // Local hostname or similar, see note below. // // Type: string // Required: No // Stability: stable // Examples: 'localhost' NetHostNameKey = attribute.Key("net.host.name") // The internet connection type currently being used by the host. // // Type: Enum // Required: No // Stability: stable // Examples: 'wifi' NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") // This describes more details regarding the connection.type. It may be the type // of cell technology connection, but it could be used for describing details // about a wifi connection. // // Type: Enum // Required: No // Stability: stable // Examples: 'LTE' NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") // The name of the mobile carrier. // // Type: string // Required: No // Stability: stable // Examples: 'sprint' NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") // The mobile carrier country code. // // Type: string // Required: No // Stability: stable // Examples: '310' NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") // The mobile carrier network code. // // Type: string // Required: No // Stability: stable // Examples: '001' NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") // The ISO 3166-1 alpha-2 2-character country code associated with the mobile // carrier network. 
// // Type: string // Required: No // Stability: stable // Examples: 'DE' NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") ) var ( // ip_tcp NetTransportTCP = NetTransportKey.String("ip_tcp") // ip_udp NetTransportUDP = NetTransportKey.String("ip_udp") // Another IP-based protocol NetTransportIP = NetTransportKey.String("ip") // Unix Domain socket. See below NetTransportUnix = NetTransportKey.String("unix") // Named or anonymous pipe. See note below NetTransportPipe = NetTransportKey.String("pipe") // In-process communication NetTransportInProc = NetTransportKey.String("inproc") // Something else (non IP-based) NetTransportOther = NetTransportKey.String("other") ) var ( // wifi NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") // wired NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") // cell NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") // unavailable NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") // unknown NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") ) var ( // GPRS NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") // EDGE NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") // UMTS NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") // CDMA NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") // EVDO Rel. 0 NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") // EVDO Rev. A NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") // CDMA2000 1XRTT NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") // HSDPA NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") // HSUPA NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") // HSPA NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") // IDEN NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") // EVDO Rev. B NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") // LTE NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") // EHRPD NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") // HSPAP NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") // GSM NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") // TD-SCDMA NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") // IWLAN NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") // 5G NR (New Radio) NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") // 5G NRNSA (New Radio Non-Standalone) NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") // LTE CA NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") ) // Operations that access some remote service. const ( // The [`service.name`](../../resource/semantic_conventions/README.md#service) of // the remote service. SHOULD be equal to the actual `service.name` resource // attribute of the remote service if any. // // Type: string // Required: No // Stability: stable // Examples: 'AuthTokenCache' PeerServiceKey = attribute.Key("peer.service") ) // These attributes may be used for any operation with an authenticated and/or authorized enduser. 
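// An illustrative, non-normative sketch (not taken from the generated
// specification) of how the peer.service key above and the enduser.* keys
// declared below might be attached to a span; the span variable and the
// attribute values are hypothetical:
//
//	span.SetAttributes(
//		PeerServiceKey.String("AuthTokenCache"),
//		EnduserIDKey.String("username"),
//		EnduserRoleKey.String("admin"),
//	)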
const ( // Username or client_id extracted from the access token or // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the // inbound request from outside the system. // // Type: string // Required: No // Stability: stable // Examples: 'username' EnduserIDKey = attribute.Key("enduser.id") // Actual/assumed role the client is making the request under extracted from token // or application security context. // // Type: string // Required: No // Stability: stable // Examples: 'admin' EnduserRoleKey = attribute.Key("enduser.role") // Scopes or granted authorities the client currently possesses extracted from // token or application security context. The value would come from the scope // associated with an [OAuth 2.0 Access // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value // in a [SAML 2.0 Assertion](http://docs.oasis- // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). // // Type: string // Required: No // Stability: stable // Examples: 'read:message, write:files' EnduserScopeKey = attribute.Key("enduser.scope") ) // These attributes may be used for any operation to store information about a thread that started a span. const ( // Current "managed" thread ID (as opposed to OS thread ID). // // Type: int // Required: No // Stability: stable // Examples: 42 ThreadIDKey = attribute.Key("thread.id") // Current thread name. // // Type: string // Required: No // Stability: stable // Examples: 'main' ThreadNameKey = attribute.Key("thread.name") ) // These attributes allow to report this unit of code and therefore to provide more context about the span. const ( // The method or function name, or equivalent (usually rightmost part of the code // unit's name). // // Type: string // Required: No // Stability: stable // Examples: 'serveRequest' CodeFunctionKey = attribute.Key("code.function") // The "namespace" within which `code.function` is defined. Usually the qualified // class or module name, such that `code.namespace` + some separator + // `code.function` form a unique identifier for the code unit. // // Type: string // Required: No // Stability: stable // Examples: 'com.example.MyHTTPService' CodeNamespaceKey = attribute.Key("code.namespace") // The source code file name that identifies the code unit as uniquely as possible // (preferably an absolute file path). // // Type: string // Required: No // Stability: stable // Examples: '/usr/local/MyApplication/content_root/app/index.php' CodeFilepathKey = attribute.Key("code.filepath") // The line number in `code.filepath` best representing the operation. It SHOULD // point within the code unit named in `code.function`. // // Type: int // Required: No // Stability: stable // Examples: 42 CodeLineNumberKey = attribute.Key("code.lineno") ) // This document defines semantic conventions for HTTP client and server Spans. const ( // HTTP request method. // // Type: string // Required: Always // Stability: stable // Examples: 'GET', 'POST', 'HEAD' HTTPMethodKey = attribute.Key("http.method") // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. // Usually the fragment is not transmitted over HTTP, but if it is known, it // should be included nevertheless. // // Type: string // Required: No // Stability: stable // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' // Note: `http.url` MUST NOT contain credentials passed via URL in form of // `https://username:password@www.example.com/`. 
In such case the attribute's // value should be `https://www.example.com/`. HTTPURLKey = attribute.Key("http.url") // The full request target as passed in a HTTP request line or equivalent. // // Type: string // Required: No // Stability: stable // Examples: '/path/12314/?q=ddds#123' HTTPTargetKey = attribute.Key("http.target") // The value of the [HTTP host // header](https://tools.ietf.org/html/rfc7230#section-5.4). An empty Host header // should also be reported, see note. // // Type: string // Required: No // Stability: stable // Examples: 'www.example.org' // Note: When the header is present but empty the attribute SHOULD be set to the // empty string. Note that this is a valid situation that is expected in certain // cases, according the aforementioned [section of RFC // 7230](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is not // set the attribute MUST NOT be set. HTTPHostKey = attribute.Key("http.host") // The URI scheme identifying the used protocol. // // Type: string // Required: No // Stability: stable // Examples: 'http', 'https' HTTPSchemeKey = attribute.Key("http.scheme") // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). // // Type: int // Required: If and only if one was received/sent. // Stability: stable // Examples: 200 HTTPStatusCodeKey = attribute.Key("http.status_code") // Kind of HTTP protocol used. // // Type: Enum // Required: No // Stability: stable // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP` // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed. HTTPFlavorKey = attribute.Key("http.flavor") // Value of the [HTTP User- // Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by the // client. // // Type: string // Required: No // Stability: stable // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' HTTPUserAgentKey = attribute.Key("http.user_agent") // The size of the request payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For // requests using transport encoding, this should be the compressed size. // // Type: int // Required: No // Stability: stable // Examples: 3495 HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") // The size of the uncompressed request payload body after transport decoding. Not // set if transport encoding not used. // // Type: int // Required: No // Stability: stable // Examples: 5493 HTTPRequestContentLengthUncompressedKey = attribute.Key("http.request_content_length_uncompressed") // The size of the response payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For // requests using transport encoding, this should be the compressed size. // // Type: int // Required: No // Stability: stable // Examples: 3495 HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") // The size of the uncompressed response payload body after transport decoding. // Not set if transport encoding not used. // // Type: int // Required: No // Stability: stable // Examples: 5493 HTTPResponseContentLengthUncompressedKey = attribute.Key("http.response_content_length_uncompressed") // The ordinal number of request re-sending attempt. // // Type: int // Required: If and only if a request was retried. 
// Stability: stable // Examples: 3 HTTPRetryCountKey = attribute.Key("http.retry_count") ) var ( // HTTP 1.0 HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") // HTTP 1.1 HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") // HTTP 2 HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") // SPDY protocol HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") // QUIC protocol HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") ) // Semantic Convention for HTTP Server const ( // The primary server name of the matched virtual host. This should be obtained // via configuration. If no such configuration can be obtained, this attribute // MUST NOT be set ( `net.host.name` should be used instead). // // Type: string // Required: No // Stability: stable // Examples: 'example.com' // Note: `http.url` is usually not readily available on the server side but would // have to be assembled in a cumbersome and sometimes lossy process from other // information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus // preferred to supply the raw data that is available. HTTPServerNameKey = attribute.Key("http.server_name") // The matched route (path template). // // Type: string // Required: No // Stability: stable // Examples: '/users/:userID?' HTTPRouteKey = attribute.Key("http.route") // The IP address of the original client behind all proxies, if known (e.g. from // [X-Forwarded-For](https://developer.mozilla.org/en- // US/docs/Web/HTTP/Headers/X-Forwarded-For)). // // Type: string // Required: No // Stability: stable // Examples: '83.164.160.102' // Note: This is not necessarily the same as `net.peer.ip`, which would // identify the network-level peer, which may be a proxy. // This attribute should be set when a source of information different // from the one used for `net.peer.ip`, is available even if that other // source just confirms the same value as `net.peer.ip`. // Rationale: For `net.peer.ip`, one typically does not know if it // comes from a proxy, reverse proxy, or the actual client. Setting // `http.client_ip` when it's the same as `net.peer.ip` means that // one is at least somewhat confident that the address is not that of // the closest proxy. HTTPClientIPKey = attribute.Key("http.client_ip") ) // Attributes that exist for multiple DynamoDB request types. const ( // The keys in the `RequestItems` object field. // // Type: string[] // Required: No // Stability: stable // Examples: 'Users', 'Cats' AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") // The JSON-serialized value of each item in the `ConsumedCapacity` response // field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": // "string", "WriteCapacityUnits": number }' AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") // The JSON-serialized value of the `ItemCollectionMetrics` response field. 
// // Type: string // Required: No // Stability: stable // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. // // Type: double // Required: No // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. // // Type: double // Required: No // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") // The value of the `ConsistentRead` request parameter. // // Type: boolean // Required: No // Stability: stable AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") // The value of the `ProjectionExpression` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, // ProductReviews' AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") // The value of the `Limit` request parameter. // // Type: int // Required: No // Stability: stable // Examples: 10 AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") // The value of the `AttributesToGet` request parameter. // // Type: string[] // Required: No // Stability: stable // Examples: 'lives', 'id' AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") // The value of the `IndexName` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'name_to_group' AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") // The value of the `Select` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'ALL_ATTRIBUTES', 'COUNT' AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") ) // DynamoDB.CreateTable const ( // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request // field // // Type: string[] // Required: No // Stability: stable // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": // number, "WriteCapacityUnits": number } }' AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request // field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], // "ProjectionType": "string" } }' AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") ) // DynamoDB.ListTables const ( // The value of the `ExclusiveStartTableName` request parameter. 
// // Type: string // Required: No // Stability: stable // Examples: 'Users', 'CatsTable' AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") // The the number of items in the `TableNames` response parameter. // // Type: int // Required: No // Stability: stable // Examples: 20 AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") ) // DynamoDB.Query const ( // The value of the `ScanIndexForward` request parameter. // // Type: boolean // Required: No // Stability: stable AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") ) // DynamoDB.Scan const ( // The value of the `Segment` request parameter. // // Type: int // Required: No // Stability: stable // Examples: 10 AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") // The value of the `TotalSegments` request parameter. // // Type: int // Required: No // Stability: stable // Examples: 100 AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") // The value of the `Count` response parameter. // // Type: int // Required: No // Stability: stable // Examples: 10 AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") // The value of the `ScannedCount` response parameter. // // Type: int // Required: No // Stability: stable // Examples: 50 AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") ) // DynamoDB.UpdateTable const ( // The JSON-serialized value of each item in the `AttributeDefinitions` request // field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") // The JSON-serialized value of each item in the the `GlobalSecondaryIndexUpdates` // request field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { // "AttributeName": "string", "KeyType": "string" } ], "Projection": { // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": // number } }' AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") ) // This document defines the attributes used in messaging systems. const ( // A string identifying the messaging system. // // Type: string // Required: Always // Stability: stable // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' MessagingSystemKey = attribute.Key("messaging.system") // The message destination name. This might be equal to the span name but is // required nevertheless. // // Type: string // Required: Always // Stability: stable // Examples: 'MyQueue', 'MyTopic' MessagingDestinationKey = attribute.Key("messaging.destination") // The kind of message destination // // Type: Enum // Required: Required only if the message destination is either a `queue` or // `topic`. // Stability: stable MessagingDestinationKindKey = attribute.Key("messaging.destination_kind") // A boolean that is true if the message destination is temporary. // // Type: boolean // Required: If missing, it is assumed to be false. // Stability: stable MessagingTempDestinationKey = attribute.Key("messaging.temp_destination") // The name of the transport protocol. // // Type: string // Required: No // Stability: stable // Examples: 'AMQP', 'MQTT' MessagingProtocolKey = attribute.Key("messaging.protocol") // The version of the transport protocol. 
// // Type: string // Required: No // Stability: stable // Examples: '0.9.1' MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version") // Connection string. // // Type: string // Required: No // Stability: stable // Examples: 'tibjmsnaming://localhost:7222', // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' MessagingURLKey = attribute.Key("messaging.url") // A value used by the messaging system as an identifier for the message, // represented as a string. // // Type: string // Required: No // Stability: stable // Examples: '452a7c7c7c7048c2f887f61572b18fc2' MessagingMessageIDKey = attribute.Key("messaging.message_id") // The [conversation ID](#conversations) identifying the conversation to which the // message belongs, represented as a string. Sometimes called "Correlation ID". // // Type: string // Required: No // Stability: stable // Examples: 'MyConversationID' MessagingConversationIDKey = attribute.Key("messaging.conversation_id") // The (uncompressed) size of the message payload in bytes. Also use this // attribute if it is unknown whether the compressed or uncompressed payload size // is reported. // // Type: int // Required: No // Stability: stable // Examples: 2738 MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes") // The compressed size of the message payload in bytes. // // Type: int // Required: No // Stability: stable // Examples: 2048 MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes") ) var ( // A message sent to a queue MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") // A message sent to a topic MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") ) // Semantic convention for a consumer of messages received from a messaging system const ( // A string identifying the kind of message consumption as defined in the // [Operation names](#operation-names) section above. If the operation is "send", // this attribute MUST NOT be set, since the operation can be inferred from the // span kind in that case. // // Type: Enum // Required: No // Stability: stable MessagingOperationKey = attribute.Key("messaging.operation") // The identifier for the consumer receiving a message. For Kafka, set it to // `{messaging.kafka.consumer_group} - {messaging.kafka.client_id}`, if both are // present, or only `messaging.kafka.consumer_group`. For brokers, such as // RabbitMQ and Artemis, set it to the `client_id` of the client consuming the // message. // // Type: string // Required: No // Stability: stable // Examples: 'mygroup - client-6' MessagingConsumerIDKey = attribute.Key("messaging.consumer_id") ) var ( // receive MessagingOperationReceive = MessagingOperationKey.String("receive") // process MessagingOperationProcess = MessagingOperationKey.String("process") ) // Attributes for RabbitMQ const ( // RabbitMQ message routing key. // // Type: string // Required: Unless it is empty. // Stability: stable // Examples: 'myKey' MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key") ) // Attributes for Apache Kafka const ( // Message keys in Kafka are used for grouping alike messages to ensure they're // processed on the same partition. They differ from `messaging.message_id` in // that they're not unique. If the key is `null`, the attribute MUST NOT be set. 
// // Type: string // Required: No // Stability: stable // Examples: 'myKey' // Note: If the key type is not string, it's string representation has to be // supplied for the attribute. If the key has no unambiguous, canonical string // form, don't include its value. MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key") // Name of the Kafka Consumer Group that is handling the message. Only applies to // consumers, not producers. // // Type: string // Required: No // Stability: stable // Examples: 'my-group' MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group") // Client ID for the Consumer or Producer that is handling the message. // // Type: string // Required: No // Stability: stable // Examples: 'client-5' MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") // Partition the message is sent to. // // Type: int // Required: No // Stability: stable // Examples: 2 MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition") // A boolean that is true if the message is a tombstone. // // Type: boolean // Required: If missing, it is assumed to be false. // Stability: stable MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone") ) // Attributes for Apache RocketMQ const ( // Namespace of RocketMQ resources, resources in different namespaces are // individual. // // Type: string // Required: Always // Stability: stable // Examples: 'myNamespace' MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") // Name of the RocketMQ producer/consumer group that is handling the message. The // client type is identified by the SpanKind. // // Type: string // Required: Always // Stability: stable // Examples: 'myConsumerGroup' MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") // The unique identifier for each client. // // Type: string // Required: Always // Stability: stable // Examples: 'myhost@8742@s8083jm' MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") // Type of message. // // Type: Enum // Required: No // Stability: stable MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message_type") // The secondary classifier of message besides topic. // // Type: string // Required: No // Stability: stable // Examples: 'tagA' MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message_tag") // Key(s) of message, another way to mark message besides message id. // // Type: string[] // Required: No // Stability: stable // Examples: 'keyA', 'keyB' MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message_keys") // Model of message consumption. This only applies to consumer spans. 
// // Type: Enum // Required: No // Stability: stable MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") ) var ( // Normal message MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") // FIFO message MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") // Delay message MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") // Transaction message MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") ) var ( // Clustering consumption model MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") // Broadcasting consumption model MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") ) // This document defines semantic conventions for remote procedure calls. const ( // A string identifying the remoting system. See below for a list of well-known // identifiers. // // Type: Enum // Required: Always // Stability: stable RPCSystemKey = attribute.Key("rpc.system") // The full (logical) name of the service being called, including its package // name, if applicable. // // Type: string // Required: No, but recommended // Stability: stable // Examples: 'myservice.EchoService' // Note: This is the logical name of the service from the RPC interface // perspective, which can be different from the name of any implementing class. // The `code.namespace` attribute may be used to store the latter (despite the // attribute name, it may include a class name; e.g., class with method actually // executing the call on the server side, RPC client stub class on the client // side). RPCServiceKey = attribute.Key("rpc.service") // The name of the (logical) method being called, must be equal to the $method // part in the span name. // // Type: string // Required: No, but recommended // Stability: stable // Examples: 'exampleMethod' // Note: This is the logical name of the method from the RPC interface // perspective, which can be different from the name of any implementing // method/function. The `code.function` attribute may be used to store the latter // (e.g., method actually executing the call on the server side, RPC client stub // method on the client side). RPCMethodKey = attribute.Key("rpc.method") ) var ( // gRPC RPCSystemGRPC = RPCSystemKey.String("grpc") // Java RMI RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") // .NET WCF RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") // Apache Dubbo RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") ) // Tech-specific attributes for gRPC. const ( // The [numeric status // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC // request. 
// // Type: Enum // Required: Always // Stability: stable RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ) var ( // OK RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) // CANCELLED RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) // UNKNOWN RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) // INVALID_ARGUMENT RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) // DEADLINE_EXCEEDED RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) // NOT_FOUND RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) // ALREADY_EXISTS RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) // PERMISSION_DENIED RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) // RESOURCE_EXHAUSTED RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) // FAILED_PRECONDITION RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) // ABORTED RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) // OUT_OF_RANGE RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) // UNIMPLEMENTED RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) // INTERNAL RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) // UNAVAILABLE RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) // DATA_LOSS RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) // UNAUTHENTICATED RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) ) // Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). const ( // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC // 1.0 does not specify this, the value can be omitted. // // Type: string // Required: If missing, it is assumed to be "1.0". // Stability: stable // Examples: '2.0', '1.0' RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") // `id` property of request or response. Since protocol allows id to be int, // string, `null` or missing (for notifications), value is expected to be cast to // string for simplicity. Use empty string in case of `null` value. Omit entirely // if this is a notification. // // Type: string // Required: No // Stability: stable // Examples: '10', 'request-7', '' RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") // `error.code` property of response if it is an error response. // // Type: int // Required: If missing, response is assumed to be successful. // Stability: stable // Examples: -32700, 100 RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") // `error.message` property of response if it is an error response. // // Type: string // Required: No // Stability: stable // Examples: 'Parse error', 'User already exists' RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") ) // RPC received/sent message. const ( // Whether this is a received or sent message. // // Type: Enum // Required: No // Stability: stable MessageTypeKey = attribute.Key("message.type") // MUST be calculated as two different counters starting from `1` one for sent // messages and one for received message. // // Type: int // Required: No // Stability: stable // Note: This way we guarantee that the values will be consistent between // different implementations. MessageIDKey = attribute.Key("message.id") // Compressed size of the message in bytes. // // Type: int // Required: No // Stability: stable MessageCompressedSizeKey = attribute.Key("message.compressed_size") // Uncompressed size of the message in bytes. 
// // Type: int // Required: No // Stability: stable MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") ) var ( // sent MessageTypeSent = MessageTypeKey.String("SENT") // received MessageTypeReceived = MessageTypeKey.String("RECEIVED") ) opentelemetry-go-1.21.0/semconv/v1.11.0/000077500000000000000000000000001452547353200174625ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.11.0/doc.go000066400000000000000000000016641452547353200205650ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package semconv implements OpenTelemetry semantic conventions. // // OpenTelemetry semantic conventions are agreed standardized naming // patterns for OpenTelemetry things. This package represents the conventions // as of the v1.11.0 version of the OpenTelemetry specification. package semconv // import "go.opentelemetry.io/otel/semconv/v1.11.0" opentelemetry-go-1.21.0/semconv/v1.11.0/exception.go000066400000000000000000000014301452547353200220050ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.11.0" const ( // ExceptionEventName is the name of the Span event representing an exception. ExceptionEventName = "exception" ) opentelemetry-go-1.21.0/semconv/v1.11.0/http.go000066400000000000000000000113121452547353200207660ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.11.0" import ( "net/http" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/semconv/internal" "go.opentelemetry.io/otel/trace" ) // HTTP scheme attributes. 
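// The scheme values below are complete attribute.KeyValue pairs rather than
// bare keys, so they can be passed to a span as-is. An illustrative,
// non-normative sketch (the span variable is hypothetical):
//
//	span.SetAttributes(HTTPSchemeHTTPS, HTTPMethodKey.String("GET"))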
var ( HTTPSchemeHTTP = HTTPSchemeKey.String("http") HTTPSchemeHTTPS = HTTPSchemeKey.String("https") ) var sc = &internal.SemanticConventions{ EnduserIDKey: EnduserIDKey, HTTPClientIPKey: HTTPClientIPKey, HTTPFlavorKey: HTTPFlavorKey, HTTPHostKey: HTTPHostKey, HTTPMethodKey: HTTPMethodKey, HTTPRequestContentLengthKey: HTTPRequestContentLengthKey, HTTPRouteKey: HTTPRouteKey, HTTPSchemeHTTP: HTTPSchemeHTTP, HTTPSchemeHTTPS: HTTPSchemeHTTPS, HTTPServerNameKey: HTTPServerNameKey, HTTPStatusCodeKey: HTTPStatusCodeKey, HTTPTargetKey: HTTPTargetKey, HTTPURLKey: HTTPURLKey, HTTPUserAgentKey: HTTPUserAgentKey, NetHostIPKey: NetHostIPKey, NetHostNameKey: NetHostNameKey, NetHostPortKey: NetHostPortKey, NetPeerIPKey: NetPeerIPKey, NetPeerNameKey: NetPeerNameKey, NetPeerPortKey: NetPeerPortKey, NetTransportIP: NetTransportIP, NetTransportOther: NetTransportOther, NetTransportTCP: NetTransportTCP, NetTransportUDP: NetTransportUDP, NetTransportUnix: NetTransportUnix, } // NetAttributesFromHTTPRequest generates attributes of the net // namespace as specified by the OpenTelemetry specification for a // span. The network parameter is a string that net.Dial function // from standard library can understand. func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { return sc.NetAttributesFromHTTPRequest(network, request) } // EndUserAttributesFromHTTPRequest generates attributes of the // enduser namespace as specified by the OpenTelemetry specification // for a span. func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { return sc.EndUserAttributesFromHTTPRequest(request) } // HTTPClientAttributesFromHTTPRequest generates attributes of the // http namespace as specified by the OpenTelemetry specification for // a span on the client side. func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { return sc.HTTPClientAttributesFromHTTPRequest(request) } // HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes // to be used with server-side HTTP metrics. func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { return sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, request) } // HTTPServerAttributesFromHTTPRequest generates attributes of the // http namespace as specified by the OpenTelemetry specification for // a span on the server side. Currently, only basic authentication is // supported. func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { return sc.HTTPServerAttributesFromHTTPRequest(serverName, route, request) } // HTTPAttributesFromHTTPStatusCode generates attributes of the http // namespace as specified by the OpenTelemetry specification for a // span. func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { return sc.HTTPAttributesFromHTTPStatusCode(code) } // SpanStatusFromHTTPStatusCode generates a status code and a message // as specified by the OpenTelemetry specification for a span. func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { return internal.SpanStatusFromHTTPStatusCode(code) } // SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message // as specified by the OpenTelemetry specification for a span. // Exclude 4xx for SERVER to set the appropriate status. 
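// An illustrative, non-normative sketch of combining the helpers in this file
// on the server side; span, serverName, route, req, and statusCode are
// hypothetical values supplied by the instrumentation:
//
//	span.SetAttributes(HTTPServerAttributesFromHTTPRequest(serverName, route, req)...)
//	code, msg := SpanStatusFromHTTPStatusCodeAndSpanKind(statusCode, trace.SpanKindServer)
//	span.SetStatus(code, msg)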
func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { return internal.SpanStatusFromHTTPStatusCodeAndSpanKind(code, spanKind) } opentelemetry-go-1.21.0/semconv/v1.11.0/resource.go000066400000000000000000001013271452547353200216440ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.11.0" import "go.opentelemetry.io/otel/attribute" // A cloud environment (e.g. GCP, Azure, AWS) const ( // Name of the cloud provider. // // Type: Enum // Required: No // Stability: stable CloudProviderKey = attribute.Key("cloud.provider") // The cloud account ID the resource is assigned to. // // Type: string // Required: No // Stability: stable // Examples: '111111111111', 'opentelemetry' CloudAccountIDKey = attribute.Key("cloud.account.id") // The geographical region the resource is running. // // Type: string // Required: No // Stability: stable // Examples: 'us-central1', 'us-east-1' // Note: Refer to your provider's docs to see the available regions, for example // [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc- // detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global- // infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en- // us/global-infrastructure/geographies/), [Google Cloud // regions](https://cloud.google.com/about/locations), or [Tencent Cloud // regions](https://intl.cloud.tencent.com/document/product/213/6091). CloudRegionKey = attribute.Key("cloud.region") // Cloud regions often have multiple, isolated locations known as zones to // increase availability. Availability zone represents the zone where the resource // is running. // // Type: string // Required: No // Stability: stable // Examples: 'us-east-1c' // Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud. CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") // The cloud platform in use. // // Type: Enum // Required: No // Stability: stable // Note: The prefix of the service SHOULD match the one specified in // `cloud.provider`. 
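// An illustrative, non-normative sketch of attaching these cloud attributes to
// an SDK resource; the go.opentelemetry.io/otel/sdk/resource package, the
// package-level SchemaURL constant, and the chosen values are assumptions of
// this example:
//
//	res := resource.NewWithAttributes(
//		SchemaURL,
//		CloudProviderAWS,
//		CloudPlatformAWSEC2,
//		CloudRegionKey.String("us-east-1"),
//	)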
CloudPlatformKey = attribute.Key("cloud.platform") ) var ( // Alibaba Cloud CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") // Amazon Web Services CloudProviderAWS = CloudProviderKey.String("aws") // Microsoft Azure CloudProviderAzure = CloudProviderKey.String("azure") // Google Cloud Platform CloudProviderGCP = CloudProviderKey.String("gcp") // Tencent Cloud CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") ) var ( // Alibaba Cloud Elastic Compute Service CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") // Alibaba Cloud Function Compute CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") // AWS Elastic Compute Cloud CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") // AWS Elastic Container Service CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") // AWS Elastic Kubernetes Service CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") // AWS Lambda CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") // AWS Elastic Beanstalk CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") // AWS App Runner CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") // Azure Virtual Machines CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") // Azure Container Instances CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") // Azure Kubernetes Service CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") // Azure Functions CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") // Azure App Service CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") // Google Cloud Compute Engine (GCE) CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") // Google Cloud Run CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") // Google Cloud Kubernetes Engine (GKE) CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") // Google Cloud Functions (GCF) CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") // Google Cloud App Engine (GAE) CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") // Tencent Cloud Cloud Virtual Machine (CVM) CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") // Tencent Cloud Elastic Kubernetes Service (EKS) CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") // Tencent Cloud Serverless Cloud Function (SCF) CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") ) // Resources used by AWS Elastic Container Service (ECS). const ( // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws. // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us- // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo // perguide/clusters.html). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l // aunch_types.html) for an ECS task. 
// // Type: Enum // Required: No // Stability: stable AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates // t/developerguide/task_definitions.html). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us- // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") // The task definition family this task definition is a member of. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-family' AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") // The revision for this task definition. // // Type: string // Required: No // Stability: stable // Examples: '8', '26' AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") ) var ( // ec2 AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") // fargate AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") ) // Resources used by AWS Elastic Kubernetes Service (EKS). const ( // The ARN of an EKS cluster. // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") ) // Resources specific to Amazon Web Services. const ( // The name(s) of the AWS log group(s) an application is writing to. // // Type: string[] // Required: No // Stability: stable // Examples: '/aws/lambda/my-function', 'opentelemetry-service' // Note: Multiple log groups must be supported for cases like multi-container // applications, where a single application has sidecar containers, and each write // to their own log group. AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") // The Amazon Resource Name(s) (ARN) of the AWS log group(s). // // Type: string[] // Required: No // Stability: stable // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' // Note: See the [log group ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- // access-control-overview-cwl.html#CWL_ARN_Format). AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") // The name(s) of the AWS log stream(s) an application is writing to. // // Type: string[] // Required: No // Stability: stable // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") // The ARN(s) of the AWS log stream(s). // // Type: string[] // Required: No // Stability: stable // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' // Note: See the [log stream ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain // several log streams, so these ARNs necessarily identify both a log group and a // log stream. AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") ) // A container instance. const ( // Container name used by container runtime. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-autoconf' ContainerNameKey = attribute.Key("container.name") // Container ID. Usually a UUID, as for example used to [identify Docker // containers](https://docs.docker.com/engine/reference/run/#container- // identification). The UUID might be abbreviated. 
// // Type: string // Required: No // Stability: stable // Examples: 'a3bf90e006b2' ContainerIDKey = attribute.Key("container.id") // The container runtime managing this container. // // Type: string // Required: No // Stability: stable // Examples: 'docker', 'containerd', 'rkt' ContainerRuntimeKey = attribute.Key("container.runtime") // Name of the image the container was built on. // // Type: string // Required: No // Stability: stable // Examples: 'gcr.io/opentelemetry/operator' ContainerImageNameKey = attribute.Key("container.image.name") // Container image tag. // // Type: string // Required: No // Stability: stable // Examples: '0.1' ContainerImageTagKey = attribute.Key("container.image.tag") ) // The software deployment. const ( // Name of the [deployment // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka // deployment tier). // // Type: string // Required: No // Stability: stable // Examples: 'staging', 'production' DeploymentEnvironmentKey = attribute.Key("deployment.environment") ) // The device on which the process represented by this resource is running. const ( // A unique identifier representing the device // // Type: string // Required: No // Stability: stable // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' // Note: The device identifier MUST only be defined using the values outlined // below. This value is not an advertising identifier and MUST NOT be used as // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the // Firebase Installation ID or a globally unique UUID which is persisted across // sessions in your application. More information can be found // [here](https://developer.android.com/training/articles/user-data-ids) on best // practices and exact implementation details. Caution should be taken when // storing personal data or anything which can identify a user. GDPR and data // protection laws may apply, ensure you do your own due diligence. DeviceIDKey = attribute.Key("device.id") // The model identifier for the device // // Type: string // Required: No // Stability: stable // Examples: 'iPhone3,4', 'SM-G920F' // Note: It's recommended this value represents a machine readable version of the // model identifier rather than the market or consumer-friendly name of the // device. DeviceModelIdentifierKey = attribute.Key("device.model.identifier") // The marketing name for the device model // // Type: string // Required: No // Stability: stable // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' // Note: It's recommended this value represents a human readable version of the // device model rather than a machine readable alternative. DeviceModelNameKey = attribute.Key("device.model.name") // The name of the device manufacturer // // Type: string // Required: No // Stability: stable // Examples: 'Apple', 'Samsung' // Note: The Android OS provides this field via // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). // iOS apps SHOULD hardcode the value `Apple`. DeviceManufacturerKey = attribute.Key("device.manufacturer") ) // A serverless instance. const ( // The name of the single function that this runtime instance executes. 
// // Type: string // Required: Always // Stability: stable // Examples: 'my-function' // Note: This is the name of the function as configured/deployed on the FaaS // platform and is usually different from the name of the callback function (which // may be stored in the // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span- // general.md#source-code-attributes) span attributes). FaaSNameKey = attribute.Key("faas.name") // The unique ID of the single function that this runtime instance executes. // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' // Note: Depending on the cloud provider, use: // * **AWS Lambda:** The function // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and- // namespaces.html). // Take care not to use the "invoked ARN" directly but replace any // [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration- // aliases.html) with the resolved function version, as the same runtime instance // may be invokable with multiple // different aliases. // * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full- // resource-names) // * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en- // us/rest/api/resources/resources/get-by-id). // On some providers, it may not be possible to determine the full ID at startup, // which is why this field cannot be made required. For example, on AWS the // account ID // part of the ARN is not available without calling another AWS API // which may be deemed too slow for a short-running lambda function. // As an alternative, consider setting `faas.id` as a span attribute instead. FaaSIDKey = attribute.Key("faas.id") // The immutable version of the function being executed. // // Type: string // Required: No // Stability: stable // Examples: '26', 'pinkfroid-00002' // Note: Depending on the cloud provider and platform, use: // * **AWS Lambda:** The [function // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration- // versions.html) // (an integer represented as a decimal string). // * **Google Cloud Run:** The // [revision](https://cloud.google.com/run/docs/managing/revisions) // (i.e., the function name plus the revision suffix). // * **Google Cloud Functions:** The value of the // [`K_REVISION` environment // variable](https://cloud.google.com/functions/docs/env- // var#runtime_environment_variables_set_automatically). // * **Azure Functions:** Not applicable. Do not set this attribute. FaaSVersionKey = attribute.Key("faas.version") // The execution environment ID as a string, that will be potentially reused for // other invocations to the same function/function version. // // Type: string // Required: No // Stability: stable // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' // Note: * **AWS Lambda:** Use the (full) log stream name. FaaSInstanceKey = attribute.Key("faas.instance") // The amount of memory available to the serverless function in MiB. // // Type: int // Required: No // Stability: stable // Examples: 128 // Note: It's recommended to set this attribute since e.g. too little memory can // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this // information. FaaSMaxMemoryKey = attribute.Key("faas.max_memory") ) // A host is defined as a general computing instance. const ( // Unique host ID. 
For Cloud, this must be the instance_id assigned by the cloud // provider. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-test' HostIDKey = attribute.Key("host.id") // Name of the host. On Unix systems, it may contain what the hostname command // returns, or the fully qualified hostname, or another name specified by the // user. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-test' HostNameKey = attribute.Key("host.name") // Type of host. For Cloud, this must be the machine type. // // Type: string // Required: No // Stability: stable // Examples: 'n1-standard-1' HostTypeKey = attribute.Key("host.type") // The CPU architecture the host system is running on. // // Type: Enum // Required: No // Stability: stable HostArchKey = attribute.Key("host.arch") // Name of the VM image or OS install the host was instantiated from. // // Type: string // Required: No // Stability: stable // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' HostImageNameKey = attribute.Key("host.image.name") // VM image ID. For Cloud, this value is from the provider. // // Type: string // Required: No // Stability: stable // Examples: 'ami-07b06b442921831e5' HostImageIDKey = attribute.Key("host.image.id") // The version string of the VM image as defined in [Version // Attributes](README.md#version-attributes). // // Type: string // Required: No // Stability: stable // Examples: '0.1' HostImageVersionKey = attribute.Key("host.image.version") ) var ( // AMD64 HostArchAMD64 = HostArchKey.String("amd64") // ARM32 HostArchARM32 = HostArchKey.String("arm32") // ARM64 HostArchARM64 = HostArchKey.String("arm64") // Itanium HostArchIA64 = HostArchKey.String("ia64") // 32-bit PowerPC HostArchPPC32 = HostArchKey.String("ppc32") // 64-bit PowerPC HostArchPPC64 = HostArchKey.String("ppc64") // IBM z/Architecture HostArchS390x = HostArchKey.String("s390x") // 32-bit x86 HostArchX86 = HostArchKey.String("x86") ) // A Kubernetes Cluster. const ( // The name of the cluster. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-cluster' K8SClusterNameKey = attribute.Key("k8s.cluster.name") ) // A Kubernetes Node object. const ( // The name of the Node. // // Type: string // Required: No // Stability: stable // Examples: 'node-1' K8SNodeNameKey = attribute.Key("k8s.node.name") // The UID of the Node. // // Type: string // Required: No // Stability: stable // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' K8SNodeUIDKey = attribute.Key("k8s.node.uid") ) // A Kubernetes Namespace. const ( // The name of the namespace that the pod is running in. // // Type: string // Required: No // Stability: stable // Examples: 'default' K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") ) // A Kubernetes Pod object. const ( // The UID of the Pod. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SPodUIDKey = attribute.Key("k8s.pod.uid") // The name of the Pod. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-pod-autoconf' K8SPodNameKey = attribute.Key("k8s.pod.name") ) // A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). const ( // The name of the Container from Pod specification, must be unique within a Pod. // Container runtime usually uses different globally unique name // (`container.name`). 
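// A minimal usage sketch, assuming the SDK resource package
// "go.opentelemetry.io/otel/sdk/resource" is imported as resource; the values
// mirror the documented examples:
//
//	res := resource.NewWithAttributes(
//		SchemaURL,
//		K8SClusterNameKey.String("opentelemetry-cluster"),
//		K8SNamespaceNameKey.String("default"),
//		K8SPodNameKey.String("opentelemetry-pod-autoconf"),
//	)
//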
// // Type: string // Required: No // Stability: stable // Examples: 'redis' K8SContainerNameKey = attribute.Key("k8s.container.name") // Number of times the container was restarted. This attribute can be used to // identify a particular container (running or stopped) within a container spec. // // Type: int // Required: No // Stability: stable // Examples: 0, 2 K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") ) // A Kubernetes ReplicaSet object. const ( // The UID of the ReplicaSet. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") // The name of the ReplicaSet. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") ) // A Kubernetes Deployment object. const ( // The UID of the Deployment. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") // The name of the Deployment. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") ) // A Kubernetes StatefulSet object. const ( // The UID of the StatefulSet. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") // The name of the StatefulSet. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") ) // A Kubernetes DaemonSet object. const ( // The UID of the DaemonSet. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") // The name of the DaemonSet. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") ) // A Kubernetes Job object. const ( // The UID of the Job. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SJobUIDKey = attribute.Key("k8s.job.uid") // The name of the Job. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SJobNameKey = attribute.Key("k8s.job.name") ) // A Kubernetes CronJob object. const ( // The UID of the CronJob. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") // The name of the CronJob. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") ) // The operating system (OS) on which the process represented by this resource is running. const ( // The operating system type. // // Type: Enum // Required: Always // Stability: stable OSTypeKey = attribute.Key("os.type") // Human readable (not intended to be parsed) OS version information, like e.g. // reported by `ver` or `lsb_release -a` commands. // // Type: string // Required: No // Stability: stable // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' OSDescriptionKey = attribute.Key("os.description") // Human readable operating system name. 
// // Type: string // Required: No // Stability: stable // Examples: 'iOS', 'Android', 'Ubuntu' OSNameKey = attribute.Key("os.name") // The version string of the operating system as defined in [Version // Attributes](../../resource/semantic_conventions/README.md#version-attributes). // // Type: string // Required: No // Stability: stable // Examples: '14.2.1', '18.04.1' OSVersionKey = attribute.Key("os.version") ) var ( // Microsoft Windows OSTypeWindows = OSTypeKey.String("windows") // Linux OSTypeLinux = OSTypeKey.String("linux") // Apple Darwin OSTypeDarwin = OSTypeKey.String("darwin") // FreeBSD OSTypeFreeBSD = OSTypeKey.String("freebsd") // NetBSD OSTypeNetBSD = OSTypeKey.String("netbsd") // OpenBSD OSTypeOpenBSD = OSTypeKey.String("openbsd") // DragonFly BSD OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") // HP-UX (Hewlett Packard Unix) OSTypeHPUX = OSTypeKey.String("hpux") // AIX (Advanced Interactive eXecutive) OSTypeAIX = OSTypeKey.String("aix") // SunOS, Oracle Solaris OSTypeSolaris = OSTypeKey.String("solaris") // IBM z/OS OSTypeZOS = OSTypeKey.String("z_os") ) // An operating system process. const ( // Process identifier (PID). // // Type: int // Required: No // Stability: stable // Examples: 1234 ProcessPIDKey = attribute.Key("process.pid") // The name of the process executable. On Linux based systems, can be set to the // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of // `GetProcessImageFileNameW`. // // Type: string // Required: See below // Stability: stable // Examples: 'otelcol' ProcessExecutableNameKey = attribute.Key("process.executable.name") // The full path to the process executable. On Linux based systems, can be set to // the target of `proc/[pid]/exe`. On Windows, can be set to the result of // `GetProcessImageFileNameW`. // // Type: string // Required: See below // Stability: stable // Examples: '/usr/bin/cmd/otelcol' ProcessExecutablePathKey = attribute.Key("process.executable.path") // The command used to launch the process (i.e. the command name). On Linux based // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, // can be set to the first parameter extracted from `GetCommandLineW`. // // Type: string // Required: See below // Stability: stable // Examples: 'cmd/otelcol' ProcessCommandKey = attribute.Key("process.command") // The full command used to launch the process as a single string representing the // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not // set this if you have to assemble it just for monitoring; use // `process.command_args` instead. // // Type: string // Required: See below // Stability: stable // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' ProcessCommandLineKey = attribute.Key("process.command_line") // All the command arguments (including the command/executable itself) as received // by the process. On Linux-based systems (and some other Unixoid systems // supporting procfs), can be set according to the list of null-delimited strings // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be // the full argv vector passed to `main`. // // Type: string[] // Required: See below // Stability: stable // Examples: 'cmd/otecol', '--config=config.yaml' ProcessCommandArgsKey = attribute.Key("process.command_args") // The username of the user that owns the process. 
// // Type: string // Required: No // Stability: stable // Examples: 'root' ProcessOwnerKey = attribute.Key("process.owner") ) // The single (language) runtime instance which is monitored. const ( // The name of the runtime of this process. For compiled native binaries, this // SHOULD be the name of the compiler. // // Type: string // Required: No // Stability: stable // Examples: 'OpenJDK Runtime Environment' ProcessRuntimeNameKey = attribute.Key("process.runtime.name") // The version of the runtime of this process, as returned by the runtime without // modification. // // Type: string // Required: No // Stability: stable // Examples: '14.0.2' ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") // An additional description about the runtime of the process, for example a // specific vendor customization of the runtime environment. // // Type: string // Required: No // Stability: stable // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") ) // A service instance. const ( // Logical name of the service. // // Type: string // Required: Always // Stability: stable // Examples: 'shoppingcart' // Note: MUST be the same for all instances of horizontally scaled services. If // the value was not specified, SDKs MUST fallback to `unknown_service:` // concatenated with [`process.executable.name`](process.md#process), e.g. // `unknown_service:bash`. If `process.executable.name` is not available, the // value MUST be set to `unknown_service`. ServiceNameKey = attribute.Key("service.name") // A namespace for `service.name`. // // Type: string // Required: No // Stability: stable // Examples: 'Shop' // Note: A string value having a meaning that helps to distinguish a group of // services, for example the team name that owns a group of services. // `service.name` is expected to be unique within the same namespace. If // `service.namespace` is not specified in the Resource then `service.name` is // expected to be unique for all services that have no explicit namespace defined // (so the empty/unspecified namespace is simply one more valid namespace). Zero- // length namespace string is assumed equal to unspecified namespace. ServiceNamespaceKey = attribute.Key("service.namespace") // The string ID of the service instance. // // Type: string // Required: No // Stability: stable // Examples: '627cc493-f310-47de-96bd-71410b7dec09' // Note: MUST be unique for each instance of the same // `service.namespace,service.name` pair (in other words // `service.namespace,service.name,service.instance.id` triplet MUST be globally // unique). The ID helps to distinguish instances of the same service that exist // at the same time (e.g. instances of a horizontally scaled service). It is // preferable for the ID to be persistent and stay the same for the lifetime of // the service instance, however it is acceptable that the ID is ephemeral and // changes during important lifetime events for the service (e.g. service // restarts). If the service has no inherent unique ID that can be used as the // value of this attribute it is recommended to generate a random Version 1 or // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use // Version 5, see RFC 4122 for more recommendations). ServiceInstanceIDKey = attribute.Key("service.instance.id") // The version string of the service API or implementation. 
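// A minimal usage sketch, assuming the SDK resource package
// "go.opentelemetry.io/otel/sdk/resource" is imported as resource; the values
// mirror the documented examples:
//
//	res := resource.NewWithAttributes(
//		SchemaURL,
//		ServiceNameKey.String("shoppingcart"),
//		ServiceNamespaceKey.String("Shop"),
//		ServiceInstanceIDKey.String("627cc493-f310-47de-96bd-71410b7dec09"),
//		ServiceVersionKey.String("2.0.0"),
//	)
//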
// // Type: string // Required: No // Stability: stable // Examples: '2.0.0' ServiceVersionKey = attribute.Key("service.version") ) // The telemetry SDK used to capture data recorded by the instrumentation libraries. const ( // The name of the telemetry SDK as defined above. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") // The language of the telemetry SDK. // // Type: Enum // Required: No // Stability: stable TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") // The version string of the telemetry SDK. // // Type: string // Required: No // Stability: stable // Examples: '1.2.3' TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") // The version string of the auto instrumentation agent, if used. // // Type: string // Required: No // Stability: stable // Examples: '1.2.3' TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") ) var ( // cpp TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") // dotnet TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") // erlang TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") // go TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") // java TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") // nodejs TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") // php TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") // python TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") // ruby TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") // webjs TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") // swift TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") ) // Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. const ( // The name of the web engine. // // Type: string // Required: Always // Stability: stable // Examples: 'WildFly' WebEngineNameKey = attribute.Key("webengine.name") // The version of the web engine. // // Type: string // Required: No // Stability: stable // Examples: '21.0.0' WebEngineVersionKey = attribute.Key("webengine.version") // Additional description of the web engine (e.g. detailed version and edition // information). // // Type: string // Required: No // Stability: stable // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' WebEngineDescriptionKey = attribute.Key("webengine.description") ) opentelemetry-go-1.21.0/semconv/v1.11.0/schema.go000066400000000000000000000017141452547353200212540ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.11.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. 
Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ const SchemaURL = "https://opentelemetry.io/schemas/1.11.0" opentelemetry-go-1.21.0/semconv/v1.11.0/trace.go000066400000000000000000001673601452547353200211240ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.11.0" import "go.opentelemetry.io/otel/attribute" // Span attributes used by AWS Lambda (in addition to general `faas` attributes). const ( // The full invoked ARN as provided on the `Context` passed to the function // (`Lambda-Runtime-Invoked-Function-ARN` header on the `/runtime/invocation/next` // applicable). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' // Note: This may be different from `faas.id` if an alias is involved. AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") ) // This document defines attributes for CloudEvents. CloudEvents is a specification on how to define event data in a standard way. These attributes can be attached to spans when performing operations with CloudEvents, regardless of the protocol being used. const ( // The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec // .md#id) uniquely identifies the event. // // Type: string // Required: Always // Stability: stable // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") // The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.m // d#source-1) identifies the context in which an event happened. // // Type: string // Required: Always // Stability: stable // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my- // service' CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") // The [version of the CloudEvents specification](https://github.com/cloudevents/s // pec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses. // // Type: string // Required: Always // Stability: stable // Examples: '1.0' CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") // The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/sp // ec.md#type) contains a value describing the type of event related to the // originating occurrence. // // Type: string // Required: Always // Stability: stable // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2' CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") // The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec. // md#subject) of the event in the context of the event producer (identified by // source). 
// // Type: string // Required: No // Stability: stable // Examples: 'mynewfile.jpg' CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") ) // This document defines semantic conventions for the OpenTracing Shim const ( // Parent-child Reference type // // Type: Enum // Required: No // Stability: stable // Note: The causal relationship between a child Span and a parent Span. OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") ) var ( // The parent Span depends on the child Span in some capacity OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") // The parent Span does not depend in any way on the result of the child Span OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") ) // This document defines the attributes used to perform database client calls. const ( // An identifier for the database management system (DBMS) product being used. See // below for a list of well-known identifiers. // // Type: Enum // Required: Always // Stability: stable DBSystemKey = attribute.Key("db.system") // The connection string used to connect to the database. It is recommended to // remove embedded credentials. // // Type: string // Required: No // Stability: stable // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' DBConnectionStringKey = attribute.Key("db.connection_string") // Username for accessing the database. // // Type: string // Required: No // Stability: stable // Examples: 'readonly_user', 'reporting_user' DBUserKey = attribute.Key("db.user") // The fully-qualified class name of the [Java Database Connectivity // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver // used to connect. // // Type: string // Required: No // Stability: stable // Examples: 'org.postgresql.Driver', // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") // This attribute is used to report the name of the database being accessed. For // commands that switch the database, this should be set to the target database // (even if the command fails). // // Type: string // Required: Required, if applicable. // Stability: stable // Examples: 'customers', 'main' // Note: In some SQL databases, the database name to be used is called "schema // name". In case there are multiple layers that could be considered for database // name (e.g. Oracle instance name and schema name), the database name to be used // is the more specific layer (e.g. Oracle schema name). DBNameKey = attribute.Key("db.name") // The database statement being executed. // // Type: string // Required: Required if applicable and not explicitly disabled via // instrumentation configuration. // Stability: stable // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' // Note: The value may be sanitized to exclude sensitive information. DBStatementKey = attribute.Key("db.statement") // The name of the operation being executed, e.g. the [MongoDB command // name](https://docs.mongodb.com/manual/reference/command/#database-operations) // such as `findAndModify`, or the SQL keyword. // // Type: string // Required: Required, if `db.statement` is not applicable. // Stability: stable // Examples: 'findAndModify', 'HMSET', 'SELECT' // Note: When setting this to an SQL keyword, it is not recommended to attempt any // client-side parsing of `db.statement` just to get this property, but it should // be set if the operation name is provided by the library being instrumented. 
If // the SQL statement has an ambiguous operation, or performs more than one // operation, this value may be omitted. DBOperationKey = attribute.Key("db.operation") ) var ( // Some other SQL database. Fallback only. See notes DBSystemOtherSQL = DBSystemKey.String("other_sql") // Microsoft SQL Server DBSystemMSSQL = DBSystemKey.String("mssql") // MySQL DBSystemMySQL = DBSystemKey.String("mysql") // Oracle Database DBSystemOracle = DBSystemKey.String("oracle") // IBM DB2 DBSystemDB2 = DBSystemKey.String("db2") // PostgreSQL DBSystemPostgreSQL = DBSystemKey.String("postgresql") // Amazon Redshift DBSystemRedshift = DBSystemKey.String("redshift") // Apache Hive DBSystemHive = DBSystemKey.String("hive") // Cloudscape DBSystemCloudscape = DBSystemKey.String("cloudscape") // HyperSQL DataBase DBSystemHSQLDB = DBSystemKey.String("hsqldb") // Progress Database DBSystemProgress = DBSystemKey.String("progress") // SAP MaxDB DBSystemMaxDB = DBSystemKey.String("maxdb") // SAP HANA DBSystemHanaDB = DBSystemKey.String("hanadb") // Ingres DBSystemIngres = DBSystemKey.String("ingres") // FirstSQL DBSystemFirstSQL = DBSystemKey.String("firstsql") // EnterpriseDB DBSystemEDB = DBSystemKey.String("edb") // InterSystems Caché DBSystemCache = DBSystemKey.String("cache") // Adabas (Adaptable Database System) DBSystemAdabas = DBSystemKey.String("adabas") // Firebird DBSystemFirebird = DBSystemKey.String("firebird") // Apache Derby DBSystemDerby = DBSystemKey.String("derby") // FileMaker DBSystemFilemaker = DBSystemKey.String("filemaker") // Informix DBSystemInformix = DBSystemKey.String("informix") // InstantDB DBSystemInstantDB = DBSystemKey.String("instantdb") // InterBase DBSystemInterbase = DBSystemKey.String("interbase") // MariaDB DBSystemMariaDB = DBSystemKey.String("mariadb") // Netezza DBSystemNetezza = DBSystemKey.String("netezza") // Pervasive PSQL DBSystemPervasive = DBSystemKey.String("pervasive") // PointBase DBSystemPointbase = DBSystemKey.String("pointbase") // SQLite DBSystemSqlite = DBSystemKey.String("sqlite") // Sybase DBSystemSybase = DBSystemKey.String("sybase") // Teradata DBSystemTeradata = DBSystemKey.String("teradata") // Vertica DBSystemVertica = DBSystemKey.String("vertica") // H2 DBSystemH2 = DBSystemKey.String("h2") // ColdFusion IMQ DBSystemColdfusion = DBSystemKey.String("coldfusion") // Apache Cassandra DBSystemCassandra = DBSystemKey.String("cassandra") // Apache HBase DBSystemHBase = DBSystemKey.String("hbase") // MongoDB DBSystemMongoDB = DBSystemKey.String("mongodb") // Redis DBSystemRedis = DBSystemKey.String("redis") // Couchbase DBSystemCouchbase = DBSystemKey.String("couchbase") // CouchDB DBSystemCouchDB = DBSystemKey.String("couchdb") // Microsoft Azure Cosmos DB DBSystemCosmosDB = DBSystemKey.String("cosmosdb") // Amazon DynamoDB DBSystemDynamoDB = DBSystemKey.String("dynamodb") // Neo4j DBSystemNeo4j = DBSystemKey.String("neo4j") // Apache Geode DBSystemGeode = DBSystemKey.String("geode") // Elasticsearch DBSystemElasticsearch = DBSystemKey.String("elasticsearch") // Memcached DBSystemMemcached = DBSystemKey.String("memcached") // CockroachDB DBSystemCockroachdb = DBSystemKey.String("cockroachdb") ) // Connection-level attributes for Microsoft SQL Server const ( // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en- // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) // connecting to. This name is used to determine the port of a named instance. 
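// A minimal usage sketch, assuming span is an already-started trace.Span from
// "go.opentelemetry.io/otel/trace" and the statement shown is illustrative:
//
//	span.SetAttributes(
//		DBSystemMSSQL,
//		DBMSSQLInstanceNameKey.String("MSSQLSERVER"),
//		DBNameKey.String("customers"),
//		DBStatementKey.String("SELECT * FROM wuser_table"),
//	)
//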
// // Type: string // Required: No // Stability: stable // Examples: 'MSSQLSERVER' // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer // required (but still recommended if non-standard). DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") ) // Call-level attributes for Cassandra const ( // The fetch size used for paging, i.e. how many rows will be returned at once. // // Type: int // Required: No // Stability: stable // Examples: 5000 DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") // The consistency level of the query. Based on consistency values from // [CQL](https://docs.datastax.com/en/cassandra- // oss/3.0/cassandra/dml/dmlConfigConsistency.html). // // Type: Enum // Required: No // Stability: stable DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") // The name of the primary table that the operation is acting upon, including the // keyspace name (if applicable). // // Type: string // Required: Recommended if available. // Stability: stable // Examples: 'mytable' // Note: This mirrors the db.sql.table attribute but references cassandra rather // than sql. It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting upon an // anonymous table, or more than one table, this value MUST NOT be set. DBCassandraTableKey = attribute.Key("db.cassandra.table") // Whether or not the query is idempotent. // // Type: boolean // Required: No // Stability: stable DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") // The number of times a query was speculatively executed. Not set or `0` if the // query was not executed speculatively. // // Type: int // Required: No // Stability: stable // Examples: 0, 2 DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") // The ID of the coordinating node for a query. // // Type: string // Required: No // Stability: stable // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") // The data center of the coordinating node for a query. 
// // Type: string // Required: No // Stability: stable // Examples: 'us-west-2' DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") ) var ( // all DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") // each_quorum DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") // quorum DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") // local_quorum DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") // one DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") // two DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") // three DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") // local_one DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") // any DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") // serial DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") // local_serial DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") ) // Call-level attributes for Redis const ( // The index of the database being accessed as used in the [`SELECT` // command](https://redis.io/commands/select), provided as an integer. To be used // instead of the generic `db.name` attribute. // // Type: int // Required: Required, if other than the default database (`0`). // Stability: stable // Examples: 0, 1, 15 DBRedisDBIndexKey = attribute.Key("db.redis.database_index") ) // Call-level attributes for MongoDB const ( // The collection being accessed within the database stated in `db.name`. // // Type: string // Required: Always // Stability: stable // Examples: 'customers', 'products' DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") ) // Call-level attributes for SQL databases const ( // The name of the primary table that the operation is acting upon, including the // database name (if applicable). // // Type: string // Required: Recommended if available. // Stability: stable // Examples: 'public.users', 'customers' // Note: It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting upon an // anonymous table, or more than one table, this value MUST NOT be set. DBSQLTableKey = attribute.Key("db.sql.table") ) // This document defines the attributes used to report a single exception associated with a span. const ( // The type of the exception (its fully-qualified class name, if applicable). The // dynamic type of the exception should be preferred over the static type in // languages that support it. // // Type: string // Required: No // Stability: stable // Examples: 'java.net.ConnectException', 'OSError' ExceptionTypeKey = attribute.Key("exception.type") // The exception message. // // Type: string // Required: No // Stability: stable // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" ExceptionMessageKey = attribute.Key("exception.message") // A stacktrace as a string in the natural representation for the language // runtime. The representation is to be determined and documented by each language // SIG. 
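// A minimal usage sketch, assuming span is an already-started trace.Span and
// trace refers to "go.opentelemetry.io/otel/trace"; span.RecordError offers a
// higher-level way to record exceptions, while setting the keys directly might
// look like:
//
//	span.AddEvent("exception", trace.WithAttributes(
//		ExceptionTypeKey.String("OSError"),
//		ExceptionMessageKey.String("Division by zero"),
//		ExceptionEscapedKey.Bool(true),
//	))
//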
// // Type: string // Required: No // Stability: stable // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test // exception\\n at ' // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' ExceptionStacktraceKey = attribute.Key("exception.stacktrace") // SHOULD be set to true if the exception event is recorded at a point where it is // known that the exception is escaping the scope of the span. // // Type: boolean // Required: No // Stability: stable // Note: An exception is considered to have escaped (or left) the scope of a span, // if that span is ended while the exception is still logically "in flight". // This may be actually "in flight" in some languages (e.g. if the exception // is passed to a Context manager's `__exit__` method in Python) but will // usually be caught at the point of recording the exception in most languages. // It is usually not possible to determine at the point where an exception is // thrown // whether it will escape the scope of a span. // However, it is trivial to know that an exception // will escape, if one checks for an active exception just before ending the span, // as done in the [example above](#recording-an-exception). // It follows that an exception may still escape the scope of the span // even if the `exception.escaped` attribute was not set or set to false, // since the event might have been recorded at a time where it was not // clear whether the exception will escape. ExceptionEscapedKey = attribute.Key("exception.escaped") ) // This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. const ( // Type of the trigger which caused this function execution. // // Type: Enum // Required: No // Stability: stable // Note: For the server/consumer span on the incoming side, // `faas.trigger` MUST be set. // Clients invoking FaaS instances usually cannot set `faas.trigger`, // since they would typically need to look in the payload to determine // the event type. If clients set it, it should be the same as the // trigger that corresponding incoming would have (i.e., this has // nothing to do with the underlying transport used to make the API // call to invoke the lambda, which is often HTTP). FaaSTriggerKey = attribute.Key("faas.trigger") // The execution ID of the current function execution. // // Type: string // Required: No // Stability: stable // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' FaaSExecutionKey = attribute.Key("faas.execution") ) var ( // A response to some data source operation such as a database or filesystem read/write FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") // To provide an answer to an inbound HTTP request FaaSTriggerHTTP = FaaSTriggerKey.String("http") // A function is set to be executed when messages are sent to a messaging system FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") // A function is scheduled to be executed regularly FaaSTriggerTimer = FaaSTriggerKey.String("timer") // If none of the others apply FaaSTriggerOther = FaaSTriggerKey.String("other") ) // Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. const ( // The name of the source on which the triggering operation was performed. 
For // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos // DB to the database name. // // Type: string // Required: Always // Stability: stable // Examples: 'myBucketName', 'myDBName' FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") // Describes the type of the operation that was performed on the data. // // Type: Enum // Required: Always // Stability: stable FaaSDocumentOperationKey = attribute.Key("faas.document.operation") // A string containing the time when the data was accessed in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed // in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // Required: Always // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSDocumentTimeKey = attribute.Key("faas.document.time") // The document name/table subjected to the operation. For example, in Cloud // Storage or S3 is the name of the file, and in Cosmos DB the table name. // // Type: string // Required: No // Stability: stable // Examples: 'myFile.txt', 'myTableName' FaaSDocumentNameKey = attribute.Key("faas.document.name") ) var ( // When a new object is created FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") // When an object is modified FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") // When an object is deleted FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") ) // Semantic Convention for FaaS scheduled to be executed regularly. const ( // A string containing the function invocation time in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed // in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // Required: Always // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSTimeKey = attribute.Key("faas.time") // A string containing the schedule period as [Cron Expression](https://docs.oracl // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). // // Type: string // Required: No // Stability: stable // Examples: '0/5 * * * ? *' FaaSCronKey = attribute.Key("faas.cron") ) // Contains additional attributes for incoming FaaS spans. const ( // A boolean that is true if the serverless function is executed for the first // time (aka cold-start). // // Type: boolean // Required: No // Stability: stable FaaSColdstartKey = attribute.Key("faas.coldstart") ) // Contains additional attributes for outgoing FaaS spans. const ( // The name of the invoked function. // // Type: string // Required: Always // Stability: stable // Examples: 'my-function' // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked // function. FaaSInvokedNameKey = attribute.Key("faas.invoked_name") // The cloud provider of the invoked function. // // Type: Enum // Required: Always // Stability: stable // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked // function. FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") // The cloud region of the invoked function. // // Type: string // Required: For some cloud providers, like AWS or GCP, the region in which a // function is hosted is essential to uniquely identify the function and also part // of its endpoint. Since it's part of the endpoint being called, the region is // always known to clients. In these cases, `faas.invoked_region` MUST be set // accordingly. 
If the region is unknown to the client or not required for // identifying the invoked function, setting `faas.invoked_region` is optional. // Stability: stable // Examples: 'eu-central-1' // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked // function. FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") ) var ( // Alibaba Cloud FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") // Amazon Web Services FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") // Microsoft Azure FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") // Google Cloud Platform FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") // Tencent Cloud FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") ) // These attributes may be used for any network related operation. const ( // Transport protocol used. See note below. // // Type: Enum // Required: No // Stability: stable NetTransportKey = attribute.Key("net.transport") // Remote address of the peer (dotted decimal for IPv4 or // [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6) // // Type: string // Required: No // Stability: stable // Examples: '127.0.0.1' NetPeerIPKey = attribute.Key("net.peer.ip") // Remote port number. // // Type: int // Required: No // Stability: stable // Examples: 80, 8080, 443 NetPeerPortKey = attribute.Key("net.peer.port") // Remote hostname or similar, see note below. // // Type: string // Required: No // Stability: stable // Examples: 'example.com' // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an extra // DNS lookup. NetPeerNameKey = attribute.Key("net.peer.name") // Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host. // // Type: string // Required: No // Stability: stable // Examples: '192.168.0.1' NetHostIPKey = attribute.Key("net.host.ip") // Like `net.peer.port` but for the host port. // // Type: int // Required: No // Stability: stable // Examples: 35555 NetHostPortKey = attribute.Key("net.host.port") // Local hostname or similar, see note below. // // Type: string // Required: No // Stability: stable // Examples: 'localhost' NetHostNameKey = attribute.Key("net.host.name") // The internet connection type currently being used by the host. // // Type: Enum // Required: No // Stability: stable // Examples: 'wifi' NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") // This describes more details regarding the connection.type. It may be the type // of cell technology connection, but it could be used for describing details // about a wifi connection. // // Type: Enum // Required: No // Stability: stable // Examples: 'LTE' NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") // The name of the mobile carrier. // // Type: string // Required: No // Stability: stable // Examples: 'sprint' NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") // The mobile carrier country code. // // Type: string // Required: No // Stability: stable // Examples: '310' NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") // The mobile carrier network code. // // Type: string // Required: No // Stability: stable // Examples: '001' NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") // The ISO 3166-1 alpha-2 2-character country code associated with the mobile // carrier network. 
// // Type: string // Required: No // Stability: stable // Examples: 'DE' NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") ) var ( // ip_tcp NetTransportTCP = NetTransportKey.String("ip_tcp") // ip_udp NetTransportUDP = NetTransportKey.String("ip_udp") // Another IP-based protocol NetTransportIP = NetTransportKey.String("ip") // Unix Domain socket. See below NetTransportUnix = NetTransportKey.String("unix") // Named or anonymous pipe. See note below NetTransportPipe = NetTransportKey.String("pipe") // In-process communication NetTransportInProc = NetTransportKey.String("inproc") // Something else (non IP-based) NetTransportOther = NetTransportKey.String("other") ) var ( // wifi NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") // wired NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") // cell NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") // unavailable NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") // unknown NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") ) var ( // GPRS NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") // EDGE NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") // UMTS NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") // CDMA NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") // EVDO Rel. 0 NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") // EVDO Rev. A NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") // CDMA2000 1XRTT NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") // HSDPA NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") // HSUPA NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") // HSPA NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") // IDEN NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") // EVDO Rev. B NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") // LTE NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") // EHRPD NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") // HSPAP NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") // GSM NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") // TD-SCDMA NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") // IWLAN NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") // 5G NR (New Radio) NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") // 5G NRNSA (New Radio Non-Standalone) NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") // LTE CA NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") ) // Operations that access some remote service. const ( // The [`service.name`](../../resource/semantic_conventions/README.md#service) of // the remote service. SHOULD be equal to the actual `service.name` resource // attribute of the remote service if any. // // Type: string // Required: No // Stability: stable // Examples: 'AuthTokenCache' PeerServiceKey = attribute.Key("peer.service") ) // These attributes may be used for any operation with an authenticated and/or authorized enduser. 
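// A minimal usage sketch, assuming span is an already-started trace.Span from
// "go.opentelemetry.io/otel/trace" and the identity values are illustrative;
// it combines the peer.service attribute above with the enduser attributes
// below:
//
//	span.SetAttributes(
//		PeerServiceKey.String("AuthTokenCache"),
//		EnduserIDKey.String("username"),
//		EnduserRoleKey.String("admin"),
//	)
//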
const ( // Username or client_id extracted from the access token or // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the // inbound request from outside the system. // // Type: string // Required: No // Stability: stable // Examples: 'username' EnduserIDKey = attribute.Key("enduser.id") // Actual/assumed role the client is making the request under extracted from token // or application security context. // // Type: string // Required: No // Stability: stable // Examples: 'admin' EnduserRoleKey = attribute.Key("enduser.role") // Scopes or granted authorities the client currently possesses extracted from // token or application security context. The value would come from the scope // associated with an [OAuth 2.0 Access // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value // in a [SAML 2.0 Assertion](http://docs.oasis- // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). // // Type: string // Required: No // Stability: stable // Examples: 'read:message, write:files' EnduserScopeKey = attribute.Key("enduser.scope") ) // These attributes may be used for any operation to store information about a thread that started a span. const ( // Current "managed" thread ID (as opposed to OS thread ID). // // Type: int // Required: No // Stability: stable // Examples: 42 ThreadIDKey = attribute.Key("thread.id") // Current thread name. // // Type: string // Required: No // Stability: stable // Examples: 'main' ThreadNameKey = attribute.Key("thread.name") ) // These attributes allow to report this unit of code and therefore to provide more context about the span. const ( // The method or function name, or equivalent (usually rightmost part of the code // unit's name). // // Type: string // Required: No // Stability: stable // Examples: 'serveRequest' CodeFunctionKey = attribute.Key("code.function") // The "namespace" within which `code.function` is defined. Usually the qualified // class or module name, such that `code.namespace` + some separator + // `code.function` form a unique identifier for the code unit. // // Type: string // Required: No // Stability: stable // Examples: 'com.example.MyHTTPService' CodeNamespaceKey = attribute.Key("code.namespace") // The source code file name that identifies the code unit as uniquely as possible // (preferably an absolute file path). // // Type: string // Required: No // Stability: stable // Examples: '/usr/local/MyApplication/content_root/app/index.php' CodeFilepathKey = attribute.Key("code.filepath") // The line number in `code.filepath` best representing the operation. It SHOULD // point within the code unit named in `code.function`. // // Type: int // Required: No // Stability: stable // Examples: 42 CodeLineNumberKey = attribute.Key("code.lineno") ) // This document defines semantic conventions for HTTP client and server Spans. const ( // HTTP request method. // // Type: string // Required: Always // Stability: stable // Examples: 'GET', 'POST', 'HEAD' HTTPMethodKey = attribute.Key("http.method") // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. // Usually the fragment is not transmitted over HTTP, but if it is known, it // should be included nevertheless. // // Type: string // Required: No // Stability: stable // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' // Note: `http.url` MUST NOT contain credentials passed via URL in form of // `https://username:password@www.example.com/`. 
In such case the attribute's // value should be `https://www.example.com/`. HTTPURLKey = attribute.Key("http.url") // The full request target as passed in an HTTP request line or equivalent. // // Type: string // Required: No // Stability: stable // Examples: '/path/12314/?q=ddds#123' HTTPTargetKey = attribute.Key("http.target") // The value of the [HTTP host // header](https://tools.ietf.org/html/rfc7230#section-5.4). An empty Host header // should also be reported, see note. // // Type: string // Required: No // Stability: stable // Examples: 'www.example.org' // Note: When the header is present but empty the attribute SHOULD be set to the // empty string. Note that this is a valid situation that is expected in certain // cases, according to the aforementioned [section of RFC // 7230](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is not // set the attribute MUST NOT be set. HTTPHostKey = attribute.Key("http.host") // The URI scheme identifying the used protocol. // // Type: string // Required: No // Stability: stable // Examples: 'http', 'https' HTTPSchemeKey = attribute.Key("http.scheme") // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). // // Type: int // Required: If and only if one was received/sent. // Stability: stable // Examples: 200 HTTPStatusCodeKey = attribute.Key("http.status_code") // Kind of HTTP protocol used. // // Type: Enum // Required: No // Stability: stable // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP` // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed. HTTPFlavorKey = attribute.Key("http.flavor") // Value of the [HTTP User- // Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by the // client. // // Type: string // Required: No // Stability: stable // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' HTTPUserAgentKey = attribute.Key("http.user_agent") // The size of the request payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For // requests using transport encoding, this should be the compressed size. // // Type: int // Required: No // Stability: stable // Examples: 3495 HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") // The size of the uncompressed request payload body after transport decoding. Not // set if transport encoding not used. // // Type: int // Required: No // Stability: stable // Examples: 5493 HTTPRequestContentLengthUncompressedKey = attribute.Key("http.request_content_length_uncompressed") // The size of the response payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For // requests using transport encoding, this should be the compressed size. // // Type: int // Required: No // Stability: stable // Examples: 3495 HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") // The size of the uncompressed response payload body after transport decoding. // Not set if transport encoding not used. // // Type: int // Required: No // Stability: stable // Examples: 5493 HTTPResponseContentLengthUncompressedKey = attribute.Key("http.response_content_length_uncompressed") // The ordinal number of request re-sending attempt. // // Type: int // Required: If and only if a request was retried.
// Stability: stable // Examples: 3 HTTPRetryCountKey = attribute.Key("http.retry_count") ) var ( // HTTP/1.0 HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") // HTTP/1.1 HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") // HTTP/2 HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") // HTTP/3 HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") // SPDY protocol HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") // QUIC protocol HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") ) // Semantic Convention for HTTP Server const ( // The primary server name of the matched virtual host. This should be obtained // via configuration. If no such configuration can be obtained, this attribute // MUST NOT be set ( `net.host.name` should be used instead). // // Type: string // Required: No // Stability: stable // Examples: 'example.com' // Note: `http.url` is usually not readily available on the server side but would // have to be assembled in a cumbersome and sometimes lossy process from other // information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus // preferred to supply the raw data that is available. HTTPServerNameKey = attribute.Key("http.server_name") // The matched route (path template). // // Type: string // Required: No // Stability: stable // Examples: '/users/:userID?' HTTPRouteKey = attribute.Key("http.route") // The IP address of the original client behind all proxies, if known (e.g. from // [X-Forwarded-For](https://developer.mozilla.org/en- // US/docs/Web/HTTP/Headers/X-Forwarded-For)). // // Type: string // Required: No // Stability: stable // Examples: '83.164.160.102' // Note: This is not necessarily the same as `net.peer.ip`, which would // identify the network-level peer, which may be a proxy. // This attribute should be set when a source of information different // from the one used for `net.peer.ip`, is available even if that other // source just confirms the same value as `net.peer.ip`. // Rationale: For `net.peer.ip`, one typically does not know if it // comes from a proxy, reverse proxy, or the actual client. Setting // `http.client_ip` when it's the same as `net.peer.ip` means that // one is at least somewhat confident that the address is not that of // the closest proxy. HTTPClientIPKey = attribute.Key("http.client_ip") ) // Attributes that exist for multiple DynamoDB request types. const ( // The keys in the `RequestItems` object field. // // Type: string[] // Required: No // Stability: stable // Examples: 'Users', 'Cats' AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") // The JSON-serialized value of each item in the `ConsumedCapacity` response // field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": // "string", "WriteCapacityUnits": number }' AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") // The JSON-serialized value of the `ItemCollectionMetrics` response field. 
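// Illustrative usage of the HTTP span attributes defined above (a minimal
// sketch, not part of the generated conventions; the span variable and all
// values below are assumptions for the example only):
//
//	span.SetAttributes(
//		HTTPMethodKey.String("GET"),
//		HTTPRouteKey.String("/users/:userID"),
//		HTTPStatusCodeKey.Int(200),
//		HTTPFlavorHTTP11, // enum value declared above
//	)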
// // Type: string // Required: No // Stability: stable // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. // // Type: double // Required: No // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. // // Type: double // Required: No // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") // The value of the `ConsistentRead` request parameter. // // Type: boolean // Required: No // Stability: stable AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") // The value of the `ProjectionExpression` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, // ProductReviews' AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") // The value of the `Limit` request parameter. // // Type: int // Required: No // Stability: stable // Examples: 10 AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") // The value of the `AttributesToGet` request parameter. // // Type: string[] // Required: No // Stability: stable // Examples: 'lives', 'id' AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") // The value of the `IndexName` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'name_to_group' AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") // The value of the `Select` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'ALL_ATTRIBUTES', 'COUNT' AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") ) // DynamoDB.CreateTable const ( // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request // field // // Type: string[] // Required: No // Stability: stable // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": // number, "WriteCapacityUnits": number } }' AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request // field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], // "ProjectionType": "string" } }' AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") ) // DynamoDB.ListTables const ( // The value of the `ExclusiveStartTableName` request parameter. 
// // Type: string // Required: No // Stability: stable // Examples: 'Users', 'CatsTable' AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") // The number of items in the `TableNames` response parameter. // // Type: int // Required: No // Stability: stable // Examples: 20 AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") ) // DynamoDB.Query const ( // The value of the `ScanIndexForward` request parameter. // // Type: boolean // Required: No // Stability: stable AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") ) // DynamoDB.Scan const ( // The value of the `Segment` request parameter. // // Type: int // Required: No // Stability: stable // Examples: 10 AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") // The value of the `TotalSegments` request parameter. // // Type: int // Required: No // Stability: stable // Examples: 100 AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") // The value of the `Count` response parameter. // // Type: int // Required: No // Stability: stable // Examples: 10 AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") // The value of the `ScannedCount` response parameter. // // Type: int // Required: No // Stability: stable // Examples: 50 AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") ) // DynamoDB.UpdateTable const ( // The JSON-serialized value of each item in the `AttributeDefinitions` request // field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") // The JSON-serialized value of each item in the `GlobalSecondaryIndexUpdates` // request field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { // "AttributeName": "string", "KeyType": "string" } ], "Projection": { // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": // number } }' AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") ) // This document defines the attributes used in messaging systems. const ( // A string identifying the messaging system. // // Type: string // Required: Always // Stability: stable // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' MessagingSystemKey = attribute.Key("messaging.system") // The message destination name. This might be equal to the span name but is // required nevertheless. // // Type: string // Required: Always // Stability: stable // Examples: 'MyQueue', 'MyTopic' MessagingDestinationKey = attribute.Key("messaging.destination") // The kind of message destination // // Type: Enum // Required: Required only if the message destination is either a `queue` or // `topic`. // Stability: stable MessagingDestinationKindKey = attribute.Key("messaging.destination_kind") // A boolean that is true if the message destination is temporary. // // Type: boolean // Required: If missing, it is assumed to be false. // Stability: stable MessagingTempDestinationKey = attribute.Key("messaging.temp_destination") // The name of the transport protocol. // // Type: string // Required: No // Stability: stable // Examples: 'AMQP', 'MQTT' MessagingProtocolKey = attribute.Key("messaging.protocol") // The version of the transport protocol.
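// Illustrative usage of the aws.dynamodb.* attributes defined above for a
// Query call (a minimal sketch, not part of the generated conventions; the
// table and index names are assumptions for the example only):
//
//	span.SetAttributes(
//		AWSDynamoDBTableNamesKey.StringSlice([]string{"Users"}),
//		AWSDynamoDBIndexNameKey.String("name_to_group"),
//		AWSDynamoDBLimitKey.Int(10),
//		AWSDynamoDBScanForwardKey.Bool(true),
//	)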
// // Type: string // Required: No // Stability: stable // Examples: '0.9.1' MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version") // Connection string. // // Type: string // Required: No // Stability: stable // Examples: 'tibjmsnaming://localhost:7222', // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' MessagingURLKey = attribute.Key("messaging.url") // A value used by the messaging system as an identifier for the message, // represented as a string. // // Type: string // Required: No // Stability: stable // Examples: '452a7c7c7c7048c2f887f61572b18fc2' MessagingMessageIDKey = attribute.Key("messaging.message_id") // The [conversation ID](#conversations) identifying the conversation to which the // message belongs, represented as a string. Sometimes called "Correlation ID". // // Type: string // Required: No // Stability: stable // Examples: 'MyConversationID' MessagingConversationIDKey = attribute.Key("messaging.conversation_id") // The (uncompressed) size of the message payload in bytes. Also use this // attribute if it is unknown whether the compressed or uncompressed payload size // is reported. // // Type: int // Required: No // Stability: stable // Examples: 2738 MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes") // The compressed size of the message payload in bytes. // // Type: int // Required: No // Stability: stable // Examples: 2048 MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes") ) var ( // A message sent to a queue MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") // A message sent to a topic MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") ) // Semantic convention for a consumer of messages received from a messaging system const ( // A string identifying the kind of message consumption as defined in the // [Operation names](#operation-names) section above. If the operation is "send", // this attribute MUST NOT be set, since the operation can be inferred from the // span kind in that case. // // Type: Enum // Required: No // Stability: stable MessagingOperationKey = attribute.Key("messaging.operation") // The identifier for the consumer receiving a message. For Kafka, set it to // `{messaging.kafka.consumer_group} - {messaging.kafka.client_id}`, if both are // present, or only `messaging.kafka.consumer_group`. For brokers, such as // RabbitMQ and Artemis, set it to the `client_id` of the client consuming the // message. // // Type: string // Required: No // Stability: stable // Examples: 'mygroup - client-6' MessagingConsumerIDKey = attribute.Key("messaging.consumer_id") ) var ( // receive MessagingOperationReceive = MessagingOperationKey.String("receive") // process MessagingOperationProcess = MessagingOperationKey.String("process") ) // Attributes for RabbitMQ const ( // RabbitMQ message routing key. // // Type: string // Required: Unless it is empty. // Stability: stable // Examples: 'myKey' MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key") ) // Attributes for Apache Kafka const ( // Message keys in Kafka are used for grouping alike messages to ensure they're // processed on the same partition. They differ from `messaging.message_id` in // that they're not unique. If the key is `null`, the attribute MUST NOT be set. 
// // Type: string // Required: No // Stability: stable // Examples: 'myKey' // Note: If the key type is not string, its string representation has to be // supplied for the attribute. If the key has no unambiguous, canonical string // form, don't include its value. MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key") // Name of the Kafka Consumer Group that is handling the message. Only applies to // consumers, not producers. // // Type: string // Required: No // Stability: stable // Examples: 'my-group' MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group") // Client ID for the Consumer or Producer that is handling the message. // // Type: string // Required: No // Stability: stable // Examples: 'client-5' MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") // Partition the message is sent to. // // Type: int // Required: No // Stability: stable // Examples: 2 MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition") // A boolean that is true if the message is a tombstone. // // Type: boolean // Required: If missing, it is assumed to be false. // Stability: stable MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone") ) // Attributes for Apache RocketMQ const ( // Namespace of RocketMQ resources; resources in different namespaces are // individual. // // Type: string // Required: Always // Stability: stable // Examples: 'myNamespace' MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") // Name of the RocketMQ producer/consumer group that is handling the message. The // client type is identified by the SpanKind. // // Type: string // Required: Always // Stability: stable // Examples: 'myConsumerGroup' MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") // The unique identifier for each client. // // Type: string // Required: Always // Stability: stable // Examples: 'myhost@8742@s8083jm' MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") // Type of message. // // Type: Enum // Required: No // Stability: stable MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message_type") // The secondary classifier of message besides topic. // // Type: string // Required: No // Stability: stable // Examples: 'tagA' MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message_tag") // Key(s) of message, another way to mark message besides message id. // // Type: string[] // Required: No // Stability: stable // Examples: 'keyA', 'keyB' MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message_keys") // Model of message consumption. This only applies to consumer spans.
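// Illustrative usage of the messaging.* and messaging.kafka.* attributes
// defined above on a consumer span (a minimal sketch, not part of the
// generated conventions; the topic and group names are assumptions only):
//
//	span.SetAttributes(
//		MessagingSystemKey.String("kafka"),
//		MessagingDestinationKey.String("MyTopic"),
//		MessagingDestinationKindTopic, // enum value declared above
//		MessagingOperationReceive,     // enum value declared above
//		MessagingKafkaConsumerGroupKey.String("my-group"),
//	)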
// // Type: Enum // Required: No // Stability: stable MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") ) var ( // Normal message MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") // FIFO message MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") // Delay message MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") // Transaction message MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") ) var ( // Clustering consumption model MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") // Broadcasting consumption model MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") ) // This document defines semantic conventions for remote procedure calls. const ( // A string identifying the remoting system. See below for a list of well-known // identifiers. // // Type: Enum // Required: Always // Stability: stable RPCSystemKey = attribute.Key("rpc.system") // The full (logical) name of the service being called, including its package // name, if applicable. // // Type: string // Required: No, but recommended // Stability: stable // Examples: 'myservice.EchoService' // Note: This is the logical name of the service from the RPC interface // perspective, which can be different from the name of any implementing class. // The `code.namespace` attribute may be used to store the latter (despite the // attribute name, it may include a class name; e.g., class with method actually // executing the call on the server side, RPC client stub class on the client // side). RPCServiceKey = attribute.Key("rpc.service") // The name of the (logical) method being called, must be equal to the $method // part in the span name. // // Type: string // Required: No, but recommended // Stability: stable // Examples: 'exampleMethod' // Note: This is the logical name of the method from the RPC interface // perspective, which can be different from the name of any implementing // method/function. The `code.function` attribute may be used to store the latter // (e.g., method actually executing the call on the server side, RPC client stub // method on the client side). RPCMethodKey = attribute.Key("rpc.method") ) var ( // gRPC RPCSystemGRPC = RPCSystemKey.String("grpc") // Java RMI RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") // .NET WCF RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") // Apache Dubbo RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") ) // Tech-specific attributes for gRPC. const ( // The [numeric status // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC // request. 
// // Type: Enum // Required: Always // Stability: stable RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ) var ( // OK RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) // CANCELLED RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) // UNKNOWN RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) // INVALID_ARGUMENT RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) // DEADLINE_EXCEEDED RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) // NOT_FOUND RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) // ALREADY_EXISTS RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) // PERMISSION_DENIED RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) // RESOURCE_EXHAUSTED RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) // FAILED_PRECONDITION RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) // ABORTED RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) // OUT_OF_RANGE RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) // UNIMPLEMENTED RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) // INTERNAL RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) // UNAVAILABLE RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) // DATA_LOSS RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) // UNAUTHENTICATED RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) ) // Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). const ( // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC // 1.0 does not specify this, the value can be omitted. // // Type: string // Required: If missing, it is assumed to be "1.0". // Stability: stable // Examples: '2.0', '1.0' RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") // `id` property of request or response. Since the protocol allows id to be int, // string, `null` or missing (for notifications), value is expected to be cast to // string for simplicity. Use empty string in case of `null` value. Omit entirely // if this is a notification. // // Type: string // Required: No // Stability: stable // Examples: '10', 'request-7', '' RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") // `error.code` property of response if it is an error response. // // Type: int // Required: If missing, response is assumed to be successful. // Stability: stable // Examples: -32700, 100 RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") // `error.message` property of response if it is an error response. // // Type: string // Required: No // Stability: stable // Examples: 'Parse error', 'User already exists' RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") ) // RPC received/sent message. const ( // Whether this is a received or sent message. // // Type: Enum // Required: No // Stability: stable MessageTypeKey = attribute.Key("message.type") // MUST be calculated as two different counters starting from `1`, one for sent // messages and one for received messages. // // Type: int // Required: No // Stability: stable // Note: This way we guarantee that the values will be consistent between // different implementations. MessageIDKey = attribute.Key("message.id") // Compressed size of the message in bytes. // // Type: int // Required: No // Stability: stable MessageCompressedSizeKey = attribute.Key("message.compressed_size") // Uncompressed size of the message in bytes.
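// Illustrative usage of the rpc.* attributes defined above for a gRPC client
// span (a minimal sketch, not part of the generated conventions; the service
// and method names are assumptions for the example only):
//
//	span.SetAttributes(
//		RPCSystemGRPC, // enum value declared above
//		RPCServiceKey.String("myservice.EchoService"),
//		RPCMethodKey.String("Echo"),
//		RPCGRPCStatusCodeOk, // enum value declared above
//	)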
// // Type: int // Required: No // Stability: stable MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") ) var ( // sent MessageTypeSent = MessageTypeKey.String("SENT") // received MessageTypeReceived = MessageTypeKey.String("RECEIVED") ) opentelemetry-go-1.21.0/semconv/v1.12.0/000077500000000000000000000000001452547353200174635ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.12.0/doc.go000066400000000000000000000016641452547353200205660ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package semconv implements OpenTelemetry semantic conventions. // // OpenTelemetry semantic conventions are agreed standardized naming // patterns for OpenTelemetry things. This package represents the conventions // as of the v1.12.0 version of the OpenTelemetry specification. package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" opentelemetry-go-1.21.0/semconv/v1.12.0/exception.go000066400000000000000000000014301452547353200220060ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" const ( // ExceptionEventName is the name of the Span event representing an exception. ExceptionEventName = "exception" ) opentelemetry-go-1.21.0/semconv/v1.12.0/http.go000066400000000000000000000113121452547353200207670ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" import ( "net/http" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/semconv/internal" "go.opentelemetry.io/otel/trace" ) // HTTP scheme attributes. 
var ( HTTPSchemeHTTP = HTTPSchemeKey.String("http") HTTPSchemeHTTPS = HTTPSchemeKey.String("https") ) var sc = &internal.SemanticConventions{ EnduserIDKey: EnduserIDKey, HTTPClientIPKey: HTTPClientIPKey, HTTPFlavorKey: HTTPFlavorKey, HTTPHostKey: HTTPHostKey, HTTPMethodKey: HTTPMethodKey, HTTPRequestContentLengthKey: HTTPRequestContentLengthKey, HTTPRouteKey: HTTPRouteKey, HTTPSchemeHTTP: HTTPSchemeHTTP, HTTPSchemeHTTPS: HTTPSchemeHTTPS, HTTPServerNameKey: HTTPServerNameKey, HTTPStatusCodeKey: HTTPStatusCodeKey, HTTPTargetKey: HTTPTargetKey, HTTPURLKey: HTTPURLKey, HTTPUserAgentKey: HTTPUserAgentKey, NetHostIPKey: NetHostIPKey, NetHostNameKey: NetHostNameKey, NetHostPortKey: NetHostPortKey, NetPeerIPKey: NetPeerIPKey, NetPeerNameKey: NetPeerNameKey, NetPeerPortKey: NetPeerPortKey, NetTransportIP: NetTransportIP, NetTransportOther: NetTransportOther, NetTransportTCP: NetTransportTCP, NetTransportUDP: NetTransportUDP, NetTransportUnix: NetTransportUnix, } // NetAttributesFromHTTPRequest generates attributes of the net // namespace as specified by the OpenTelemetry specification for a // span. The network parameter is a string that net.Dial function // from standard library can understand. func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { return sc.NetAttributesFromHTTPRequest(network, request) } // EndUserAttributesFromHTTPRequest generates attributes of the // enduser namespace as specified by the OpenTelemetry specification // for a span. func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { return sc.EndUserAttributesFromHTTPRequest(request) } // HTTPClientAttributesFromHTTPRequest generates attributes of the // http namespace as specified by the OpenTelemetry specification for // a span on the client side. func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { return sc.HTTPClientAttributesFromHTTPRequest(request) } // HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes // to be used with server-side HTTP metrics. func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { return sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, request) } // HTTPServerAttributesFromHTTPRequest generates attributes of the // http namespace as specified by the OpenTelemetry specification for // a span on the server side. Currently, only basic authentication is // supported. func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { return sc.HTTPServerAttributesFromHTTPRequest(serverName, route, request) } // HTTPAttributesFromHTTPStatusCode generates attributes of the http // namespace as specified by the OpenTelemetry specification for a // span. func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { return sc.HTTPAttributesFromHTTPStatusCode(code) } // SpanStatusFromHTTPStatusCode generates a status code and a message // as specified by the OpenTelemetry specification for a span. func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { return internal.SpanStatusFromHTTPStatusCode(code) } // SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message // as specified by the OpenTelemetry specification for a span. // Exclude 4xx for SERVER to set the appropriate status. 
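// Illustrative combination of the helpers in this file (a minimal sketch, not
// part of the package API; it assumes r is an *http.Request and span is a
// trace.Span obtained elsewhere):
//
//	span.SetAttributes(HTTPServerAttributesFromHTTPRequest("example.com", "/users/:userID", r)...)
//	code, msg := SpanStatusFromHTTPStatusCode(http.StatusOK)
//	span.SetStatus(code, msg)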
func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { return internal.SpanStatusFromHTTPStatusCodeAndSpanKind(code, spanKind) } opentelemetry-go-1.21.0/semconv/v1.12.0/resource.go000066400000000000000000001073251452547353200216510ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" import "go.opentelemetry.io/otel/attribute" // The web browser in which the application represented by the resource is running. The `browser.*` attributes MUST be used only for resources that represent applications running in a web browser (regardless of whether running on a mobile or desktop device). const ( // Array of brand name and version separated by a space // // Type: string[] // Required: No // Stability: stable // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (navigator.userAgentData.brands). BrowserBrandsKey = attribute.Key("browser.brands") // The platform on which the browser is running // // Type: string // Required: No // Stability: stable // Examples: 'Windows', 'macOS', 'Android' // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (navigator.userAgentData.platform). If unavailable, the legacy // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD // be left unset in order for the values to be consistent. // The list of possible values is defined in the [W3C User-Agent Client Hints // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). // Note that some (but not all) of these values can overlap with values in the // [os.type and os.name attributes](./os.md). However, for consistency, the values // in the `browser.platform` attribute should capture the exact value that the // user agent provides. BrowserPlatformKey = attribute.Key("browser.platform") // Full user-agent string provided by the browser // // Type: string // Required: No // Stability: stable // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 // (KHTML, ' // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' // Note: The user-agent value SHOULD be provided only from browsers that do not // have a mechanism to retrieve brands and platform individually from the User- // Agent Client Hints API. To retrieve the value, the legacy `navigator.userAgent` // API can be used. BrowserUserAgentKey = attribute.Key("browser.user_agent") ) // A cloud environment (e.g. GCP, Azure, AWS) const ( // Name of the cloud provider. // // Type: Enum // Required: No // Stability: stable CloudProviderKey = attribute.Key("cloud.provider") // The cloud account ID the resource is assigned to. 
// // Type: string // Required: No // Stability: stable // Examples: '111111111111', 'opentelemetry' CloudAccountIDKey = attribute.Key("cloud.account.id") // The geographical region the resource is running. // // Type: string // Required: No // Stability: stable // Examples: 'us-central1', 'us-east-1' // Note: Refer to your provider's docs to see the available regions, for example // [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc- // detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global- // infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en- // us/global-infrastructure/geographies/), [Google Cloud // regions](https://cloud.google.com/about/locations), or [Tencent Cloud // regions](https://intl.cloud.tencent.com/document/product/213/6091). CloudRegionKey = attribute.Key("cloud.region") // Cloud regions often have multiple, isolated locations known as zones to // increase availability. Availability zone represents the zone where the resource // is running. // // Type: string // Required: No // Stability: stable // Examples: 'us-east-1c' // Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud. CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") // The cloud platform in use. // // Type: Enum // Required: No // Stability: stable // Note: The prefix of the service SHOULD match the one specified in // `cloud.provider`. CloudPlatformKey = attribute.Key("cloud.platform") ) var ( // Alibaba Cloud CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") // Amazon Web Services CloudProviderAWS = CloudProviderKey.String("aws") // Microsoft Azure CloudProviderAzure = CloudProviderKey.String("azure") // Google Cloud Platform CloudProviderGCP = CloudProviderKey.String("gcp") // Tencent Cloud CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") ) var ( // Alibaba Cloud Elastic Compute Service CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") // Alibaba Cloud Function Compute CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") // AWS Elastic Compute Cloud CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") // AWS Elastic Container Service CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") // AWS Elastic Kubernetes Service CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") // AWS Lambda CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") // AWS Elastic Beanstalk CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") // AWS App Runner CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") // Azure Virtual Machines CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") // Azure Container Instances CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") // Azure Kubernetes Service CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") // Azure Functions CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") // Azure App Service CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") // Google Cloud Compute Engine (GCE) CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") // Google Cloud Run CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") // Google Cloud Kubernetes Engine (GKE) CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") // Google Cloud Functions (GCF) CloudPlatformGCPCloudFunctions = 
CloudPlatformKey.String("gcp_cloud_functions") // Google Cloud App Engine (GAE) CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") // Tencent Cloud Cloud Virtual Machine (CVM) CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") // Tencent Cloud Elastic Kubernetes Service (EKS) CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") // Tencent Cloud Serverless Cloud Function (SCF) CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") ) // Resources used by AWS Elastic Container Service (ECS). const ( // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws. // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us- // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo // perguide/clusters.html). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l // aunch_types.html) for an ECS task. // // Type: Enum // Required: No // Stability: stable AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates // t/developerguide/task_definitions.html). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us- // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") // The task definition family this task definition is a member of. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-family' AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") // The revision for this task definition. // // Type: string // Required: No // Stability: stable // Examples: '8', '26' AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") ) var ( // ec2 AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") // fargate AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") ) // Resources used by AWS Elastic Kubernetes Service (EKS). const ( // The ARN of an EKS cluster. // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") ) // Resources specific to Amazon Web Services. const ( // The name(s) of the AWS log group(s) an application is writing to. // // Type: string[] // Required: No // Stability: stable // Examples: '/aws/lambda/my-function', 'opentelemetry-service' // Note: Multiple log groups must be supported for cases like multi-container // applications, where a single application has sidecar containers, and each write // to their own log group. AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") // The Amazon Resource Name(s) (ARN) of the AWS log group(s). // // Type: string[] // Required: No // Stability: stable // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' // Note: See the [log group ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- // access-control-overview-cwl.html#CWL_ARN_Format). 
AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") // The name(s) of the AWS log stream(s) an application is writing to. // // Type: string[] // Required: No // Stability: stable // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") // The ARN(s) of the AWS log stream(s). // // Type: string[] // Required: No // Stability: stable // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' // Note: See the [log stream ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain // several log streams, so these ARNs necessarily identify both a log group and a // log stream. AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") ) // A container instance. const ( // Container name used by container runtime. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-autoconf' ContainerNameKey = attribute.Key("container.name") // Container ID. Usually a UUID, as for example used to [identify Docker // containers](https://docs.docker.com/engine/reference/run/#container- // identification). The UUID might be abbreviated. // // Type: string // Required: No // Stability: stable // Examples: 'a3bf90e006b2' ContainerIDKey = attribute.Key("container.id") // The container runtime managing this container. // // Type: string // Required: No // Stability: stable // Examples: 'docker', 'containerd', 'rkt' ContainerRuntimeKey = attribute.Key("container.runtime") // Name of the image the container was built on. // // Type: string // Required: No // Stability: stable // Examples: 'gcr.io/opentelemetry/operator' ContainerImageNameKey = attribute.Key("container.image.name") // Container image tag. // // Type: string // Required: No // Stability: stable // Examples: '0.1' ContainerImageTagKey = attribute.Key("container.image.tag") ) // The software deployment. const ( // Name of the [deployment // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka // deployment tier). // // Type: string // Required: No // Stability: stable // Examples: 'staging', 'production' DeploymentEnvironmentKey = attribute.Key("deployment.environment") ) // The device on which the process represented by this resource is running. const ( // A unique identifier representing the device // // Type: string // Required: No // Stability: stable // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' // Note: The device identifier MUST only be defined using the values outlined // below. This value is not an advertising identifier and MUST NOT be used as // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the // Firebase Installation ID or a globally unique UUID which is persisted across // sessions in your application. More information can be found // [here](https://developer.android.com/training/articles/user-data-ids) on best // practices and exact implementation details. Caution should be taken when // storing personal data or anything which can identify a user. GDPR and data // protection laws may apply, ensure you do your own due diligence. 
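// Illustrative usage of the resource attributes defined above (a minimal
// sketch, not part of the generated conventions; it assumes the
// go.opentelemetry.io/otel/sdk/resource package is imported as resource and
// the concrete values are examples only):
//
//	res := resource.NewWithAttributes(
//		SchemaURL, // schema URL constant provided by this semconv package
//		CloudProviderAWS,
//		CloudRegionKey.String("us-east-1"),
//		ContainerNameKey.String("opentelemetry-autoconf"),
//		DeploymentEnvironmentKey.String("production"),
//	)
//	_ = res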
DeviceIDKey = attribute.Key("device.id") // The model identifier for the device // // Type: string // Required: No // Stability: stable // Examples: 'iPhone3,4', 'SM-G920F' // Note: It's recommended this value represents a machine readable version of the // model identifier rather than the market or consumer-friendly name of the // device. DeviceModelIdentifierKey = attribute.Key("device.model.identifier") // The marketing name for the device model // // Type: string // Required: No // Stability: stable // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' // Note: It's recommended this value represents a human readable version of the // device model rather than a machine readable alternative. DeviceModelNameKey = attribute.Key("device.model.name") // The name of the device manufacturer // // Type: string // Required: No // Stability: stable // Examples: 'Apple', 'Samsung' // Note: The Android OS provides this field via // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). // iOS apps SHOULD hardcode the value `Apple`. DeviceManufacturerKey = attribute.Key("device.manufacturer") ) // A serverless instance. const ( // The name of the single function that this runtime instance executes. // // Type: string // Required: Always // Stability: stable // Examples: 'my-function', 'myazurefunctionapp/some-function-name' // Note: This is the name of the function as configured/deployed on the FaaS // platform and is usually different from the name of the callback // function (which may be stored in the // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span- // general.md#source-code-attributes) // span attributes). // For some cloud providers, the above definition is ambiguous. The following // definition of function name MUST be used for this attribute // (and consequently the span name) for the listed cloud providers/products: // * **Azure:** The full name `/`, i.e., function app name // followed by a forward slash followed by the function name (this form // can also be seen in the resource JSON for the function). // This means that a span attribute MUST be used, as an Azure function // app can host multiple functions that would usually share // a TracerProvider (see also the `faas.id` attribute). FaaSNameKey = attribute.Key("faas.name") // The unique ID of the single function that this runtime instance executes. // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' // Note: On some cloud providers, it may not be possible to determine the full ID // at startup, // so consider setting `faas.id` as a span attribute instead. // The exact value to use for `faas.id` depends on the cloud provider: // * **AWS Lambda:** The function // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and- // namespaces.html). // Take care not to use the "invoked ARN" directly but replace any // [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration- // aliases.html) // with the resolved function version, as the same runtime instance may be // invokable with // multiple different aliases. // * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full- // resource-names) // * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en- // us/rest/api/resources/resources/get-by-id) of the invoked function, // *not* the function app, having the form // `/subscriptions//resourceGroups//providers/Microsoft.We // b/sites//functions/`. 
// This means that a span attribute MUST be used, as an Azure function app can // host multiple functions that would usually share // a TracerProvider. FaaSIDKey = attribute.Key("faas.id") // The immutable version of the function being executed. // // Type: string // Required: No // Stability: stable // Examples: '26', 'pinkfroid-00002' // Note: Depending on the cloud provider and platform, use: // * **AWS Lambda:** The [function // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration- // versions.html) // (an integer represented as a decimal string). // * **Google Cloud Run:** The // [revision](https://cloud.google.com/run/docs/managing/revisions) // (i.e., the function name plus the revision suffix). // * **Google Cloud Functions:** The value of the // [`K_REVISION` environment // variable](https://cloud.google.com/functions/docs/env- // var#runtime_environment_variables_set_automatically). // * **Azure Functions:** Not applicable. Do not set this attribute. FaaSVersionKey = attribute.Key("faas.version") // The execution environment ID as a string, that will be potentially reused for // other invocations to the same function/function version. // // Type: string // Required: No // Stability: stable // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' // Note: * **AWS Lambda:** Use the (full) log stream name. FaaSInstanceKey = attribute.Key("faas.instance") // The amount of memory available to the serverless function in MiB. // // Type: int // Required: No // Stability: stable // Examples: 128 // Note: It's recommended to set this attribute since e.g. too little memory can // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this // information. FaaSMaxMemoryKey = attribute.Key("faas.max_memory") ) // A host is defined as a general computing instance. const ( // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud // provider. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-test' HostIDKey = attribute.Key("host.id") // Name of the host. On Unix systems, it may contain what the hostname command // returns, or the fully qualified hostname, or another name specified by the // user. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-test' HostNameKey = attribute.Key("host.name") // Type of host. For Cloud, this must be the machine type. // // Type: string // Required: No // Stability: stable // Examples: 'n1-standard-1' HostTypeKey = attribute.Key("host.type") // The CPU architecture the host system is running on. // // Type: Enum // Required: No // Stability: stable HostArchKey = attribute.Key("host.arch") // Name of the VM image or OS install the host was instantiated from. // // Type: string // Required: No // Stability: stable // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' HostImageNameKey = attribute.Key("host.image.name") // VM image ID. For Cloud, this value is from the provider. // // Type: string // Required: No // Stability: stable // Examples: 'ami-07b06b442921831e5' HostImageIDKey = attribute.Key("host.image.id") // The version string of the VM image as defined in [Version // Attributes](README.md#version-attributes). 
// // Type: string // Required: No // Stability: stable // Examples: '0.1' HostImageVersionKey = attribute.Key("host.image.version") ) var ( // AMD64 HostArchAMD64 = HostArchKey.String("amd64") // ARM32 HostArchARM32 = HostArchKey.String("arm32") // ARM64 HostArchARM64 = HostArchKey.String("arm64") // Itanium HostArchIA64 = HostArchKey.String("ia64") // 32-bit PowerPC HostArchPPC32 = HostArchKey.String("ppc32") // 64-bit PowerPC HostArchPPC64 = HostArchKey.String("ppc64") // IBM z/Architecture HostArchS390x = HostArchKey.String("s390x") // 32-bit x86 HostArchX86 = HostArchKey.String("x86") ) // A Kubernetes Cluster. const ( // The name of the cluster. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-cluster' K8SClusterNameKey = attribute.Key("k8s.cluster.name") ) // A Kubernetes Node object. const ( // The name of the Node. // // Type: string // Required: No // Stability: stable // Examples: 'node-1' K8SNodeNameKey = attribute.Key("k8s.node.name") // The UID of the Node. // // Type: string // Required: No // Stability: stable // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' K8SNodeUIDKey = attribute.Key("k8s.node.uid") ) // A Kubernetes Namespace. const ( // The name of the namespace that the pod is running in. // // Type: string // Required: No // Stability: stable // Examples: 'default' K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") ) // A Kubernetes Pod object. const ( // The UID of the Pod. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SPodUIDKey = attribute.Key("k8s.pod.uid") // The name of the Pod. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-pod-autoconf' K8SPodNameKey = attribute.Key("k8s.pod.name") ) // A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). const ( // The name of the Container from Pod specification, must be unique within a Pod. // Container runtime usually uses different globally unique name // (`container.name`). // // Type: string // Required: No // Stability: stable // Examples: 'redis' K8SContainerNameKey = attribute.Key("k8s.container.name") // Number of times the container was restarted. This attribute can be used to // identify a particular container (running or stopped) within a container spec. // // Type: int // Required: No // Stability: stable // Examples: 0, 2 K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") ) // A Kubernetes ReplicaSet object. const ( // The UID of the ReplicaSet. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") // The name of the ReplicaSet. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") ) // A Kubernetes Deployment object. const ( // The UID of the Deployment. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") // The name of the Deployment. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") ) // A Kubernetes StatefulSet object. const ( // The UID of the StatefulSet. 
// // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") // The name of the StatefulSet. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") ) // A Kubernetes DaemonSet object. const ( // The UID of the DaemonSet. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") // The name of the DaemonSet. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") ) // A Kubernetes Job object. const ( // The UID of the Job. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SJobUIDKey = attribute.Key("k8s.job.uid") // The name of the Job. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SJobNameKey = attribute.Key("k8s.job.name") ) // A Kubernetes CronJob object. const ( // The UID of the CronJob. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") // The name of the CronJob. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") ) // The operating system (OS) on which the process represented by this resource is running. const ( // The operating system type. // // Type: Enum // Required: Always // Stability: stable OSTypeKey = attribute.Key("os.type") // Human readable (not intended to be parsed) OS version information, like e.g. // reported by `ver` or `lsb_release -a` commands. // // Type: string // Required: No // Stability: stable // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' OSDescriptionKey = attribute.Key("os.description") // Human readable operating system name. // // Type: string // Required: No // Stability: stable // Examples: 'iOS', 'Android', 'Ubuntu' OSNameKey = attribute.Key("os.name") // The version string of the operating system as defined in [Version // Attributes](../../resource/semantic_conventions/README.md#version-attributes). // // Type: string // Required: No // Stability: stable // Examples: '14.2.1', '18.04.1' OSVersionKey = attribute.Key("os.version") ) var ( // Microsoft Windows OSTypeWindows = OSTypeKey.String("windows") // Linux OSTypeLinux = OSTypeKey.String("linux") // Apple Darwin OSTypeDarwin = OSTypeKey.String("darwin") // FreeBSD OSTypeFreeBSD = OSTypeKey.String("freebsd") // NetBSD OSTypeNetBSD = OSTypeKey.String("netbsd") // OpenBSD OSTypeOpenBSD = OSTypeKey.String("openbsd") // DragonFly BSD OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") // HP-UX (Hewlett Packard Unix) OSTypeHPUX = OSTypeKey.String("hpux") // AIX (Advanced Interactive eXecutive) OSTypeAIX = OSTypeKey.String("aix") // SunOS, Oracle Solaris OSTypeSolaris = OSTypeKey.String("solaris") // IBM z/OS OSTypeZOS = OSTypeKey.String("z_os") ) // An operating system process. const ( // Process identifier (PID). // // Type: int // Required: No // Stability: stable // Examples: 1234 ProcessPIDKey = attribute.Key("process.pid") // The name of the process executable. On Linux based systems, can be set to the // `Name` in `proc/[pid]/status`. 
On Windows, can be set to the base name of // `GetProcessImageFileNameW`. // // Type: string // Required: See below // Stability: stable // Examples: 'otelcol' ProcessExecutableNameKey = attribute.Key("process.executable.name") // The full path to the process executable. On Linux based systems, can be set to // the target of `proc/[pid]/exe`. On Windows, can be set to the result of // `GetProcessImageFileNameW`. // // Type: string // Required: See below // Stability: stable // Examples: '/usr/bin/cmd/otelcol' ProcessExecutablePathKey = attribute.Key("process.executable.path") // The command used to launch the process (i.e. the command name). On Linux based // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, // can be set to the first parameter extracted from `GetCommandLineW`. // // Type: string // Required: See below // Stability: stable // Examples: 'cmd/otelcol' ProcessCommandKey = attribute.Key("process.command") // The full command used to launch the process as a single string representing the // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not // set this if you have to assemble it just for monitoring; use // `process.command_args` instead. // // Type: string // Required: See below // Stability: stable // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' ProcessCommandLineKey = attribute.Key("process.command_line") // All the command arguments (including the command/executable itself) as received // by the process. On Linux-based systems (and some other Unixoid systems // supporting procfs), can be set according to the list of null-delimited strings // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be // the full argv vector passed to `main`. // // Type: string[] // Required: See below // Stability: stable // Examples: 'cmd/otecol', '--config=config.yaml' ProcessCommandArgsKey = attribute.Key("process.command_args") // The username of the user that owns the process. // // Type: string // Required: No // Stability: stable // Examples: 'root' ProcessOwnerKey = attribute.Key("process.owner") ) // The single (language) runtime instance which is monitored. const ( // The name of the runtime of this process. For compiled native binaries, this // SHOULD be the name of the compiler. // // Type: string // Required: No // Stability: stable // Examples: 'OpenJDK Runtime Environment' ProcessRuntimeNameKey = attribute.Key("process.runtime.name") // The version of the runtime of this process, as returned by the runtime without // modification. // // Type: string // Required: No // Stability: stable // Examples: '14.0.2' ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") // An additional description about the runtime of the process, for example a // specific vendor customization of the runtime environment. // // Type: string // Required: No // Stability: stable // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") ) // A service instance. const ( // Logical name of the service. // // Type: string // Required: Always // Stability: stable // Examples: 'shoppingcart' // Note: MUST be the same for all instances of horizontally scaled services. If // the value was not specified, SDKs MUST fallback to `unknown_service:` // concatenated with [`process.executable.name`](process.md#process), e.g. // `unknown_service:bash`. 
If `process.executable.name` is not available, the // value MUST be set to `unknown_service`. ServiceNameKey = attribute.Key("service.name") // A namespace for `service.name`. // // Type: string // Required: No // Stability: stable // Examples: 'Shop' // Note: A string value having a meaning that helps to distinguish a group of // services, for example the team name that owns a group of services. // `service.name` is expected to be unique within the same namespace. If // `service.namespace` is not specified in the Resource then `service.name` is // expected to be unique for all services that have no explicit namespace defined // (so the empty/unspecified namespace is simply one more valid namespace). Zero- // length namespace string is assumed equal to unspecified namespace. ServiceNamespaceKey = attribute.Key("service.namespace") // The string ID of the service instance. // // Type: string // Required: No // Stability: stable // Examples: '627cc493-f310-47de-96bd-71410b7dec09' // Note: MUST be unique for each instance of the same // `service.namespace,service.name` pair (in other words // `service.namespace,service.name,service.instance.id` triplet MUST be globally // unique). The ID helps to distinguish instances of the same service that exist // at the same time (e.g. instances of a horizontally scaled service). It is // preferable for the ID to be persistent and stay the same for the lifetime of // the service instance, however it is acceptable that the ID is ephemeral and // changes during important lifetime events for the service (e.g. service // restarts). If the service has no inherent unique ID that can be used as the // value of this attribute it is recommended to generate a random Version 1 or // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use // Version 5, see RFC 4122 for more recommendations). ServiceInstanceIDKey = attribute.Key("service.instance.id") // The version string of the service API or implementation. // // Type: string // Required: No // Stability: stable // Examples: '2.0.0' ServiceVersionKey = attribute.Key("service.version") ) // The telemetry SDK used to capture data recorded by the instrumentation libraries. const ( // The name of the telemetry SDK as defined above. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") // The language of the telemetry SDK. // // Type: Enum // Required: No // Stability: stable TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") // The version string of the telemetry SDK. // // Type: string // Required: No // Stability: stable // Examples: '1.2.3' TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") // The version string of the auto instrumentation agent, if used. 
// // Type: string // Required: No // Stability: stable // Examples: '1.2.3' TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") ) var ( // cpp TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") // dotnet TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") // erlang TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") // go TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") // java TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") // nodejs TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") // php TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") // python TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") // ruby TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") // webjs TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") // swift TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") ) // Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. const ( // The name of the web engine. // // Type: string // Required: Always // Stability: stable // Examples: 'WildFly' WebEngineNameKey = attribute.Key("webengine.name") // The version of the web engine. // // Type: string // Required: No // Stability: stable // Examples: '21.0.0' WebEngineVersionKey = attribute.Key("webengine.version") // Additional description of the web engine (e.g. detailed version and edition // information). // // Type: string // Required: No // Stability: stable // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' WebEngineDescriptionKey = attribute.Key("webengine.description") ) opentelemetry-go-1.21.0/semconv/v1.12.0/schema.go000066400000000000000000000017141452547353200212550ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ const SchemaURL = "https://opentelemetry.io/schemas/1.12.0" opentelemetry-go-1.21.0/semconv/v1.12.0/trace.go000066400000000000000000001673601452547353200211250ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" import "go.opentelemetry.io/otel/attribute" // Span attributes used by AWS Lambda (in addition to general `faas` attributes). const ( // The full invoked ARN as provided on the `Context` passed to the function // (`Lambda-Runtime-Invoked-Function-ARN` header on the `/runtime/invocation/next` // applicable). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' // Note: This may be different from `faas.id` if an alias is involved. AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") ) // This document defines attributes for CloudEvents. CloudEvents is a specification on how to define event data in a standard way. These attributes can be attached to spans when performing operations with CloudEvents, regardless of the protocol being used. const ( // The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec // .md#id) uniquely identifies the event. // // Type: string // Required: Always // Stability: stable // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") // The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.m // d#source-1) identifies the context in which an event happened. // // Type: string // Required: Always // Stability: stable // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my- // service' CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") // The [version of the CloudEvents specification](https://github.com/cloudevents/s // pec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses. // // Type: string // Required: Always // Stability: stable // Examples: '1.0' CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") // The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/sp // ec.md#type) contains a value describing the type of event related to the // originating occurrence. // // Type: string // Required: Always // Stability: stable // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2' CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") // The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec. // md#subject) of the event in the context of the event producer (identified by // source). // // Type: string // Required: No // Stability: stable // Examples: 'mynewfile.jpg' CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") ) // This document defines semantic conventions for the OpenTracing Shim const ( // Parent-child Reference type // // Type: Enum // Required: No // Stability: stable // Note: The causal relationship between a child Span and a parent Span. OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") ) var ( // The parent Span depends on the child Span in some capacity OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") // The parent Span does not depend in any way on the result of the child Span OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") ) // This document defines the attributes used to perform database client calls. 
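// The function below is an illustrative, non-normative sketch added for documentation purposes only; it is not generated from the specification. It shows how the database client attributes defined in this section might be combined on a client span. The function name and all attribute values are hypothetical examples drawn from the documentation comments; real instrumentation would derive the values from the actual database call.
func exampleDBClientAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		DBSystemPostgreSQL,                                 // db.system
		DBNameKey.String("customers"),                      // db.name
		DBUserKey.String("readonly_user"),                  // db.user
		DBStatementKey.String("SELECT * FROM wuser_table"), // db.statement
		DBOperationKey.String("SELECT"),                    // db.operation
	}
}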
const ( // An identifier for the database management system (DBMS) product being used. See // below for a list of well-known identifiers. // // Type: Enum // Required: Always // Stability: stable DBSystemKey = attribute.Key("db.system") // The connection string used to connect to the database. It is recommended to // remove embedded credentials. // // Type: string // Required: No // Stability: stable // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' DBConnectionStringKey = attribute.Key("db.connection_string") // Username for accessing the database. // // Type: string // Required: No // Stability: stable // Examples: 'readonly_user', 'reporting_user' DBUserKey = attribute.Key("db.user") // The fully-qualified class name of the [Java Database Connectivity // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver // used to connect. // // Type: string // Required: No // Stability: stable // Examples: 'org.postgresql.Driver', // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") // This attribute is used to report the name of the database being accessed. For // commands that switch the database, this should be set to the target database // (even if the command fails). // // Type: string // Required: Required, if applicable. // Stability: stable // Examples: 'customers', 'main' // Note: In some SQL databases, the database name to be used is called "schema // name". In case there are multiple layers that could be considered for database // name (e.g. Oracle instance name and schema name), the database name to be used // is the more specific layer (e.g. Oracle schema name). DBNameKey = attribute.Key("db.name") // The database statement being executed. // // Type: string // Required: Required if applicable and not explicitly disabled via // instrumentation configuration. // Stability: stable // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' // Note: The value may be sanitized to exclude sensitive information. DBStatementKey = attribute.Key("db.statement") // The name of the operation being executed, e.g. the [MongoDB command // name](https://docs.mongodb.com/manual/reference/command/#database-operations) // such as `findAndModify`, or the SQL keyword. // // Type: string // Required: Required, if `db.statement` is not applicable. // Stability: stable // Examples: 'findAndModify', 'HMSET', 'SELECT' // Note: When setting this to an SQL keyword, it is not recommended to attempt any // client-side parsing of `db.statement` just to get this property, but it should // be set if the operation name is provided by the library being instrumented. If // the SQL statement has an ambiguous operation, or performs more than one // operation, this value may be omitted. DBOperationKey = attribute.Key("db.operation") ) var ( // Some other SQL database. Fallback only. 
See notes DBSystemOtherSQL = DBSystemKey.String("other_sql") // Microsoft SQL Server DBSystemMSSQL = DBSystemKey.String("mssql") // MySQL DBSystemMySQL = DBSystemKey.String("mysql") // Oracle Database DBSystemOracle = DBSystemKey.String("oracle") // IBM DB2 DBSystemDB2 = DBSystemKey.String("db2") // PostgreSQL DBSystemPostgreSQL = DBSystemKey.String("postgresql") // Amazon Redshift DBSystemRedshift = DBSystemKey.String("redshift") // Apache Hive DBSystemHive = DBSystemKey.String("hive") // Cloudscape DBSystemCloudscape = DBSystemKey.String("cloudscape") // HyperSQL DataBase DBSystemHSQLDB = DBSystemKey.String("hsqldb") // Progress Database DBSystemProgress = DBSystemKey.String("progress") // SAP MaxDB DBSystemMaxDB = DBSystemKey.String("maxdb") // SAP HANA DBSystemHanaDB = DBSystemKey.String("hanadb") // Ingres DBSystemIngres = DBSystemKey.String("ingres") // FirstSQL DBSystemFirstSQL = DBSystemKey.String("firstsql") // EnterpriseDB DBSystemEDB = DBSystemKey.String("edb") // InterSystems Caché DBSystemCache = DBSystemKey.String("cache") // Adabas (Adaptable Database System) DBSystemAdabas = DBSystemKey.String("adabas") // Firebird DBSystemFirebird = DBSystemKey.String("firebird") // Apache Derby DBSystemDerby = DBSystemKey.String("derby") // FileMaker DBSystemFilemaker = DBSystemKey.String("filemaker") // Informix DBSystemInformix = DBSystemKey.String("informix") // InstantDB DBSystemInstantDB = DBSystemKey.String("instantdb") // InterBase DBSystemInterbase = DBSystemKey.String("interbase") // MariaDB DBSystemMariaDB = DBSystemKey.String("mariadb") // Netezza DBSystemNetezza = DBSystemKey.String("netezza") // Pervasive PSQL DBSystemPervasive = DBSystemKey.String("pervasive") // PointBase DBSystemPointbase = DBSystemKey.String("pointbase") // SQLite DBSystemSqlite = DBSystemKey.String("sqlite") // Sybase DBSystemSybase = DBSystemKey.String("sybase") // Teradata DBSystemTeradata = DBSystemKey.String("teradata") // Vertica DBSystemVertica = DBSystemKey.String("vertica") // H2 DBSystemH2 = DBSystemKey.String("h2") // ColdFusion IMQ DBSystemColdfusion = DBSystemKey.String("coldfusion") // Apache Cassandra DBSystemCassandra = DBSystemKey.String("cassandra") // Apache HBase DBSystemHBase = DBSystemKey.String("hbase") // MongoDB DBSystemMongoDB = DBSystemKey.String("mongodb") // Redis DBSystemRedis = DBSystemKey.String("redis") // Couchbase DBSystemCouchbase = DBSystemKey.String("couchbase") // CouchDB DBSystemCouchDB = DBSystemKey.String("couchdb") // Microsoft Azure Cosmos DB DBSystemCosmosDB = DBSystemKey.String("cosmosdb") // Amazon DynamoDB DBSystemDynamoDB = DBSystemKey.String("dynamodb") // Neo4j DBSystemNeo4j = DBSystemKey.String("neo4j") // Apache Geode DBSystemGeode = DBSystemKey.String("geode") // Elasticsearch DBSystemElasticsearch = DBSystemKey.String("elasticsearch") // Memcached DBSystemMemcached = DBSystemKey.String("memcached") // CockroachDB DBSystemCockroachdb = DBSystemKey.String("cockroachdb") ) // Connection-level attributes for Microsoft SQL Server const ( // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en- // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) // connecting to. This name is used to determine the port of a named instance. // // Type: string // Required: No // Stability: stable // Examples: 'MSSQLSERVER' // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer // required (but still recommended if non-standard). 
DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") ) // Call-level attributes for Cassandra const ( // The fetch size used for paging, i.e. how many rows will be returned at once. // // Type: int // Required: No // Stability: stable // Examples: 5000 DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") // The consistency level of the query. Based on consistency values from // [CQL](https://docs.datastax.com/en/cassandra- // oss/3.0/cassandra/dml/dmlConfigConsistency.html). // // Type: Enum // Required: No // Stability: stable DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") // The name of the primary table that the operation is acting upon, including the // keyspace name (if applicable). // // Type: string // Required: Recommended if available. // Stability: stable // Examples: 'mytable' // Note: This mirrors the db.sql.table attribute but references cassandra rather // than sql. It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting upon an // anonymous table, or more than one table, this value MUST NOT be set. DBCassandraTableKey = attribute.Key("db.cassandra.table") // Whether or not the query is idempotent. // // Type: boolean // Required: No // Stability: stable DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") // The number of times a query was speculatively executed. Not set or `0` if the // query was not executed speculatively. // // Type: int // Required: No // Stability: stable // Examples: 0, 2 DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") // The ID of the coordinating node for a query. // // Type: string // Required: No // Stability: stable // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") // The data center of the coordinating node for a query. // // Type: string // Required: No // Stability: stable // Examples: 'us-west-2' DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") ) var ( // all DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") // each_quorum DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") // quorum DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") // local_quorum DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") // one DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") // two DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") // three DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") // local_one DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") // any DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") // serial DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") // local_serial DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") ) // Call-level attributes for Redis const ( // The index of the database being accessed as used in the [`SELECT` // command](https://redis.io/commands/select), provided as an integer. To be used // instead of the generic `db.name` attribute. 
// // Type: int // Required: Required, if other than the default database (`0`). // Stability: stable // Examples: 0, 1, 15 DBRedisDBIndexKey = attribute.Key("db.redis.database_index") ) // Call-level attributes for MongoDB const ( // The collection being accessed within the database stated in `db.name`. // // Type: string // Required: Always // Stability: stable // Examples: 'customers', 'products' DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") ) // Call-level attributes for SQL databases const ( // The name of the primary table that the operation is acting upon, including the // database name (if applicable). // // Type: string // Required: Recommended if available. // Stability: stable // Examples: 'public.users', 'customers' // Note: It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting upon an // anonymous table, or more than one table, this value MUST NOT be set. DBSQLTableKey = attribute.Key("db.sql.table") ) // This document defines the attributes used to report a single exception associated with a span. const ( // The type of the exception (its fully-qualified class name, if applicable). The // dynamic type of the exception should be preferred over the static type in // languages that support it. // // Type: string // Required: No // Stability: stable // Examples: 'java.net.ConnectException', 'OSError' ExceptionTypeKey = attribute.Key("exception.type") // The exception message. // // Type: string // Required: No // Stability: stable // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" ExceptionMessageKey = attribute.Key("exception.message") // A stacktrace as a string in the natural representation for the language // runtime. The representation is to be determined and documented by each language // SIG. // // Type: string // Required: No // Stability: stable // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test // exception\\n at ' // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' ExceptionStacktraceKey = attribute.Key("exception.stacktrace") // SHOULD be set to true if the exception event is recorded at a point where it is // known that the exception is escaping the scope of the span. // // Type: boolean // Required: No // Stability: stable // Note: An exception is considered to have escaped (or left) the scope of a span, // if that span is ended while the exception is still logically "in flight". // This may be actually "in flight" in some languages (e.g. if the exception // is passed to a Context manager's `__exit__` method in Python) but will // usually be caught at the point of recording the exception in most languages. // It is usually not possible to determine at the point where an exception is // thrown // whether it will escape the scope of a span. // However, it is trivial to know that an exception // will escape, if one checks for an active exception just before ending the span, // as done in the [example above](#recording-an-exception). // It follows that an exception may still escape the scope of the span // even if the `exception.escaped` attribute was not set or set to false, // since the event might have been recorded at a time where it was not // clear whether the exception will escape. 
ExceptionEscapedKey = attribute.Key("exception.escaped") ) // This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. const ( // Type of the trigger which caused this function execution. // // Type: Enum // Required: No // Stability: stable // Note: For the server/consumer span on the incoming side, // `faas.trigger` MUST be set. // Clients invoking FaaS instances usually cannot set `faas.trigger`, // since they would typically need to look in the payload to determine // the event type. If clients set it, it should be the same as the // trigger that corresponding incoming would have (i.e., this has // nothing to do with the underlying transport used to make the API // call to invoke the lambda, which is often HTTP). FaaSTriggerKey = attribute.Key("faas.trigger") // The execution ID of the current function execution. // // Type: string // Required: No // Stability: stable // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' FaaSExecutionKey = attribute.Key("faas.execution") ) var ( // A response to some data source operation such as a database or filesystem read/write FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") // To provide an answer to an inbound HTTP request FaaSTriggerHTTP = FaaSTriggerKey.String("http") // A function is set to be executed when messages are sent to a messaging system FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") // A function is scheduled to be executed regularly FaaSTriggerTimer = FaaSTriggerKey.String("timer") // If none of the others apply FaaSTriggerOther = FaaSTriggerKey.String("other") ) // Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. const ( // The name of the source on which the triggering operation was performed. For // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos // DB to the database name. // // Type: string // Required: Always // Stability: stable // Examples: 'myBucketName', 'myDBName' FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") // Describes the type of the operation that was performed on the data. // // Type: Enum // Required: Always // Stability: stable FaaSDocumentOperationKey = attribute.Key("faas.document.operation") // A string containing the time when the data was accessed in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed // in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // Required: Always // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSDocumentTimeKey = attribute.Key("faas.document.time") // The document name/table subjected to the operation. For example, in Cloud // Storage or S3 is the name of the file, and in Cosmos DB the table name. // // Type: string // Required: No // Stability: stable // Examples: 'myFile.txt', 'myTableName' FaaSDocumentNameKey = attribute.Key("faas.document.name") ) var ( // When a new object is created FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") // When an object is modified FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") // When an object is deleted FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") ) // Semantic Convention for FaaS scheduled to be executed regularly. 
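// The function below is an illustrative, non-normative sketch added for documentation purposes only; it is not generated from the specification. It shows the attributes an instrumentation might record for a regularly scheduled (timer-triggered) function invocation, combining the trigger value above with the timer attributes defined in the following block. The function name, timestamp, and cron expression are hypothetical examples.
func exampleFaaSTimerAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		FaaSTriggerTimer,                           // faas.trigger
		FaaSTimeKey.String("2020-01-23T13:47:06Z"), // faas.time
		FaaSCronKey.String("0/5 * * * ? *"),        // faas.cron
		FaaSColdstartKey.Bool(false),               // faas.coldstart
	}
}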
const ( // A string containing the function invocation time in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed // in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // Required: Always // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSTimeKey = attribute.Key("faas.time") // A string containing the schedule period as [Cron Expression](https://docs.oracl // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). // // Type: string // Required: No // Stability: stable // Examples: '0/5 * * * ? *' FaaSCronKey = attribute.Key("faas.cron") ) // Contains additional attributes for incoming FaaS spans. const ( // A boolean that is true if the serverless function is executed for the first // time (aka cold-start). // // Type: boolean // Required: No // Stability: stable FaaSColdstartKey = attribute.Key("faas.coldstart") ) // Contains additional attributes for outgoing FaaS spans. const ( // The name of the invoked function. // // Type: string // Required: Always // Stability: stable // Examples: 'my-function' // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked // function. FaaSInvokedNameKey = attribute.Key("faas.invoked_name") // The cloud provider of the invoked function. // // Type: Enum // Required: Always // Stability: stable // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked // function. FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") // The cloud region of the invoked function. // // Type: string // Required: For some cloud providers, like AWS or GCP, the region in which a // function is hosted is essential to uniquely identify the function and also part // of its endpoint. Since it's part of the endpoint being called, the region is // always known to clients. In these cases, `faas.invoked_region` MUST be set // accordingly. If the region is unknown to the client or not required for // identifying the invoked function, setting `faas.invoked_region` is optional. // Stability: stable // Examples: 'eu-central-1' // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked // function. FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") ) var ( // Alibaba Cloud FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") // Amazon Web Services FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") // Microsoft Azure FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") // Google Cloud Platform FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") // Tencent Cloud FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") ) // These attributes may be used for any network related operation. const ( // Transport protocol used. See note below. // // Type: Enum // Required: No // Stability: stable NetTransportKey = attribute.Key("net.transport") // Remote address of the peer (dotted decimal for IPv4 or // [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6) // // Type: string // Required: No // Stability: stable // Examples: '127.0.0.1' NetPeerIPKey = attribute.Key("net.peer.ip") // Remote port number. // // Type: int // Required: No // Stability: stable // Examples: 80, 8080, 443 NetPeerPortKey = attribute.Key("net.peer.port") // Remote hostname or similar, see note below. // // Type: string // Required: No // Stability: stable // Examples: 'example.com' // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an extra // DNS lookup. 
NetPeerNameKey = attribute.Key("net.peer.name") // Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host. // // Type: string // Required: No // Stability: stable // Examples: '192.168.0.1' NetHostIPKey = attribute.Key("net.host.ip") // Like `net.peer.port` but for the host port. // // Type: int // Required: No // Stability: stable // Examples: 35555 NetHostPortKey = attribute.Key("net.host.port") // Local hostname or similar, see note below. // // Type: string // Required: No // Stability: stable // Examples: 'localhost' NetHostNameKey = attribute.Key("net.host.name") // The internet connection type currently being used by the host. // // Type: Enum // Required: No // Stability: stable // Examples: 'wifi' NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") // This describes more details regarding the connection.type. It may be the type // of cell technology connection, but it could be used for describing details // about a wifi connection. // // Type: Enum // Required: No // Stability: stable // Examples: 'LTE' NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") // The name of the mobile carrier. // // Type: string // Required: No // Stability: stable // Examples: 'sprint' NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") // The mobile carrier country code. // // Type: string // Required: No // Stability: stable // Examples: '310' NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") // The mobile carrier network code. // // Type: string // Required: No // Stability: stable // Examples: '001' NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") // The ISO 3166-1 alpha-2 2-character country code associated with the mobile // carrier network. // // Type: string // Required: No // Stability: stable // Examples: 'DE' NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") ) var ( // ip_tcp NetTransportTCP = NetTransportKey.String("ip_tcp") // ip_udp NetTransportUDP = NetTransportKey.String("ip_udp") // Another IP-based protocol NetTransportIP = NetTransportKey.String("ip") // Unix Domain socket. See below NetTransportUnix = NetTransportKey.String("unix") // Named or anonymous pipe. See note below NetTransportPipe = NetTransportKey.String("pipe") // In-process communication NetTransportInProc = NetTransportKey.String("inproc") // Something else (non IP-based) NetTransportOther = NetTransportKey.String("other") ) var ( // wifi NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") // wired NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") // cell NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") // unavailable NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") // unknown NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") ) var ( // GPRS NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") // EDGE NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") // UMTS NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") // CDMA NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") // EVDO Rel. 0 NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") // EVDO Rev. 
A NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") // CDMA2000 1XRTT NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") // HSDPA NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") // HSUPA NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") // HSPA NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") // IDEN NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") // EVDO Rev. B NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") // LTE NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") // EHRPD NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") // HSPAP NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") // GSM NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") // TD-SCDMA NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") // IWLAN NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") // 5G NR (New Radio) NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") // 5G NRNSA (New Radio Non-Standalone) NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") // LTE CA NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") ) // Operations that access some remote service. const ( // The [`service.name`](../../resource/semantic_conventions/README.md#service) of // the remote service. SHOULD be equal to the actual `service.name` resource // attribute of the remote service if any. // // Type: string // Required: No // Stability: stable // Examples: 'AuthTokenCache' PeerServiceKey = attribute.Key("peer.service") ) // These attributes may be used for any operation with an authenticated and/or authorized enduser. const ( // Username or client_id extracted from the access token or // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the // inbound request from outside the system. // // Type: string // Required: No // Stability: stable // Examples: 'username' EnduserIDKey = attribute.Key("enduser.id") // Actual/assumed role the client is making the request under extracted from token // or application security context. // // Type: string // Required: No // Stability: stable // Examples: 'admin' EnduserRoleKey = attribute.Key("enduser.role") // Scopes or granted authorities the client currently possesses extracted from // token or application security context. The value would come from the scope // associated with an [OAuth 2.0 Access // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value // in a [SAML 2.0 Assertion](http://docs.oasis- // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). // // Type: string // Required: No // Stability: stable // Examples: 'read:message, write:files' EnduserScopeKey = attribute.Key("enduser.scope") ) // These attributes may be used for any operation to store information about a thread that started a span. const ( // Current "managed" thread ID (as opposed to OS thread ID). // // Type: int // Required: No // Stability: stable // Examples: 42 ThreadIDKey = attribute.Key("thread.id") // Current thread name. // // Type: string // Required: No // Stability: stable // Examples: 'main' ThreadNameKey = attribute.Key("thread.name") ) // These attributes allow to report this unit of code and therefore to provide more context about the span. 
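// The function below is an illustrative, non-normative sketch added for documentation purposes only; it is not generated from the specification. It shows how the thread attributes above and the source-code attributes defined in the following block might be set together on a span. The function name and all values are hypothetical examples.
func exampleCodeLocationAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		CodeFunctionKey.String("serveRequest"),               // code.function
		CodeNamespaceKey.String("com.example.MyHTTPService"), // code.namespace
		CodeFilepathKey.String("/usr/local/MyApplication/content_root/app/index.php"), // code.filepath
		CodeLineNumberKey.Int(42),                            // code.lineno
		ThreadIDKey.Int(42),                                  // thread.id
		ThreadNameKey.String("main"),                         // thread.name
	}
}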
const ( // The method or function name, or equivalent (usually rightmost part of the code // unit's name). // // Type: string // Required: No // Stability: stable // Examples: 'serveRequest' CodeFunctionKey = attribute.Key("code.function") // The "namespace" within which `code.function` is defined. Usually the qualified // class or module name, such that `code.namespace` + some separator + // `code.function` form a unique identifier for the code unit. // // Type: string // Required: No // Stability: stable // Examples: 'com.example.MyHTTPService' CodeNamespaceKey = attribute.Key("code.namespace") // The source code file name that identifies the code unit as uniquely as possible // (preferably an absolute file path). // // Type: string // Required: No // Stability: stable // Examples: '/usr/local/MyApplication/content_root/app/index.php' CodeFilepathKey = attribute.Key("code.filepath") // The line number in `code.filepath` best representing the operation. It SHOULD // point within the code unit named in `code.function`. // // Type: int // Required: No // Stability: stable // Examples: 42 CodeLineNumberKey = attribute.Key("code.lineno") ) // This document defines semantic conventions for HTTP client and server Spans. const ( // HTTP request method. // // Type: string // Required: Always // Stability: stable // Examples: 'GET', 'POST', 'HEAD' HTTPMethodKey = attribute.Key("http.method") // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. // Usually the fragment is not transmitted over HTTP, but if it is known, it // should be included nevertheless. // // Type: string // Required: No // Stability: stable // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' // Note: `http.url` MUST NOT contain credentials passed via URL in form of // `https://username:password@www.example.com/`. In such case the attribute's // value should be `https://www.example.com/`. HTTPURLKey = attribute.Key("http.url") // The full request target as passed in a HTTP request line or equivalent. // // Type: string // Required: No // Stability: stable // Examples: '/path/12314/?q=ddds#123' HTTPTargetKey = attribute.Key("http.target") // The value of the [HTTP host // header](https://tools.ietf.org/html/rfc7230#section-5.4). An empty Host header // should also be reported, see note. // // Type: string // Required: No // Stability: stable // Examples: 'www.example.org' // Note: When the header is present but empty the attribute SHOULD be set to the // empty string. Note that this is a valid situation that is expected in certain // cases, according the aforementioned [section of RFC // 7230](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is not // set the attribute MUST NOT be set. HTTPHostKey = attribute.Key("http.host") // The URI scheme identifying the used protocol. // // Type: string // Required: No // Stability: stable // Examples: 'http', 'https' HTTPSchemeKey = attribute.Key("http.scheme") // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). // // Type: int // Required: If and only if one was received/sent. // Stability: stable // Examples: 200 HTTPStatusCodeKey = attribute.Key("http.status_code") // Kind of HTTP protocol used. // // Type: Enum // Required: No // Stability: stable // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP` // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed. 
HTTPFlavorKey = attribute.Key("http.flavor") // Value of the [HTTP User- // Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by the // client. // // Type: string // Required: No // Stability: stable // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' HTTPUserAgentKey = attribute.Key("http.user_agent") // The size of the request payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For // requests using transport encoding, this should be the compressed size. // // Type: int // Required: No // Stability: stable // Examples: 3495 HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") // The size of the uncompressed request payload body after transport decoding. Not // set if transport encoding not used. // // Type: int // Required: No // Stability: stable // Examples: 5493 HTTPRequestContentLengthUncompressedKey = attribute.Key("http.request_content_length_uncompressed") // The size of the response payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For // requests using transport encoding, this should be the compressed size. // // Type: int // Required: No // Stability: stable // Examples: 3495 HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") // The size of the uncompressed response payload body after transport decoding. // Not set if transport encoding not used. // // Type: int // Required: No // Stability: stable // Examples: 5493 HTTPResponseContentLengthUncompressedKey = attribute.Key("http.response_content_length_uncompressed") // The ordinal number of request re-sending attempt. // // Type: int // Required: If and only if a request was retried. // Stability: stable // Examples: 3 HTTPRetryCountKey = attribute.Key("http.retry_count") ) var ( // HTTP/1.0 HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") // HTTP/1.1 HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") // HTTP/2 HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") // HTTP/3 HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") // SPDY protocol HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") // QUIC protocol HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") ) // Semantic Convention for HTTP Server const ( // The primary server name of the matched virtual host. This should be obtained // via configuration. If no such configuration can be obtained, this attribute // MUST NOT be set ( `net.host.name` should be used instead). // // Type: string // Required: No // Stability: stable // Examples: 'example.com' // Note: `http.url` is usually not readily available on the server side but would // have to be assembled in a cumbersome and sometimes lossy process from other // information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus // preferred to supply the raw data that is available. HTTPServerNameKey = attribute.Key("http.server_name") // The matched route (path template). // // Type: string // Required: No // Stability: stable // Examples: '/users/:userID?' HTTPRouteKey = attribute.Key("http.route") // The IP address of the original client behind all proxies, if known (e.g. from // [X-Forwarded-For](https://developer.mozilla.org/en- // US/docs/Web/HTTP/Headers/X-Forwarded-For)). 
// // Type: string // Required: No // Stability: stable // Examples: '83.164.160.102' // Note: This is not necessarily the same as `net.peer.ip`, which would // identify the network-level peer, which may be a proxy. // This attribute should be set when a source of information different // from the one used for `net.peer.ip`, is available even if that other // source just confirms the same value as `net.peer.ip`. // Rationale: For `net.peer.ip`, one typically does not know if it // comes from a proxy, reverse proxy, or the actual client. Setting // `http.client_ip` when it's the same as `net.peer.ip` means that // one is at least somewhat confident that the address is not that of // the closest proxy. HTTPClientIPKey = attribute.Key("http.client_ip") ) // Attributes that exist for multiple DynamoDB request types. const ( // The keys in the `RequestItems` object field. // // Type: string[] // Required: No // Stability: stable // Examples: 'Users', 'Cats' AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") // The JSON-serialized value of each item in the `ConsumedCapacity` response // field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": // "string", "WriteCapacityUnits": number }' AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") // The JSON-serialized value of the `ItemCollectionMetrics` response field. // // Type: string // Required: No // Stability: stable // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. // // Type: double // Required: No // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. // // Type: double // Required: No // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") // The value of the `ConsistentRead` request parameter. // // Type: boolean // Required: No // Stability: stable AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") // The value of the `ProjectionExpression` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, // ProductReviews' AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") // The value of the `Limit` request parameter. // // Type: int // Required: No // Stability: stable // Examples: 10 AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") // The value of the `AttributesToGet` request parameter. 
// // Type: string[] // Required: No // Stability: stable // Examples: 'lives', 'id' AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") // The value of the `IndexName` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'name_to_group' AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") // The value of the `Select` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'ALL_ATTRIBUTES', 'COUNT' AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") ) // DynamoDB.CreateTable const ( // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request // field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": // number, "WriteCapacityUnits": number } }' AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request // field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], // "ProjectionType": "string" } }' AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") ) // DynamoDB.ListTables const ( // The value of the `ExclusiveStartTableName` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'Users', 'CatsTable' AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") // The number of items in the `TableNames` response parameter. // // Type: int // Required: No // Stability: stable // Examples: 20 AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") ) // DynamoDB.Query const ( // The value of the `ScanIndexForward` request parameter. // // Type: boolean // Required: No // Stability: stable AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") ) // DynamoDB.Scan const ( // The value of the `Segment` request parameter. // // Type: int // Required: No // Stability: stable // Examples: 10 AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") // The value of the `TotalSegments` request parameter. // // Type: int // Required: No // Stability: stable // Examples: 100 AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") // The value of the `Count` response parameter. // // Type: int // Required: No // Stability: stable // Examples: 10 AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") // The value of the `ScannedCount` response parameter. // // Type: int // Required: No // Stability: stable // Examples: 50 AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") ) // DynamoDB.UpdateTable const ( // The JSON-serialized value of each item in the `AttributeDefinitions` request // field.
// // Type: string[] // Required: No // Stability: stable // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") // The JSON-serialized value of each item in the the `GlobalSecondaryIndexUpdates` // request field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { // "AttributeName": "string", "KeyType": "string" } ], "Projection": { // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": // number } }' AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") ) // This document defines the attributes used in messaging systems. const ( // A string identifying the messaging system. // // Type: string // Required: Always // Stability: stable // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' MessagingSystemKey = attribute.Key("messaging.system") // The message destination name. This might be equal to the span name but is // required nevertheless. // // Type: string // Required: Always // Stability: stable // Examples: 'MyQueue', 'MyTopic' MessagingDestinationKey = attribute.Key("messaging.destination") // The kind of message destination // // Type: Enum // Required: Required only if the message destination is either a `queue` or // `topic`. // Stability: stable MessagingDestinationKindKey = attribute.Key("messaging.destination_kind") // A boolean that is true if the message destination is temporary. // // Type: boolean // Required: If missing, it is assumed to be false. // Stability: stable MessagingTempDestinationKey = attribute.Key("messaging.temp_destination") // The name of the transport protocol. // // Type: string // Required: No // Stability: stable // Examples: 'AMQP', 'MQTT' MessagingProtocolKey = attribute.Key("messaging.protocol") // The version of the transport protocol. // // Type: string // Required: No // Stability: stable // Examples: '0.9.1' MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version") // Connection string. // // Type: string // Required: No // Stability: stable // Examples: 'tibjmsnaming://localhost:7222', // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' MessagingURLKey = attribute.Key("messaging.url") // A value used by the messaging system as an identifier for the message, // represented as a string. // // Type: string // Required: No // Stability: stable // Examples: '452a7c7c7c7048c2f887f61572b18fc2' MessagingMessageIDKey = attribute.Key("messaging.message_id") // The [conversation ID](#conversations) identifying the conversation to which the // message belongs, represented as a string. Sometimes called "Correlation ID". // // Type: string // Required: No // Stability: stable // Examples: 'MyConversationID' MessagingConversationIDKey = attribute.Key("messaging.conversation_id") // The (uncompressed) size of the message payload in bytes. Also use this // attribute if it is unknown whether the compressed or uncompressed payload size // is reported. // // Type: int // Required: No // Stability: stable // Examples: 2738 MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes") // The compressed size of the message payload in bytes. 
// // Type: int // Required: No // Stability: stable // Examples: 2048 MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes") ) var ( // A message sent to a queue MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") // A message sent to a topic MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") ) // Semantic convention for a consumer of messages received from a messaging system const ( // A string identifying the kind of message consumption as defined in the // [Operation names](#operation-names) section above. If the operation is "send", // this attribute MUST NOT be set, since the operation can be inferred from the // span kind in that case. // // Type: Enum // Required: No // Stability: stable MessagingOperationKey = attribute.Key("messaging.operation") // The identifier for the consumer receiving a message. For Kafka, set it to // `{messaging.kafka.consumer_group} - {messaging.kafka.client_id}`, if both are // present, or only `messaging.kafka.consumer_group`. For brokers, such as // RabbitMQ and Artemis, set it to the `client_id` of the client consuming the // message. // // Type: string // Required: No // Stability: stable // Examples: 'mygroup - client-6' MessagingConsumerIDKey = attribute.Key("messaging.consumer_id") ) var ( // receive MessagingOperationReceive = MessagingOperationKey.String("receive") // process MessagingOperationProcess = MessagingOperationKey.String("process") ) // Attributes for RabbitMQ const ( // RabbitMQ message routing key. // // Type: string // Required: Unless it is empty. // Stability: stable // Examples: 'myKey' MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key") ) // Attributes for Apache Kafka const ( // Message keys in Kafka are used for grouping alike messages to ensure they're // processed on the same partition. They differ from `messaging.message_id` in // that they're not unique. If the key is `null`, the attribute MUST NOT be set. // // Type: string // Required: No // Stability: stable // Examples: 'myKey' // Note: If the key type is not string, its string representation has to be // supplied for the attribute. If the key has no unambiguous, canonical string // form, don't include its value. MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key") // Name of the Kafka Consumer Group that is handling the message. Only applies to // consumers, not producers. // // Type: string // Required: No // Stability: stable // Examples: 'my-group' MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group") // Client ID for the Consumer or Producer that is handling the message. // // Type: string // Required: No // Stability: stable // Examples: 'client-5' MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") // Partition the message is sent to. // // Type: int // Required: No // Stability: stable // Examples: 2 MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition") // A boolean that is true if the message is a tombstone. // // Type: boolean // Required: If missing, it is assumed to be false. // Stability: stable MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone") ) // Attributes for Apache RocketMQ const ( // Namespace of RocketMQ resources, resources in different namespaces are // individual.
// // Type: string // Required: Always // Stability: stable // Examples: 'myNamespace' MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") // Name of the RocketMQ producer/consumer group that is handling the message. The // client type is identified by the SpanKind. // // Type: string // Required: Always // Stability: stable // Examples: 'myConsumerGroup' MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") // The unique identifier for each client. // // Type: string // Required: Always // Stability: stable // Examples: 'myhost@8742@s8083jm' MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") // Type of message. // // Type: Enum // Required: No // Stability: stable MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message_type") // The secondary classifier of message besides topic. // // Type: string // Required: No // Stability: stable // Examples: 'tagA' MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message_tag") // Key(s) of message, another way to mark message besides message id. // // Type: string[] // Required: No // Stability: stable // Examples: 'keyA', 'keyB' MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message_keys") // Model of message consumption. This only applies to consumer spans. // // Type: Enum // Required: No // Stability: stable MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") ) var ( // Normal message MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") // FIFO message MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") // Delay message MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") // Transaction message MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") ) var ( // Clustering consumption model MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") // Broadcasting consumption model MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") ) // This document defines semantic conventions for remote procedure calls. const ( // A string identifying the remoting system. See below for a list of well-known // identifiers. // // Type: Enum // Required: Always // Stability: stable RPCSystemKey = attribute.Key("rpc.system") // The full (logical) name of the service being called, including its package // name, if applicable. // // Type: string // Required: No, but recommended // Stability: stable // Examples: 'myservice.EchoService' // Note: This is the logical name of the service from the RPC interface // perspective, which can be different from the name of any implementing class. // The `code.namespace` attribute may be used to store the latter (despite the // attribute name, it may include a class name; e.g., class with method actually // executing the call on the server side, RPC client stub class on the client // side). RPCServiceKey = attribute.Key("rpc.service") // The name of the (logical) method being called, must be equal to the $method // part in the span name. // // Type: string // Required: No, but recommended // Stability: stable // Examples: 'exampleMethod' // Note: This is the logical name of the method from the RPC interface // perspective, which can be different from the name of any implementing // method/function. 
The `code.function` attribute may be used to store the latter // (e.g., method actually executing the call on the server side, RPC client stub // method on the client side). RPCMethodKey = attribute.Key("rpc.method") ) var ( // gRPC RPCSystemGRPC = RPCSystemKey.String("grpc") // Java RMI RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") // .NET WCF RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") // Apache Dubbo RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") ) // Tech-specific attributes for gRPC. const ( // The [numeric status // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC // request. // // Type: Enum // Required: Always // Stability: stable RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ) var ( // OK RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) // CANCELLED RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) // UNKNOWN RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) // INVALID_ARGUMENT RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) // DEADLINE_EXCEEDED RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) // NOT_FOUND RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) // ALREADY_EXISTS RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) // PERMISSION_DENIED RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) // RESOURCE_EXHAUSTED RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) // FAILED_PRECONDITION RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) // ABORTED RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) // OUT_OF_RANGE RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) // UNIMPLEMENTED RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) // INTERNAL RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) // UNAVAILABLE RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) // DATA_LOSS RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) // UNAUTHENTICATED RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) ) // Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). const ( // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC // 1.0 does not specify this, the value can be omitted. // // Type: string // Required: If missing, it is assumed to be "1.0". // Stability: stable // Examples: '2.0', '1.0' RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") // `id` property of request or response. Since protocol allows id to be int, // string, `null` or missing (for notifications), value is expected to be cast to // string for simplicity. Use empty string in case of `null` value. Omit entirely // if this is a notification. // // Type: string // Required: No // Stability: stable // Examples: '10', 'request-7', '' RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") // `error.code` property of response if it is an error response. // // Type: int // Required: If missing, response is assumed to be successful. // Stability: stable // Examples: -32700, 100 RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") // `error.message` property of response if it is an error response. // // Type: string // Required: No // Stability: stable // Examples: 'Parse error', 'User already exists' RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") ) // RPC received/sent message. const ( // Whether this is a received or sent message. 
// // Type: Enum // Required: No // Stability: stable MessageTypeKey = attribute.Key("message.type") // MUST be calculated as two different counters starting from `1` one for sent // messages and one for received message. // // Type: int // Required: No // Stability: stable // Note: This way we guarantee that the values will be consistent between // different implementations. MessageIDKey = attribute.Key("message.id") // Compressed size of the message in bytes. // // Type: int // Required: No // Stability: stable MessageCompressedSizeKey = attribute.Key("message.compressed_size") // Uncompressed size of the message in bytes. // // Type: int // Required: No // Stability: stable MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") ) var ( // sent MessageTypeSent = MessageTypeKey.String("SENT") // received MessageTypeReceived = MessageTypeKey.String("RECEIVED") ) opentelemetry-go-1.21.0/semconv/v1.13.0/000077500000000000000000000000001452547353200174645ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.13.0/doc.go000066400000000000000000000016641452547353200205670ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package semconv implements OpenTelemetry semantic conventions. // // OpenTelemetry semantic conventions are agreed standardized naming // patterns for OpenTelemetry things. This package represents the conventions // as of the v1.13.0 version of the OpenTelemetry specification. package semconv // import "go.opentelemetry.io/otel/semconv/v1.13.0" opentelemetry-go-1.21.0/semconv/v1.13.0/exception.go000066400000000000000000000014301452547353200220070ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.13.0" const ( // ExceptionEventName is the name of the Span event representing an exception. ExceptionEventName = "exception" ) opentelemetry-go-1.21.0/semconv/v1.13.0/http.go000066400000000000000000000014401452547353200207710ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.13.0" // HTTP scheme attributes. var ( HTTPSchemeHTTP = HTTPSchemeKey.String("http") HTTPSchemeHTTPS = HTTPSchemeKey.String("https") ) opentelemetry-go-1.21.0/semconv/v1.13.0/httpconv/000077500000000000000000000000001452547353200213315ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.13.0/httpconv/http.go000066400000000000000000000146571452547353200226540ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package httpconv provides OpenTelemetry HTTP semantic conventions for // tracing telemetry. package httpconv // import "go.opentelemetry.io/otel/semconv/v1.13.0/httpconv" import ( "net/http" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/semconv/internal/v2" semconv "go.opentelemetry.io/otel/semconv/v1.13.0" ) var ( nc = &internal.NetConv{ NetHostNameKey: semconv.NetHostNameKey, NetHostPortKey: semconv.NetHostPortKey, NetPeerNameKey: semconv.NetPeerNameKey, NetPeerPortKey: semconv.NetPeerPortKey, NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, NetSockPeerPortKey: semconv.NetSockPeerPortKey, NetTransportOther: semconv.NetTransportOther, NetTransportTCP: semconv.NetTransportTCP, NetTransportUDP: semconv.NetTransportUDP, NetTransportInProc: semconv.NetTransportInProc, } hc = &internal.HTTPConv{ NetConv: nc, EnduserIDKey: semconv.EnduserIDKey, HTTPClientIPKey: semconv.HTTPClientIPKey, HTTPFlavorKey: semconv.HTTPFlavorKey, HTTPMethodKey: semconv.HTTPMethodKey, HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey, HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey, HTTPRouteKey: semconv.HTTPRouteKey, HTTPSchemeHTTP: semconv.HTTPSchemeHTTP, HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS, HTTPStatusCodeKey: semconv.HTTPStatusCodeKey, HTTPTargetKey: semconv.HTTPTargetKey, HTTPURLKey: semconv.HTTPURLKey, HTTPUserAgentKey: semconv.HTTPUserAgentKey, } ) // ClientResponse returns trace attributes for an HTTP response received by a // client from a server. It will return the following attributes if the related // values are defined in resp: "http.status.code", // "http.response_content_length". // // This does not add all OpenTelemetry required attributes for an HTTP event, // it assumes ClientRequest was used to create the span with a complete set of // attributes. If a complete set of attributes can be generated using the // request contained in resp. For example: // // append(ClientResponse(resp), ClientRequest(resp.Request)...) 
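// // If a full client span is being created around the request, a minimal sketch combining ClientRequest, ClientResponse, and ClientStatus could look like the following (this is illustrative only: the tracer, ctx, and req identifiers are assumptions, not part of this package, and trace refers to go.opentelemetry.io/otel/trace): // // ctx, span := tracer.Start(ctx, "HTTP "+req.Method, trace.WithAttributes(ClientRequest(req)...)) // defer span.End() // resp, err := http.DefaultClient.Do(req.WithContext(ctx)) // if err == nil { // span.SetAttributes(ClientResponse(resp)...) // span.SetStatus(ClientStatus(resp.StatusCode)) // }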
func ClientResponse(resp *http.Response) []attribute.KeyValue { return hc.ClientResponse(resp) } // ClientRequest returns trace attributes for an HTTP request made by a client. // The following attributes are always returned: "http.url", "http.flavor", // "http.method", "net.peer.name". The following attributes are returned if the // related values are defined in req: "net.peer.port", "http.user_agent", // "http.request_content_length", "enduser.id". func ClientRequest(req *http.Request) []attribute.KeyValue { return hc.ClientRequest(req) } // ClientStatus returns a span status code and message for an HTTP status code // value received by a client. func ClientStatus(code int) (codes.Code, string) { return hc.ClientStatus(code) } // ServerRequest returns trace attributes for an HTTP request received by a // server. // // The server must be the primary server name if it is known. For example, this // would be the ServerName directive // (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache // server, and the server_name directive // (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an // nginx server. More generically, the primary server name would be the host // header value that matches the default virtual host of an HTTP server. It // should include the host identifier and, if a port is used to route to the // server, that port identifier should be included as an appropriate port // suffix. // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. // // The following attributes are always returned: "http.method", "http.scheme", // "http.flavor", "http.target", "net.host.name". The following attributes are // returned if the related values are defined in req: "net.host.port", // "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", // "http.client_ip". func ServerRequest(server string, req *http.Request) []attribute.KeyValue { return hc.ServerRequest(server, req) } // ServerStatus returns a span status code and message for an HTTP status code // value returned by a server. Status codes in the 400-499 range are not // returned as errors. func ServerStatus(code int) (codes.Code, string) { return hc.ServerStatus(code) } // RequestHeader returns the contents of h as attributes. // // Instrumentation should require an explicit configuration of which headers to // capture and then prune what they pass here. Including all headers can be a // security risk - explicit configuration helps avoid leaking sensitive // information. // // The User-Agent header is already captured in the http.user_agent attribute // from ClientRequest and ServerRequest. Instrumentation may provide an option // to capture that header here even though it is not recommended. Otherwise, // instrumentation should filter that out of what is passed. func RequestHeader(h http.Header) []attribute.KeyValue { return hc.RequestHeader(h) } // ResponseHeader returns the contents of h as attributes. // // Instrumentation should require an explicit configuration of which headers to // capture and then prune what they pass here. Including all headers can be a // security risk - explicit configuration helps avoid leaking sensitive // information. // // The User-Agent header is already captured in the http.user_agent attribute // from ClientRequest and ServerRequest. Instrumentation may provide an option // to capture that header here even though it is not recommended.
Otherwise, // instrumentation should filter that out of what is passed. func ResponseHeader(h http.Header) []attribute.KeyValue { return hc.ResponseHeader(h) } opentelemetry-go-1.21.0/semconv/v1.13.0/netconv/000077500000000000000000000000001452547353200211405ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.13.0/netconv/net.go000066400000000000000000000053211452547353200222560ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package netconv provides OpenTelemetry network semantic conventions for // tracing telemetry. package netconv // import "go.opentelemetry.io/otel/semconv/v1.13.0/netconv" import ( "net" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/semconv/internal/v2" semconv "go.opentelemetry.io/otel/semconv/v1.13.0" ) var nc = &internal.NetConv{ NetHostNameKey: semconv.NetHostNameKey, NetHostPortKey: semconv.NetHostPortKey, NetPeerNameKey: semconv.NetPeerNameKey, NetPeerPortKey: semconv.NetPeerPortKey, NetSockFamilyKey: semconv.NetSockFamilyKey, NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, NetSockPeerPortKey: semconv.NetSockPeerPortKey, NetSockHostAddrKey: semconv.NetSockHostAddrKey, NetSockHostPortKey: semconv.NetSockHostPortKey, NetTransportOther: semconv.NetTransportOther, NetTransportTCP: semconv.NetTransportTCP, NetTransportUDP: semconv.NetTransportUDP, NetTransportInProc: semconv.NetTransportInProc, } // Transport returns a trace attribute describing the transport protocol of the // passed network. See the net.Dial for information about acceptable network // values. func Transport(network string) attribute.KeyValue { return nc.Transport(network) } // Client returns trace attributes for a client network connection to address. // See net.Dial for information about acceptable address values, address should // be the same as the one used to create conn. If conn is nil, only network // peer attributes will be returned that describe address. Otherwise, the // socket level information about conn will also be included. func Client(address string, conn net.Conn) []attribute.KeyValue { return nc.Client(address, conn) } // Server returns trace attributes for a network listener listening at address. // See net.Listen for information about acceptable address values, address // should be the same as the one used to create ln. If ln is nil, only network // host attributes will be returned that describe address. Otherwise, the // socket level information about ln will also be included. func Server(address string, ln net.Listener) []attribute.KeyValue { return nc.Server(address, ln) } opentelemetry-go-1.21.0/semconv/v1.13.0/resource.go000066400000000000000000001124041452547353200216440ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.13.0" import "go.opentelemetry.io/otel/attribute" // The web browser in which the application represented by the resource is running. The `browser.*` attributes MUST be used only for resources that represent applications running in a web browser (regardless of whether running on a mobile or desktop device). const ( // Array of brand name and version separated by a space // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (navigator.userAgentData.brands). BrowserBrandsKey = attribute.Key("browser.brands") // The platform on which the browser is running // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Windows', 'macOS', 'Android' // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (navigator.userAgentData.platform). If unavailable, the legacy // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD // be left unset in order for the values to be consistent. // The list of possible values is defined in the [W3C User-Agent Client Hints // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). // Note that some (but not all) of these values can overlap with values in the // [os.type and os.name attributes](./os.md). However, for consistency, the values // in the `browser.platform` attribute should capture the exact value that the // user agent provides. BrowserPlatformKey = attribute.Key("browser.platform") // Full user-agent string provided by the browser // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 // (KHTML, ' // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' // Note: The user-agent value SHOULD be provided only from browsers that do not // have a mechanism to retrieve brands and platform individually from the User- // Agent Client Hints API. To retrieve the value, the legacy `navigator.userAgent` // API can be used. BrowserUserAgentKey = attribute.Key("browser.user_agent") ) // A cloud environment (e.g. GCP, Azure, AWS) const ( // Name of the cloud provider. // // Type: Enum // RequirementLevel: Optional // Stability: stable CloudProviderKey = attribute.Key("cloud.provider") // The cloud account ID the resource is assigned to. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '111111111111', 'opentelemetry' CloudAccountIDKey = attribute.Key("cloud.account.id") // The geographical region the resource is running. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-central1', 'us-east-1' // Note: Refer to your provider's docs to see the available regions, for example // [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc- // detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global- // infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en- // us/global-infrastructure/geographies/), [Google Cloud // regions](https://cloud.google.com/about/locations), or [Tencent Cloud // regions](https://intl.cloud.tencent.com/document/product/213/6091). CloudRegionKey = attribute.Key("cloud.region") // Cloud regions often have multiple, isolated locations known as zones to // increase availability. Availability zone represents the zone where the resource // is running. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-east-1c' // Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud. CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") // The cloud platform in use. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: The prefix of the service SHOULD match the one specified in // `cloud.provider`. CloudPlatformKey = attribute.Key("cloud.platform") ) var ( // Alibaba Cloud CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") // Amazon Web Services CloudProviderAWS = CloudProviderKey.String("aws") // Microsoft Azure CloudProviderAzure = CloudProviderKey.String("azure") // Google Cloud Platform CloudProviderGCP = CloudProviderKey.String("gcp") // Tencent Cloud CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") ) var ( // Alibaba Cloud Elastic Compute Service CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") // Alibaba Cloud Function Compute CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") // AWS Elastic Compute Cloud CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") // AWS Elastic Container Service CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") // AWS Elastic Kubernetes Service CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") // AWS Lambda CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") // AWS Elastic Beanstalk CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") // AWS App Runner CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") // Azure Virtual Machines CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") // Azure Container Instances CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") // Azure Kubernetes Service CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") // Azure Functions CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") // Azure App Service CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") // Google Cloud Compute Engine (GCE) CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") // Google Cloud Run CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") // Google Cloud Kubernetes Engine (GKE) CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") // Google Cloud Functions (GCF) CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") // Google Cloud App Engine (GAE) CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") // Tencent Cloud Cloud Virtual 
Machine (CVM) CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") // Tencent Cloud Elastic Kubernetes Service (EKS) CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") // Tencent Cloud Serverless Cloud Function (SCF) CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") ) // Resources used by AWS Elastic Container Service (ECS). const ( // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws. // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:ecs:us- // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo // perguide/clusters.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l // aunch_types.html) for an ECS task. // // Type: Enum // RequirementLevel: Optional // Stability: stable AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates // t/developerguide/task_definitions.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:ecs:us- // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") // The task definition family this task definition is a member of. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-family' AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") // The revision for this task definition. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '8', '26' AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") ) var ( // ec2 AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") // fargate AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") ) // Resources used by AWS Elastic Kubernetes Service (EKS). const ( // The ARN of an EKS cluster. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") ) // Resources specific to Amazon Web Services. const ( // The name(s) of the AWS log group(s) an application is writing to. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '/aws/lambda/my-function', 'opentelemetry-service' // Note: Multiple log groups must be supported for cases like multi-container // applications, where a single application has sidecar containers, and each write // to their own log group. AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") // The Amazon Resource Name(s) (ARN) of the AWS log group(s). // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' // Note: See the [log group ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- // access-control-overview-cwl.html#CWL_ARN_Format). 
AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") // The name(s) of the AWS log stream(s) an application is writing to. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") // The ARN(s) of the AWS log stream(s). // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' // Note: See the [log stream ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain // several log streams, so these ARNs necessarily identify both a log group and a // log stream. AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") ) // A container instance. const ( // Container name used by container runtime. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-autoconf' ContainerNameKey = attribute.Key("container.name") // Container ID. Usually a UUID, as for example used to [identify Docker // containers](https://docs.docker.com/engine/reference/run/#container- // identification). The UUID might be abbreviated. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'a3bf90e006b2' ContainerIDKey = attribute.Key("container.id") // The container runtime managing this container. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'docker', 'containerd', 'rkt' ContainerRuntimeKey = attribute.Key("container.runtime") // Name of the image the container was built on. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'gcr.io/opentelemetry/operator' ContainerImageNameKey = attribute.Key("container.image.name") // Container image tag. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0.1' ContainerImageTagKey = attribute.Key("container.image.tag") ) // The software deployment. const ( // Name of the [deployment // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka // deployment tier). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'staging', 'production' DeploymentEnvironmentKey = attribute.Key("deployment.environment") ) // The device on which the process represented by this resource is running. const ( // A unique identifier representing the device // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' // Note: The device identifier MUST only be defined using the values outlined // below. This value is not an advertising identifier and MUST NOT be used as // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the // Firebase Installation ID or a globally unique UUID which is persisted across // sessions in your application. More information can be found // [here](https://developer.android.com/training/articles/user-data-ids) on best // practices and exact implementation details. Caution should be taken when // storing personal data or anything which can identify a user. 
GDPR and data // protection laws may apply, ensure you do your own due diligence. DeviceIDKey = attribute.Key("device.id") // The model identifier for the device // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iPhone3,4', 'SM-G920F' // Note: It's recommended this value represents a machine readable version of the // model identifier rather than the market or consumer-friendly name of the // device. DeviceModelIdentifierKey = attribute.Key("device.model.identifier") // The marketing name for the device model // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' // Note: It's recommended this value represents a human readable version of the // device model rather than a machine readable alternative. DeviceModelNameKey = attribute.Key("device.model.name") // The name of the device manufacturer // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Apple', 'Samsung' // Note: The Android OS provides this field via // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). // iOS apps SHOULD hardcode the value `Apple`. DeviceManufacturerKey = attribute.Key("device.manufacturer") ) // A serverless instance. const ( // The name of the single function that this runtime instance executes. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'my-function', 'myazurefunctionapp/some-function-name' // Note: This is the name of the function as configured/deployed on the FaaS // platform and is usually different from the name of the callback // function (which may be stored in the // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span- // general.md#source-code-attributes) // span attributes). // For some cloud providers, the above definition is ambiguous. The following // definition of function name MUST be used for this attribute // (and consequently the span name) for the listed cloud providers/products: // * **Azure:** The full name `/`, i.e., function app name // followed by a forward slash followed by the function name (this form // can also be seen in the resource JSON for the function). // This means that a span attribute MUST be used, as an Azure function // app can host multiple functions that would usually share // a TracerProvider (see also the `faas.id` attribute). FaaSNameKey = attribute.Key("faas.name") // The unique ID of the single function that this runtime instance executes. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' // Note: On some cloud providers, it may not be possible to determine the full ID // at startup, // so consider setting `faas.id` as a span attribute instead. // The exact value to use for `faas.id` depends on the cloud provider: // * **AWS Lambda:** The function // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and- // namespaces.html). // Take care not to use the "invoked ARN" directly but replace any // [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration- // aliases.html) // with the resolved function version, as the same runtime instance may be // invokable with // multiple different aliases. 
// * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full- // resource-names) // * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en- // us/rest/api/resources/resources/get-by-id) of the invoked function, // *not* the function app, having the form // `/subscriptions//resourceGroups//providers/Microsoft.We // b/sites//functions/`. // This means that a span attribute MUST be used, as an Azure function app can // host multiple functions that would usually share // a TracerProvider. FaaSIDKey = attribute.Key("faas.id") // The immutable version of the function being executed. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '26', 'pinkfroid-00002' // Note: Depending on the cloud provider and platform, use: // * **AWS Lambda:** The [function // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration- // versions.html) // (an integer represented as a decimal string). // * **Google Cloud Run:** The // [revision](https://cloud.google.com/run/docs/managing/revisions) // (i.e., the function name plus the revision suffix). // * **Google Cloud Functions:** The value of the // [`K_REVISION` environment // variable](https://cloud.google.com/functions/docs/env- // var#runtime_environment_variables_set_automatically). // * **Azure Functions:** Not applicable. Do not set this attribute. FaaSVersionKey = attribute.Key("faas.version") // The execution environment ID as a string, that will be potentially reused for // other invocations to the same function/function version. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' // Note: * **AWS Lambda:** Use the (full) log stream name. FaaSInstanceKey = attribute.Key("faas.instance") // The amount of memory available to the serverless function in MiB. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 128 // Note: It's recommended to set this attribute since e.g. too little memory can // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this // information. FaaSMaxMemoryKey = attribute.Key("faas.max_memory") ) // A host is defined as a general computing instance. const ( // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud // provider. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-test' HostIDKey = attribute.Key("host.id") // Name of the host. On Unix systems, it may contain what the hostname command // returns, or the fully qualified hostname, or another name specified by the // user. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-test' HostNameKey = attribute.Key("host.name") // Type of host. For Cloud, this must be the machine type. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'n1-standard-1' HostTypeKey = attribute.Key("host.type") // The CPU architecture the host system is running on. // // Type: Enum // RequirementLevel: Optional // Stability: stable HostArchKey = attribute.Key("host.arch") // Name of the VM image or OS install the host was instantiated from. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' HostImageNameKey = attribute.Key("host.image.name") // VM image ID. 
For Cloud, this value is from the provider. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'ami-07b06b442921831e5' HostImageIDKey = attribute.Key("host.image.id") // The version string of the VM image as defined in [Version // Attributes](README.md#version-attributes). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0.1' HostImageVersionKey = attribute.Key("host.image.version") ) var ( // AMD64 HostArchAMD64 = HostArchKey.String("amd64") // ARM32 HostArchARM32 = HostArchKey.String("arm32") // ARM64 HostArchARM64 = HostArchKey.String("arm64") // Itanium HostArchIA64 = HostArchKey.String("ia64") // 32-bit PowerPC HostArchPPC32 = HostArchKey.String("ppc32") // 64-bit PowerPC HostArchPPC64 = HostArchKey.String("ppc64") // IBM z/Architecture HostArchS390x = HostArchKey.String("s390x") // 32-bit x86 HostArchX86 = HostArchKey.String("x86") ) // A Kubernetes Cluster. const ( // The name of the cluster. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-cluster' K8SClusterNameKey = attribute.Key("k8s.cluster.name") ) // A Kubernetes Node object. const ( // The name of the Node. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'node-1' K8SNodeNameKey = attribute.Key("k8s.node.name") // The UID of the Node. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' K8SNodeUIDKey = attribute.Key("k8s.node.uid") ) // A Kubernetes Namespace. const ( // The name of the namespace that the pod is running in. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'default' K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") ) // A Kubernetes Pod object. const ( // The UID of the Pod. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SPodUIDKey = attribute.Key("k8s.pod.uid") // The name of the Pod. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-pod-autoconf' K8SPodNameKey = attribute.Key("k8s.pod.name") ) // A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). const ( // The name of the Container from Pod specification, must be unique within a Pod. // Container runtime usually uses different globally unique name // (`container.name`). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'redis' K8SContainerNameKey = attribute.Key("k8s.container.name") // Number of times the container was restarted. This attribute can be used to // identify a particular container (running or stopped) within a container spec. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 0, 2 K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") ) // A Kubernetes ReplicaSet object. const ( // The UID of the ReplicaSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") // The name of the ReplicaSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") ) // A Kubernetes Deployment object. const ( // The UID of the Deployment. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") // The name of the Deployment. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") ) // A Kubernetes StatefulSet object. const ( // The UID of the StatefulSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") // The name of the StatefulSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") ) // A Kubernetes DaemonSet object. const ( // The UID of the DaemonSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") // The name of the DaemonSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") ) // A Kubernetes Job object. const ( // The UID of the Job. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SJobUIDKey = attribute.Key("k8s.job.uid") // The name of the Job. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SJobNameKey = attribute.Key("k8s.job.name") ) // A Kubernetes CronJob object. const ( // The UID of the CronJob. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") // The name of the CronJob. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") ) // The operating system (OS) on which the process represented by this resource is running. const ( // The operating system type. // // Type: Enum // RequirementLevel: Required // Stability: stable OSTypeKey = attribute.Key("os.type") // Human readable (not intended to be parsed) OS version information, like e.g. // reported by `ver` or `lsb_release -a` commands. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' OSDescriptionKey = attribute.Key("os.description") // Human readable operating system name. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iOS', 'Android', 'Ubuntu' OSNameKey = attribute.Key("os.name") // The version string of the operating system as defined in [Version // Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '14.2.1', '18.04.1' OSVersionKey = attribute.Key("os.version") ) var ( // Microsoft Windows OSTypeWindows = OSTypeKey.String("windows") // Linux OSTypeLinux = OSTypeKey.String("linux") // Apple Darwin OSTypeDarwin = OSTypeKey.String("darwin") // FreeBSD OSTypeFreeBSD = OSTypeKey.String("freebsd") // NetBSD OSTypeNetBSD = OSTypeKey.String("netbsd") // OpenBSD OSTypeOpenBSD = OSTypeKey.String("openbsd") // DragonFly BSD OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") // HP-UX (Hewlett Packard Unix) OSTypeHPUX = OSTypeKey.String("hpux") // AIX (Advanced Interactive eXecutive) OSTypeAIX = OSTypeKey.String("aix") // SunOS, Oracle Solaris OSTypeSolaris = OSTypeKey.String("solaris") // IBM z/OS OSTypeZOS = OSTypeKey.String("z_os") ) // An operating system process. const ( // Process identifier (PID). // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 1234 ProcessPIDKey = attribute.Key("process.pid") // Parent Process identifier (PID). // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 111 ProcessParentPIDKey = attribute.Key("process.parent_pid") // The name of the process executable. On Linux based systems, can be set to the // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of // `GetProcessImageFileNameW`. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes below.) // Stability: stable // Examples: 'otelcol' ProcessExecutableNameKey = attribute.Key("process.executable.name") // The full path to the process executable. On Linux based systems, can be set to // the target of `proc/[pid]/exe`. On Windows, can be set to the result of // `GetProcessImageFileNameW`. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes below.) // Stability: stable // Examples: '/usr/bin/cmd/otelcol' ProcessExecutablePathKey = attribute.Key("process.executable.path") // The command used to launch the process (i.e. the command name). On Linux based // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, // can be set to the first parameter extracted from `GetCommandLineW`. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes below.) // Stability: stable // Examples: 'cmd/otelcol' ProcessCommandKey = attribute.Key("process.command") // The full command used to launch the process as a single string representing the // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not // set this if you have to assemble it just for monitoring; use // `process.command_args` instead. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes below.) // Stability: stable // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' ProcessCommandLineKey = attribute.Key("process.command_line") // All the command arguments (including the command/executable itself) as received // by the process. On Linux-based systems (and some other Unixoid systems // supporting procfs), can be set according to the list of null-delimited strings // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be // the full argv vector passed to `main`. // // Type: string[] // RequirementLevel: ConditionallyRequired (See alternative attributes below.) 
// Stability: stable // Examples: 'cmd/otecol', '--config=config.yaml' ProcessCommandArgsKey = attribute.Key("process.command_args") // The username of the user that owns the process. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'root' ProcessOwnerKey = attribute.Key("process.owner") ) // The single (language) runtime instance which is monitored. const ( // The name of the runtime of this process. For compiled native binaries, this // SHOULD be the name of the compiler. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'OpenJDK Runtime Environment' ProcessRuntimeNameKey = attribute.Key("process.runtime.name") // The version of the runtime of this process, as returned by the runtime without // modification. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '14.0.2' ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") // An additional description about the runtime of the process, for example a // specific vendor customization of the runtime environment. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") ) // A service instance. const ( // Logical name of the service. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'shoppingcart' // Note: MUST be the same for all instances of horizontally scaled services. If // the value was not specified, SDKs MUST fallback to `unknown_service:` // concatenated with [`process.executable.name`](process.md#process), e.g. // `unknown_service:bash`. If `process.executable.name` is not available, the // value MUST be set to `unknown_service`. ServiceNameKey = attribute.Key("service.name") // A namespace for `service.name`. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Shop' // Note: A string value having a meaning that helps to distinguish a group of // services, for example the team name that owns a group of services. // `service.name` is expected to be unique within the same namespace. If // `service.namespace` is not specified in the Resource then `service.name` is // expected to be unique for all services that have no explicit namespace defined // (so the empty/unspecified namespace is simply one more valid namespace). Zero- // length namespace string is assumed equal to unspecified namespace. ServiceNamespaceKey = attribute.Key("service.namespace") // The string ID of the service instance. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '627cc493-f310-47de-96bd-71410b7dec09' // Note: MUST be unique for each instance of the same // `service.namespace,service.name` pair (in other words // `service.namespace,service.name,service.instance.id` triplet MUST be globally // unique). The ID helps to distinguish instances of the same service that exist // at the same time (e.g. instances of a horizontally scaled service). It is // preferable for the ID to be persistent and stay the same for the lifetime of // the service instance, however it is acceptable that the ID is ephemeral and // changes during important lifetime events for the service (e.g. service // restarts). 
If the service has no inherent unique ID that can be used as the // value of this attribute it is recommended to generate a random Version 1 or // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use // Version 5, see RFC 4122 for more recommendations). ServiceInstanceIDKey = attribute.Key("service.instance.id") // The version string of the service API or implementation. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2.0.0' ServiceVersionKey = attribute.Key("service.version") ) // The telemetry SDK used to capture data recorded by the instrumentation libraries. const ( // The name of the telemetry SDK as defined above. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") // The language of the telemetry SDK. // // Type: Enum // RequirementLevel: Optional // Stability: stable TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") // The version string of the telemetry SDK. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.2.3' TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") // The version string of the auto instrumentation agent, if used. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.2.3' TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") ) var ( // cpp TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") // dotnet TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") // erlang TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") // go TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") // java TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") // nodejs TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") // php TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") // python TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") // ruby TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") // webjs TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") // swift TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") ) // Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. const ( // The name of the web engine. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'WildFly' WebEngineNameKey = attribute.Key("webengine.name") // The version of the web engine. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '21.0.0' WebEngineVersionKey = attribute.Key("webengine.version") // Additional description of the web engine (e.g. detailed version and edition // information). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' WebEngineDescriptionKey = attribute.Key("webengine.description") ) opentelemetry-go-1.21.0/semconv/v1.13.0/schema.go000066400000000000000000000017141452547353200212560ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.13.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ const SchemaURL = "https://opentelemetry.io/schemas/1.13.0" opentelemetry-go-1.21.0/semconv/v1.13.0/trace.go000066400000000000000000001777251452547353200211340ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.13.0" import "go.opentelemetry.io/otel/attribute" // Span attributes used by AWS Lambda (in addition to general `faas` attributes). const ( // The full invoked ARN as provided on the `Context` passed to the function // (`Lambda-Runtime-Invoked-Function-ARN` header on the `/runtime/invocation/next` // applicable). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' // Note: This may be different from `faas.id` if an alias is involved. AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") ) // This document defines attributes for CloudEvents. CloudEvents is a specification on how to define event data in a standard way. These attributes can be attached to spans when performing operations with CloudEvents, regardless of the protocol being used. const ( // The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec // .md#id) uniquely identifies the event. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") // The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.m // d#source-1) identifies the context in which an event happened. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my- // service' CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") // The [version of the CloudEvents specification](https://github.com/cloudevents/s // pec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.0' CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") // The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/sp // ec.md#type) contains a value describing the type of event related to the // originating occurrence. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2' CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") // The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec. // md#subject) of the event in the context of the event producer (identified by // source). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'mynewfile.jpg' CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") ) // This document defines semantic conventions for the OpenTracing Shim const ( // Parent-child Reference type // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: The causal relationship between a child Span and a parent Span. OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") ) var ( // The parent Span depends on the child Span in some capacity OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") // The parent Span does not depend in any way on the result of the child Span OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") ) // This document defines the attributes used to perform database client calls. const ( // An identifier for the database management system (DBMS) product being used. See // below for a list of well-known identifiers. // // Type: Enum // RequirementLevel: Required // Stability: stable DBSystemKey = attribute.Key("db.system") // The connection string used to connect to the database. It is recommended to // remove embedded credentials. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' DBConnectionStringKey = attribute.Key("db.connection_string") // Username for accessing the database. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'readonly_user', 'reporting_user' DBUserKey = attribute.Key("db.user") // The fully-qualified class name of the [Java Database Connectivity // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver // used to connect. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'org.postgresql.Driver', // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") // This attribute is used to report the name of the database being accessed. For // commands that switch the database, this should be set to the target database // (even if the command fails). // // Type: string // RequirementLevel: ConditionallyRequired (If applicable.) // Stability: stable // Examples: 'customers', 'main' // Note: In some SQL databases, the database name to be used is called "schema // name". In case there are multiple layers that could be considered for database // name (e.g. Oracle instance name and schema name), the database name to be used // is the more specific layer (e.g. Oracle schema name). DBNameKey = attribute.Key("db.name") // The database statement being executed. 
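// A short sketch of a database client span carrying the db.* keys above,
// assuming ctx is the caller's context; the tracer name, database name, and
// statement are placeholders drawn from the Examples in this file:
//
//	import (
//		"go.opentelemetry.io/otel"
//		"go.opentelemetry.io/otel/trace"
//		semconv "go.opentelemetry.io/otel/semconv/v1.13.0"
//	)
//
//	tracer := otel.Tracer("example/db-client")
//	ctx, span := tracer.Start(ctx, "SELECT customers",
//		trace.WithSpanKind(trace.SpanKindClient),
//		trace.WithAttributes(
//			semconv.DBSystemPostgreSQL,
//			semconv.DBNameKey.String("customers"),
//			semconv.DBUserKey.String("readonly_user"),
//			semconv.DBStatementKey.String("SELECT * FROM wuser_table"),
//		),
//	)
//	defer span.End()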
// // Type: string // RequirementLevel: ConditionallyRequired (If applicable and not explicitly // disabled via instrumentation configuration.) // Stability: stable // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' // Note: The value may be sanitized to exclude sensitive information. DBStatementKey = attribute.Key("db.statement") // The name of the operation being executed, e.g. the [MongoDB command // name](https://docs.mongodb.com/manual/reference/command/#database-operations) // such as `findAndModify`, or the SQL keyword. // // Type: string // RequirementLevel: ConditionallyRequired (If `db.statement` is not applicable.) // Stability: stable // Examples: 'findAndModify', 'HMSET', 'SELECT' // Note: When setting this to an SQL keyword, it is not recommended to attempt any // client-side parsing of `db.statement` just to get this property, but it should // be set if the operation name is provided by the library being instrumented. If // the SQL statement has an ambiguous operation, or performs more than one // operation, this value may be omitted. DBOperationKey = attribute.Key("db.operation") ) var ( // Some other SQL database. Fallback only. See notes DBSystemOtherSQL = DBSystemKey.String("other_sql") // Microsoft SQL Server DBSystemMSSQL = DBSystemKey.String("mssql") // MySQL DBSystemMySQL = DBSystemKey.String("mysql") // Oracle Database DBSystemOracle = DBSystemKey.String("oracle") // IBM DB2 DBSystemDB2 = DBSystemKey.String("db2") // PostgreSQL DBSystemPostgreSQL = DBSystemKey.String("postgresql") // Amazon Redshift DBSystemRedshift = DBSystemKey.String("redshift") // Apache Hive DBSystemHive = DBSystemKey.String("hive") // Cloudscape DBSystemCloudscape = DBSystemKey.String("cloudscape") // HyperSQL DataBase DBSystemHSQLDB = DBSystemKey.String("hsqldb") // Progress Database DBSystemProgress = DBSystemKey.String("progress") // SAP MaxDB DBSystemMaxDB = DBSystemKey.String("maxdb") // SAP HANA DBSystemHanaDB = DBSystemKey.String("hanadb") // Ingres DBSystemIngres = DBSystemKey.String("ingres") // FirstSQL DBSystemFirstSQL = DBSystemKey.String("firstsql") // EnterpriseDB DBSystemEDB = DBSystemKey.String("edb") // InterSystems Caché DBSystemCache = DBSystemKey.String("cache") // Adabas (Adaptable Database System) DBSystemAdabas = DBSystemKey.String("adabas") // Firebird DBSystemFirebird = DBSystemKey.String("firebird") // Apache Derby DBSystemDerby = DBSystemKey.String("derby") // FileMaker DBSystemFilemaker = DBSystemKey.String("filemaker") // Informix DBSystemInformix = DBSystemKey.String("informix") // InstantDB DBSystemInstantDB = DBSystemKey.String("instantdb") // InterBase DBSystemInterbase = DBSystemKey.String("interbase") // MariaDB DBSystemMariaDB = DBSystemKey.String("mariadb") // Netezza DBSystemNetezza = DBSystemKey.String("netezza") // Pervasive PSQL DBSystemPervasive = DBSystemKey.String("pervasive") // PointBase DBSystemPointbase = DBSystemKey.String("pointbase") // SQLite DBSystemSqlite = DBSystemKey.String("sqlite") // Sybase DBSystemSybase = DBSystemKey.String("sybase") // Teradata DBSystemTeradata = DBSystemKey.String("teradata") // Vertica DBSystemVertica = DBSystemKey.String("vertica") // H2 DBSystemH2 = DBSystemKey.String("h2") // ColdFusion IMQ DBSystemColdfusion = DBSystemKey.String("coldfusion") // Apache Cassandra DBSystemCassandra = DBSystemKey.String("cassandra") // Apache HBase DBSystemHBase = DBSystemKey.String("hbase") // MongoDB DBSystemMongoDB = DBSystemKey.String("mongodb") // Redis DBSystemRedis = DBSystemKey.String("redis") // Couchbase 
DBSystemCouchbase = DBSystemKey.String("couchbase") // CouchDB DBSystemCouchDB = DBSystemKey.String("couchdb") // Microsoft Azure Cosmos DB DBSystemCosmosDB = DBSystemKey.String("cosmosdb") // Amazon DynamoDB DBSystemDynamoDB = DBSystemKey.String("dynamodb") // Neo4j DBSystemNeo4j = DBSystemKey.String("neo4j") // Apache Geode DBSystemGeode = DBSystemKey.String("geode") // Elasticsearch DBSystemElasticsearch = DBSystemKey.String("elasticsearch") // Memcached DBSystemMemcached = DBSystemKey.String("memcached") // CockroachDB DBSystemCockroachdb = DBSystemKey.String("cockroachdb") // OpenSearch DBSystemOpensearch = DBSystemKey.String("opensearch") ) // Connection-level attributes for Microsoft SQL Server const ( // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en- // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) // connecting to. This name is used to determine the port of a named instance. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MSSQLSERVER' // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer // required (but still recommended if non-standard). DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") ) // Call-level attributes for Cassandra const ( // The fetch size used for paging, i.e. how many rows will be returned at once. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 5000 DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") // The consistency level of the query. Based on consistency values from // [CQL](https://docs.datastax.com/en/cassandra- // oss/3.0/cassandra/dml/dmlConfigConsistency.html). // // Type: Enum // RequirementLevel: Optional // Stability: stable DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") // The name of the primary table that the operation is acting upon, including the // keyspace name (if applicable). // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'mytable' // Note: This mirrors the db.sql.table attribute but references cassandra rather // than sql. It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting upon an // anonymous table, or more than one table, this value MUST NOT be set. DBCassandraTableKey = attribute.Key("db.cassandra.table") // Whether or not the query is idempotent. // // Type: boolean // RequirementLevel: Optional // Stability: stable DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") // The number of times a query was speculatively executed. Not set or `0` if the // query was not executed speculatively. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 0, 2 DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") // The ID of the coordinating node for a query. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") // The data center of the coordinating node for a query. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-west-2' DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") ) var ( // all DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") // each_quorum DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") // quorum DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") // local_quorum DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") // one DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") // two DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") // three DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") // local_one DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") // any DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") // serial DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") // local_serial DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") ) // Call-level attributes for Redis const ( // The index of the database being accessed as used in the [`SELECT` // command](https://redis.io/commands/select), provided as an integer. To be used // instead of the generic `db.name` attribute. // // Type: int // RequirementLevel: ConditionallyRequired (If other than the default database // (`0`).) // Stability: stable // Examples: 0, 1, 15 DBRedisDBIndexKey = attribute.Key("db.redis.database_index") ) // Call-level attributes for MongoDB const ( // The collection being accessed within the database stated in `db.name`. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'customers', 'products' DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") ) // Call-level attributes for SQL databases const ( // The name of the primary table that the operation is acting upon, including the // database name (if applicable). // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'public.users', 'customers' // Note: It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting upon an // anonymous table, or more than one table, this value MUST NOT be set. DBSQLTableKey = attribute.Key("db.sql.table") ) // This document defines the attributes used to report a single exception associated with a span. const ( // The type of the exception (its fully-qualified class name, if applicable). The // dynamic type of the exception should be preferred over the static type in // languages that support it. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'java.net.ConnectException', 'OSError' ExceptionTypeKey = attribute.Key("exception.type") // The exception message. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" ExceptionMessageKey = attribute.Key("exception.message") // A stacktrace as a string in the natural representation for the language // runtime. The representation is to be determined and documented by each language // SIG. 
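// A sketch of manually recording an exception event with the exception.* keys
// above, assuming span is an active go.opentelemetry.io/otel/trace.Span; the
// error details are placeholder values from the Examples in this file, and
// span.RecordError is the usual shortcut for this:
//
//	span.AddEvent("exception", trace.WithAttributes(
//		semconv.ExceptionTypeKey.String("OSError"),
//		semconv.ExceptionMessageKey.String("Division by zero"),
//		semconv.ExceptionEscapedKey.Bool(false),
//	))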
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test // exception\\n at ' // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' ExceptionStacktraceKey = attribute.Key("exception.stacktrace") // SHOULD be set to true if the exception event is recorded at a point where it is // known that the exception is escaping the scope of the span. // // Type: boolean // RequirementLevel: Optional // Stability: stable // Note: An exception is considered to have escaped (or left) the scope of a span, // if that span is ended while the exception is still logically "in flight". // This may be actually "in flight" in some languages (e.g. if the exception // is passed to a Context manager's `__exit__` method in Python) but will // usually be caught at the point of recording the exception in most languages. // It is usually not possible to determine at the point where an exception is // thrown // whether it will escape the scope of a span. // However, it is trivial to know that an exception // will escape, if one checks for an active exception just before ending the span, // as done in the [example above](#recording-an-exception). // It follows that an exception may still escape the scope of the span // even if the `exception.escaped` attribute was not set or set to false, // since the event might have been recorded at a time where it was not // clear whether the exception will escape. ExceptionEscapedKey = attribute.Key("exception.escaped") ) // This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. const ( // Type of the trigger which caused this function execution. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: For the server/consumer span on the incoming side, // `faas.trigger` MUST be set. // Clients invoking FaaS instances usually cannot set `faas.trigger`, // since they would typically need to look in the payload to determine // the event type. If clients set it, it should be the same as the // trigger that corresponding incoming would have (i.e., this has // nothing to do with the underlying transport used to make the API // call to invoke the lambda, which is often HTTP). FaaSTriggerKey = attribute.Key("faas.trigger") // The execution ID of the current function execution. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' FaaSExecutionKey = attribute.Key("faas.execution") ) var ( // A response to some data source operation such as a database or filesystem read/write FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") // To provide an answer to an inbound HTTP request FaaSTriggerHTTP = FaaSTriggerKey.String("http") // A function is set to be executed when messages are sent to a messaging system FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") // A function is scheduled to be executed regularly FaaSTriggerTimer = FaaSTriggerKey.String("timer") // If none of the others apply FaaSTriggerOther = FaaSTriggerKey.String("other") ) // Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. 
const ( // The name of the source on which the triggering operation was performed. For // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos // DB to the database name. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myBucketName', 'myDBName' FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") // Describes the type of the operation that was performed on the data. // // Type: Enum // RequirementLevel: Required // Stability: stable FaaSDocumentOperationKey = attribute.Key("faas.document.operation") // A string containing the time when the data was accessed in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed // in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSDocumentTimeKey = attribute.Key("faas.document.time") // The document name/table subjected to the operation. For example, in Cloud // Storage or S3 is the name of the file, and in Cosmos DB the table name. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'myFile.txt', 'myTableName' FaaSDocumentNameKey = attribute.Key("faas.document.name") ) var ( // When a new object is created FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") // When an object is modified FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") // When an object is deleted FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") ) // Semantic Convention for FaaS scheduled to be executed regularly. const ( // A string containing the function invocation time in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed // in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSTimeKey = attribute.Key("faas.time") // A string containing the schedule period as [Cron Expression](https://docs.oracl // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0/5 * * * ? *' FaaSCronKey = attribute.Key("faas.cron") ) // Contains additional attributes for incoming FaaS spans. const ( // A boolean that is true if the serverless function is executed for the first // time (aka cold-start). // // Type: boolean // RequirementLevel: Optional // Stability: stable FaaSColdstartKey = attribute.Key("faas.coldstart") ) // Contains additional attributes for outgoing FaaS spans. const ( // The name of the invoked function. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'my-function' // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked // function. FaaSInvokedNameKey = attribute.Key("faas.invoked_name") // The cloud provider of the invoked function. // // Type: Enum // RequirementLevel: Required // Stability: stable // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked // function. FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") // The cloud region of the invoked function. // // Type: string // RequirementLevel: ConditionallyRequired (For some cloud providers, like AWS or // GCP, the region in which a function is hosted is essential to uniquely identify // the function and also part of its endpoint. 
Since it's part of the endpoint // being called, the region is always known to clients. In these cases, // `faas.invoked_region` MUST be set accordingly. If the region is unknown to the // client or not required for identifying the invoked function, setting // `faas.invoked_region` is optional.) // Stability: stable // Examples: 'eu-central-1' // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked // function. FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") ) var ( // Alibaba Cloud FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") // Amazon Web Services FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") // Microsoft Azure FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") // Google Cloud Platform FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") // Tencent Cloud FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") ) // These attributes may be used for any network related operation. const ( // Transport protocol used. See note below. // // Type: Enum // RequirementLevel: Optional // Stability: stable NetTransportKey = attribute.Key("net.transport") // Application layer protocol used. The value SHOULD be normalized to lowercase. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'amqp', 'http', 'mqtt' NetAppProtocolNameKey = attribute.Key("net.app.protocol.name") // Version of the application layer protocol used. See note below. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '3.1.1' // Note: `net.app.protocol.version` refers to the version of the protocol used and // might be different from the protocol client's version. If the HTTP client used // has a version of `0.27.2`, but sends HTTP version `1.1`, this attribute should // be set to `1.1`. NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version") // Remote socket peer name. // // Type: string // RequirementLevel: Recommended (If available and different from `net.peer.name` // and if `net.sock.peer.addr` is set.) // Stability: stable // Examples: 'proxy.example.com' NetSockPeerNameKey = attribute.Key("net.sock.peer.name") // Remote socket peer address: IPv4 or IPv6 for internet protocols, path for local // communication, [etc](https://man7.org/linux/man- // pages/man7/address_families.7.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '127.0.0.1', '/tmp/mysql.sock' NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") // Remote socket peer port. // // Type: int // RequirementLevel: Recommended (If defined for the address family and if // different than `net.peer.port` and if `net.sock.peer.addr` is set.) // Stability: stable // Examples: 16456 NetSockPeerPortKey = attribute.Key("net.sock.peer.port") // Protocol [address family](https://man7.org/linux/man- // pages/man7/address_families.7.html) which is used for communication. // // Type: Enum // RequirementLevel: ConditionallyRequired (If different than `inet` and if any of // `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers of telemetry // SHOULD accept both IPv4 and IPv6 formats for the address in // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support // instrumentations that follow previous versions of this document.) // Stability: stable // Examples: 'inet6', 'bluetooth' NetSockFamilyKey = attribute.Key("net.sock.family") // Logical remote hostname, see note below. 
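// A sketch of annotating an outgoing request span with the net.* keys above,
// assuming span is an active trace.Span; the peer host, port, and protocol
// values are placeholders:
//
//	span.SetAttributes(
//		semconv.NetTransportTCP,
//		semconv.NetPeerNameKey.String("example.com"),
//		semconv.NetPeerPortKey.Int(443),
//		semconv.NetAppProtocolNameKey.String("http"),
//		semconv.NetAppProtocolVersionKey.String("1.1"),
//	)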
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'example.com' // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an extra // DNS lookup. NetPeerNameKey = attribute.Key("net.peer.name") // Logical remote port number // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 80, 8080, 443 NetPeerPortKey = attribute.Key("net.peer.port") // Logical local hostname or similar, see note below. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'localhost' NetHostNameKey = attribute.Key("net.host.name") // Logical local port number, preferably the one that the peer used to connect // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 8080 NetHostPortKey = attribute.Key("net.host.port") // Local socket address. Useful in case of a multi-IP host. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '192.168.0.1' NetSockHostAddrKey = attribute.Key("net.sock.host.addr") // Local socket port number. // // Type: int // RequirementLevel: Recommended (If defined for the address family and if // different than `net.host.port` and if `net.sock.host.addr` is set.) // Stability: stable // Examples: 35555 NetSockHostPortKey = attribute.Key("net.sock.host.port") // The internet connection type currently being used by the host. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'wifi' NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") // This describes more details regarding the connection.type. It may be the type // of cell technology connection, but it could be used for describing details // about a wifi connection. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'LTE' NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") // The name of the mobile carrier. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'sprint' NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") // The mobile carrier country code. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '310' NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") // The mobile carrier network code. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '001' NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") // The ISO 3166-1 alpha-2 2-character country code associated with the mobile // carrier network. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'DE' NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") ) var ( // ip_tcp NetTransportTCP = NetTransportKey.String("ip_tcp") // ip_udp NetTransportUDP = NetTransportKey.String("ip_udp") // Named or anonymous pipe. 
See note below NetTransportPipe = NetTransportKey.String("pipe") // In-process communication NetTransportInProc = NetTransportKey.String("inproc") // Something else (non IP-based) NetTransportOther = NetTransportKey.String("other") ) var ( // IPv4 address NetSockFamilyInet = NetSockFamilyKey.String("inet") // IPv6 address NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") // Unix domain socket path NetSockFamilyUnix = NetSockFamilyKey.String("unix") ) var ( // wifi NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") // wired NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") // cell NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") // unavailable NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") // unknown NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") ) var ( // GPRS NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") // EDGE NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") // UMTS NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") // CDMA NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") // EVDO Rel. 0 NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") // EVDO Rev. A NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") // CDMA2000 1XRTT NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") // HSDPA NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") // HSUPA NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") // HSPA NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") // IDEN NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") // EVDO Rev. B NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") // LTE NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") // EHRPD NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") // HSPAP NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") // GSM NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") // TD-SCDMA NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") // IWLAN NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") // 5G NR (New Radio) NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") // 5G NRNSA (New Radio Non-Standalone) NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") // LTE CA NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") ) // Operations that access some remote service. const ( // The [`service.name`](../../resource/semantic_conventions/README.md#service) of // the remote service. SHOULD be equal to the actual `service.name` resource // attribute of the remote service if any. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'AuthTokenCache' PeerServiceKey = attribute.Key("peer.service") ) // These attributes may be used for any operation with an authenticated and/or authorized enduser. const ( // Username or client_id extracted from the access token or // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the // inbound request from outside the system. 
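// A sketch of identifying the remote service and the authenticated end user on
// a span with the keys above, assuming both values are known to the
// instrumentation; the service name, username, and role are placeholders from
// the Examples in this file:
//
//	span.SetAttributes(
//		semconv.PeerServiceKey.String("AuthTokenCache"),
//		semconv.EnduserIDKey.String("username"),
//		semconv.EnduserRoleKey.String("admin"),
//	)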
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'username' EnduserIDKey = attribute.Key("enduser.id") // Actual/assumed role the client is making the request under extracted from token // or application security context. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'admin' EnduserRoleKey = attribute.Key("enduser.role") // Scopes or granted authorities the client currently possesses extracted from // token or application security context. The value would come from the scope // associated with an [OAuth 2.0 Access // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value // in a [SAML 2.0 Assertion](http://docs.oasis- // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'read:message, write:files' EnduserScopeKey = attribute.Key("enduser.scope") ) // These attributes may be used for any operation to store information about a thread that started a span. const ( // Current "managed" thread ID (as opposed to OS thread ID). // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 42 ThreadIDKey = attribute.Key("thread.id") // Current thread name. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'main' ThreadNameKey = attribute.Key("thread.name") ) // These attributes allow to report this unit of code and therefore to provide more context about the span. const ( // The method or function name, or equivalent (usually rightmost part of the code // unit's name). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'serveRequest' CodeFunctionKey = attribute.Key("code.function") // The "namespace" within which `code.function` is defined. Usually the qualified // class or module name, such that `code.namespace` + some separator + // `code.function` form a unique identifier for the code unit. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'com.example.MyHTTPService' CodeNamespaceKey = attribute.Key("code.namespace") // The source code file name that identifies the code unit as uniquely as possible // (preferably an absolute file path). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '/usr/local/MyApplication/content_root/app/index.php' CodeFilepathKey = attribute.Key("code.filepath") // The line number in `code.filepath` best representing the operation. It SHOULD // point within the code unit named in `code.function`. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 42 CodeLineNumberKey = attribute.Key("code.lineno") ) // This document defines semantic conventions for HTTP client and server Spans. const ( // HTTP request method. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'GET', 'POST', 'HEAD' HTTPMethodKey = attribute.Key("http.method") // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). // // Type: int // RequirementLevel: ConditionallyRequired (If and only if one was received/sent.) // Stability: stable // Examples: 200 HTTPStatusCodeKey = attribute.Key("http.status_code") // Kind of HTTP protocol used. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP` // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed. 
HTTPFlavorKey = attribute.Key("http.flavor") // Value of the [HTTP User-Agent](https://www.rfc- // editor.org/rfc/rfc9110.html#field.user-agent) header sent by the client. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' HTTPUserAgentKey = attribute.Key("http.user_agent") // The size of the request payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content- // length) header. For requests using transport encoding, this should be the // compressed size. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 3495 HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") // The size of the response payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content- // length) header. For requests using transport encoding, this should be the // compressed size. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 3495 HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") ) var ( // HTTP/1.0 HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") // HTTP/1.1 HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") // HTTP/2 HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") // HTTP/3 HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") // SPDY protocol HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") // QUIC protocol HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") ) // Semantic Convention for HTTP Client const ( // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. // Usually the fragment is not transmitted over HTTP, but if it is known, it // should be included nevertheless. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' // Note: `http.url` MUST NOT contain credentials passed via URL in form of // `https://username:password@www.example.com/`. In such case the attribute's // value should be `https://www.example.com/`. HTTPURLKey = attribute.Key("http.url") // The ordinal number of request re-sending attempt. // // Type: int // RequirementLevel: Recommended (if and only if request was retried.) // Stability: stable // Examples: 3 HTTPRetryCountKey = attribute.Key("http.retry_count") ) // Semantic Convention for HTTP Server const ( // The URI scheme identifying the used protocol. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'http', 'https' HTTPSchemeKey = attribute.Key("http.scheme") // The full request target as passed in a HTTP request line or equivalent. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: '/path/12314/?q=ddds' HTTPTargetKey = attribute.Key("http.target") // The matched route (path template in the format used by the respective server // framework). See note below // // Type: string // RequirementLevel: ConditionallyRequired (If and only if it's available) // Stability: stable // Examples: '/users/:userID?', '{controller}/{action}/{id?}' // Note: 'http.route' MUST NOT be populated when this is not supported by the HTTP // server framework as the route attribute should have low-cardinality and the URI // path can NOT substitute it. 
HTTPRouteKey = attribute.Key("http.route") // The IP address of the original client behind all proxies, if known (e.g. from // [X-Forwarded-For](https://developer.mozilla.org/en- // US/docs/Web/HTTP/Headers/X-Forwarded-For)). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '83.164.160.102' // Note: This is not necessarily the same as `net.sock.peer.addr`, which would // identify the network-level peer, which may be a proxy. // This attribute should be set when a source of information different // from the one used for `net.sock.peer.addr`, is available even if that other // source just confirms the same value as `net.sock.peer.addr`. // Rationale: For `net.sock.peer.addr`, one typically does not know if it // comes from a proxy, reverse proxy, or the actual client. Setting // `http.client_ip` when it's the same as `net.sock.peer.addr` means that // one is at least somewhat confident that the address is not that of // the closest proxy. HTTPClientIPKey = attribute.Key("http.client_ip") ) // Attributes that exist for multiple DynamoDB request types. const ( // The keys in the `RequestItems` object field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'Users', 'Cats' AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") // The JSON-serialized value of each item in the `ConsumedCapacity` response // field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": // "string", "WriteCapacityUnits": number }' AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") // The JSON-serialized value of the `ItemCollectionMetrics` response field. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. // // Type: double // RequirementLevel: Optional // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. // // Type: double // RequirementLevel: Optional // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") // The value of the `ConsistentRead` request parameter. // // Type: boolean // RequirementLevel: Optional // Stability: stable AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") // The value of the `ProjectionExpression` request parameter. 
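// A sketch of an HTTP client span using the http.* keys above, assuming the
// request has completed and the URL has already been stripped of credentials;
// the method, URL, and status code are placeholders from the Examples in this
// file:
//
//	span.SetAttributes(
//		semconv.HTTPMethodKey.String("GET"),
//		semconv.HTTPURLKey.String("https://www.foo.bar/search?q=OpenTelemetry#SemConv"),
//		semconv.HTTPStatusCodeKey.Int(200),
//		semconv.HTTPFlavorHTTP11,
//	)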
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, // ProductReviews' AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") // The value of the `Limit` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") // The value of the `AttributesToGet` request parameter. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'lives', 'id' AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") // The value of the `IndexName` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'name_to_group' AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") // The value of the `Select` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'ALL_ATTRIBUTES', 'COUNT' AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") ) // DynamoDB.CreateTable const ( // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request // field // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": // number, "WriteCapacityUnits": number } }' AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request // field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], // "ProjectionType": "string" } }' AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") ) // DynamoDB.ListTables const ( // The value of the `ExclusiveStartTableName` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Users', 'CatsTable' AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") // The the number of items in the `TableNames` response parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 20 AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") ) // DynamoDB.Query const ( // The value of the `ScanIndexForward` request parameter. // // Type: boolean // RequirementLevel: Optional // Stability: stable AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") ) // DynamoDB.Scan const ( // The value of the `Segment` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") // The value of the `TotalSegments` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 100 AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") // The value of the `Count` response parameter. 
// // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") // The value of the `ScannedCount` response parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 50 AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") ) // DynamoDB.UpdateTable const ( // The JSON-serialized value of each item in the `AttributeDefinitions` request // field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") // The JSON-serialized value of each item in the the `GlobalSecondaryIndexUpdates` // request field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { // "AttributeName": "string", "KeyType": "string" } ], "Projection": { // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": // number } }' AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") ) // This document defines semantic conventions to apply when instrumenting the GraphQL implementation. They map GraphQL operations to attributes on a Span. const ( // The name of the operation being executed. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'findBookByID' GraphqlOperationNameKey = attribute.Key("graphql.operation.name") // The type of the operation being executed. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'query', 'mutation', 'subscription' GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") // The GraphQL document being executed. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'query findBookByID { bookByID(id: ?) { name } }' // Note: The value may be sanitized to exclude sensitive information. GraphqlDocumentKey = attribute.Key("graphql.document") ) var ( // GraphQL query GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") // GraphQL mutation GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") // GraphQL subscription GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") ) // This document defines the attributes used in messaging systems. const ( // A string identifying the messaging system. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' MessagingSystemKey = attribute.Key("messaging.system") // The message destination name. This might be equal to the span name but is // required nevertheless. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'MyQueue', 'MyTopic' MessagingDestinationKey = attribute.Key("messaging.destination") // The kind of message destination // // Type: Enum // RequirementLevel: ConditionallyRequired (If the message destination is either a // `queue` or `topic`.) // Stability: stable MessagingDestinationKindKey = attribute.Key("messaging.destination_kind") // A boolean that is true if the message destination is temporary. // // Type: boolean // RequirementLevel: ConditionallyRequired (If value is `true`. When missing, the // value is assumed to be `false`.) 
// Stability: stable MessagingTempDestinationKey = attribute.Key("messaging.temp_destination") // The name of the transport protocol. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'AMQP', 'MQTT' MessagingProtocolKey = attribute.Key("messaging.protocol") // The version of the transport protocol. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0.9.1' MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version") // Connection string. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'tibjmsnaming://localhost:7222', // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' MessagingURLKey = attribute.Key("messaging.url") // A value used by the messaging system as an identifier for the message, // represented as a string. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '452a7c7c7c7048c2f887f61572b18fc2' MessagingMessageIDKey = attribute.Key("messaging.message_id") // The [conversation ID](#conversations) identifying the conversation to which the // message belongs, represented as a string. Sometimes called "Correlation ID". // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MyConversationID' MessagingConversationIDKey = attribute.Key("messaging.conversation_id") // The (uncompressed) size of the message payload in bytes. Also use this // attribute if it is unknown whether the compressed or uncompressed payload size // is reported. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2738 MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes") // The compressed size of the message payload in bytes. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2048 MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes") ) var ( // A message sent to a queue MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") // A message sent to a topic MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") ) // Semantic convention for a consumer of messages received from a messaging system const ( // A string identifying the kind of message consumption as defined in the // [Operation names](#operation-names) section above. If the operation is "send", // this attribute MUST NOT be set, since the operation can be inferred from the // span kind in that case. // // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingOperationKey = attribute.Key("messaging.operation") // The identifier for the consumer receiving a message. For Kafka, set it to // `{messaging.kafka.consumer_group} - {messaging.kafka.client_id}`, if both are // present, or only `messaging.kafka.consumer_group`. For brokers, such as // RabbitMQ and Artemis, set it to the `client_id` of the client consuming the // message. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'mygroup - client-6' MessagingConsumerIDKey = attribute.Key("messaging.consumer_id") ) var ( // receive MessagingOperationReceive = MessagingOperationKey.String("receive") // process MessagingOperationProcess = MessagingOperationKey.String("process") ) // Attributes for RabbitMQ const ( // RabbitMQ message routing key. // // Type: string // RequirementLevel: ConditionallyRequired (If not empty.) 
// Stability: stable // Examples: 'myKey' MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key") ) // Attributes for Apache Kafka const ( // Message keys in Kafka are used for grouping alike messages to ensure they're // processed on the same partition. They differ from `messaging.message_id` in // that they're not unique. If the key is `null`, the attribute MUST NOT be set. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'myKey' // Note: If the key type is not string, it's string representation has to be // supplied for the attribute. If the key has no unambiguous, canonical string // form, don't include its value. MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key") // Name of the Kafka Consumer Group that is handling the message. Only applies to // consumers, not producers. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'my-group' MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group") // Client ID for the Consumer or Producer that is handling the message. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'client-5' MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") // Partition the message is sent to. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2 MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition") // A boolean that is true if the message is a tombstone. // // Type: boolean // RequirementLevel: ConditionallyRequired (If value is `true`. When missing, the // value is assumed to be `false`.) // Stability: stable MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone") ) // Attributes for Apache RocketMQ const ( // Namespace of RocketMQ resources, resources in different namespaces are // individual. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myNamespace' MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") // Name of the RocketMQ producer/consumer group that is handling the message. The // client type is identified by the SpanKind. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myConsumerGroup' MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") // The unique identifier for each client. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myhost@8742@s8083jm' MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") // Type of message. // // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message_type") // The secondary classifier of message besides topic. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'tagA' MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message_tag") // Key(s) of message, another way to mark message besides message id. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'keyA', 'keyB' MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message_keys") // Model of message consumption. This only applies to consumer spans. 
// // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") ) var ( // Normal message MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") // FIFO message MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") // Delay message MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") // Transaction message MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") ) var ( // Clustering consumption model MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") // Broadcasting consumption model MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") ) // This document defines semantic conventions for remote procedure calls. const ( // A string identifying the remoting system. See below for a list of well-known // identifiers. // // Type: Enum // RequirementLevel: Required // Stability: stable RPCSystemKey = attribute.Key("rpc.system") // The full (logical) name of the service being called, including its package // name, if applicable. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'myservice.EchoService' // Note: This is the logical name of the service from the RPC interface // perspective, which can be different from the name of any implementing class. // The `code.namespace` attribute may be used to store the latter (despite the // attribute name, it may include a class name; e.g., class with method actually // executing the call on the server side, RPC client stub class on the client // side). RPCServiceKey = attribute.Key("rpc.service") // The name of the (logical) method being called, must be equal to the $method // part in the span name. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'exampleMethod' // Note: This is the logical name of the method from the RPC interface // perspective, which can be different from the name of any implementing // method/function. The `code.function` attribute may be used to store the latter // (e.g., method actually executing the call on the server side, RPC client stub // method on the client side). RPCMethodKey = attribute.Key("rpc.method") ) var ( // gRPC RPCSystemGRPC = RPCSystemKey.String("grpc") // Java RMI RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") // .NET WCF RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") // Apache Dubbo RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") ) // Tech-specific attributes for gRPC. const ( // The [numeric status // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC // request. 
// // Type: Enum // RequirementLevel: Required // Stability: stable RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ) var ( // OK RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) // CANCELLED RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) // UNKNOWN RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) // INVALID_ARGUMENT RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) // DEADLINE_EXCEEDED RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) // NOT_FOUND RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) // ALREADY_EXISTS RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) // PERMISSION_DENIED RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) // RESOURCE_EXHAUSTED RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) // FAILED_PRECONDITION RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) // ABORTED RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) // OUT_OF_RANGE RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) // UNIMPLEMENTED RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) // INTERNAL RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) // UNAVAILABLE RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) // DATA_LOSS RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) // UNAUTHENTICATED RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) ) // Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). const ( // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC // 1.0 does not specify this, the value can be omitted. // // Type: string // RequirementLevel: ConditionallyRequired (If other than the default version // (`1.0`)) // Stability: stable // Examples: '2.0', '1.0' RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") // `id` property of request or response. Since protocol allows id to be int, // string, `null` or missing (for notifications), value is expected to be cast to // string for simplicity. Use empty string in case of `null` value. Omit entirely // if this is a notification. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '10', 'request-7', '' RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") // `error.code` property of response if it is an error response. // // Type: int // RequirementLevel: ConditionallyRequired (If response is not successful.) // Stability: stable // Examples: -32700, 100 RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") // `error.message` property of response if it is an error response. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Parse error', 'User already exists' RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") ) // RPC received/sent message. const ( // Whether this is a received or sent message. // // Type: Enum // RequirementLevel: Optional // Stability: stable MessageTypeKey = attribute.Key("message.type") // MUST be calculated as two different counters starting from `1` one for sent // messages and one for received message. // // Type: int // RequirementLevel: Optional // Stability: stable // Note: This way we guarantee that the values will be consistent between // different implementations. MessageIDKey = attribute.Key("message.id") // Compressed size of the message in bytes. 
// // Type: int // RequirementLevel: Optional // Stability: stable MessageCompressedSizeKey = attribute.Key("message.compressed_size") // Uncompressed size of the message in bytes. // // Type: int // RequirementLevel: Optional // Stability: stable MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") ) var ( // sent MessageTypeSent = MessageTypeKey.String("SENT") // received MessageTypeReceived = MessageTypeKey.String("RECEIVED") ) opentelemetry-go-1.21.0/semconv/v1.14.0/000077500000000000000000000000001452547353200174655ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.14.0/doc.go000066400000000000000000000016641452547353200205700ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package semconv implements OpenTelemetry semantic conventions. // // OpenTelemetry semantic conventions are agreed standardized naming // patterns for OpenTelemetry things. This package represents the conventions // as of the v1.14.0 version of the OpenTelemetry specification. package semconv // import "go.opentelemetry.io/otel/semconv/v1.14.0" opentelemetry-go-1.21.0/semconv/v1.14.0/exception.go000066400000000000000000000014301452547353200220100ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.14.0" const ( // ExceptionEventName is the name of the Span event representing an exception. ExceptionEventName = "exception" ) opentelemetry-go-1.21.0/semconv/v1.14.0/http.go000066400000000000000000000014401452547353200207720ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.14.0" // HTTP scheme attributes. 
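// The scheme values below are ordinary attribute.KeyValue constants, so an
// importing package can pass them straight to the trace API. A minimal,
// illustrative sketch (the span variable and the chosen method value are
// assumptions for the example, not part of this package):
//
//	span.SetAttributes(
//		semconv.HTTPSchemeHTTPS,             // http.scheme=https
//		semconv.HTTPMethodKey.String("GET"), // http.method=GET
//	)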
var ( HTTPSchemeHTTP = HTTPSchemeKey.String("http") HTTPSchemeHTTPS = HTTPSchemeKey.String("https") ) opentelemetry-go-1.21.0/semconv/v1.14.0/httpconv/000077500000000000000000000000001452547353200213325ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.14.0/httpconv/http.go000066400000000000000000000146571452547353200226550ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package httpconv provides OpenTelemetry HTTP semantic conventions for // tracing telemetry. package httpconv // import "go.opentelemetry.io/otel/semconv/v1.14.0/httpconv" import ( "net/http" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/semconv/internal/v2" semconv "go.opentelemetry.io/otel/semconv/v1.14.0" ) var ( nc = &internal.NetConv{ NetHostNameKey: semconv.NetHostNameKey, NetHostPortKey: semconv.NetHostPortKey, NetPeerNameKey: semconv.NetPeerNameKey, NetPeerPortKey: semconv.NetPeerPortKey, NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, NetSockPeerPortKey: semconv.NetSockPeerPortKey, NetTransportOther: semconv.NetTransportOther, NetTransportTCP: semconv.NetTransportTCP, NetTransportUDP: semconv.NetTransportUDP, NetTransportInProc: semconv.NetTransportInProc, } hc = &internal.HTTPConv{ NetConv: nc, EnduserIDKey: semconv.EnduserIDKey, HTTPClientIPKey: semconv.HTTPClientIPKey, HTTPFlavorKey: semconv.HTTPFlavorKey, HTTPMethodKey: semconv.HTTPMethodKey, HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey, HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey, HTTPRouteKey: semconv.HTTPRouteKey, HTTPSchemeHTTP: semconv.HTTPSchemeHTTP, HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS, HTTPStatusCodeKey: semconv.HTTPStatusCodeKey, HTTPTargetKey: semconv.HTTPTargetKey, HTTPURLKey: semconv.HTTPURLKey, HTTPUserAgentKey: semconv.HTTPUserAgentKey, } ) // ClientResponse returns trace attributes for an HTTP response received by a // client from a server. It will return the following attributes if the related // values are defined in resp: "http.status.code", // "http.response_content_length". // // This does not add all OpenTelemetry required attributes for an HTTP event, // it assumes ClientRequest was used to create the span with a complete set of // attributes. If a complete set of attributes can be generated using the // request contained in resp. For example: // // append(ClientResponse(resp), ClientRequest(resp.Request)...) func ClientResponse(resp *http.Response) []attribute.KeyValue { return hc.ClientResponse(resp) } // ClientRequest returns trace attributes for an HTTP request made by a client. // The following attributes are always returned: "http.url", "http.flavor", // "http.method", "net.peer.name". The following attributes are returned if the // related values are defined in req: "net.peer.port", "http.user_agent", // "http.request_content_length", "enduser.id". 
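// A minimal client-side sketch of how instrumentation might combine
// ClientRequest with span creation (the tracer variable is an illustrative
// assumption, not part of this package):
//
//	ctx, span := tracer.Start(req.Context(), "HTTP "+req.Method,
//		trace.WithSpanKind(trace.SpanKindClient),
//		trace.WithAttributes(httpconv.ClientRequest(req)...),
//	)
//	defer span.End()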
func ClientRequest(req *http.Request) []attribute.KeyValue { return hc.ClientRequest(req) } // ClientStatus returns a span status code and message for an HTTP status code // value received by a client. func ClientStatus(code int) (codes.Code, string) { return hc.ClientStatus(code) } // ServerRequest returns trace attributes for an HTTP request received by a // server. // // The server must be the primary server name if it is known. For example this // would be the ServerName directive // (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache // server, and the server_name directive // (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an // nginx server. More generically, the primary server name would be the host // header value that matches the default virtual host of an HTTP server. It // should include the host identifier and if a port is used to route to the // server that port identifier should be included as an appropriate port // suffix. // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. // // The following attributes are always returned: "http.method", "http.scheme", // "http.flavor", "http.target", "net.host.name". The following attributes are // returned if the related values are defined in req: "net.host.port", // "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", // "http.client_ip". func ServerRequest(server string, req *http.Request) []attribute.KeyValue { return hc.ServerRequest(server, req) } // ServerStatus returns a span status code and message for an HTTP status code // value returned by a server. Status codes in the 400-499 range are not // returned as errors. func ServerStatus(code int) (codes.Code, string) { return hc.ServerStatus(code) } // RequestHeader returns the contents of h as attributes. // // Instrumentation should require an explicit configuration of which headers to // capture and then prune what they pass here. Including all headers can be a // security risk - explicit configuration helps avoid leaking sensitive // information. // // The User-Agent header is already captured in the http.user_agent attribute // from ClientRequest and ServerRequest. Instrumentation may provide an option // to capture that header here even though it is not recommended. Otherwise, // instrumentation should filter that out of what is passed. func RequestHeader(h http.Header) []attribute.KeyValue { return hc.RequestHeader(h) } // ResponseHeader returns the contents of h as attributes. // // Instrumentation should require an explicit configuration of which headers to // capture and then prune what they pass here. Including all headers can be a // security risk - explicit configuration helps avoid leaking sensitive // information. // // The User-Agent header is already captured in the http.user_agent attribute // from ClientRequest and ServerRequest. Instrumentation may provide an option // to capture that header here even though it is not recommended. Otherwise, // instrumentation should filter that out of what is passed.
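// Taken together, the server-side helpers above can be used in an HTTP
// handler roughly as follows. This is an illustrative sketch only; the
// tracer variable and the fixed http.StatusOK status are assumptions, not
// part of this package:
//
//	func handle(w http.ResponseWriter, r *http.Request) {
//		ctx, span := tracer.Start(r.Context(), r.Method,
//			trace.WithSpanKind(trace.SpanKindServer),
//			trace.WithAttributes(httpconv.ServerRequest("", r)...),
//		)
//		defer span.End()
//
//		// ... handle the request using ctx ...
//		_ = ctx
//
//		span.SetStatus(httpconv.ServerStatus(http.StatusOK))
//	}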
func ResponseHeader(h http.Header) []attribute.KeyValue { return hc.ResponseHeader(h) } opentelemetry-go-1.21.0/semconv/v1.14.0/netconv/000077500000000000000000000000001452547353200211415ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.14.0/netconv/net.go000066400000000000000000000053211452547353200222570ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package netconv provides OpenTelemetry network semantic conventions for // tracing telemetry. package netconv // import "go.opentelemetry.io/otel/semconv/v1.14.0/netconv" import ( "net" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/semconv/internal/v2" semconv "go.opentelemetry.io/otel/semconv/v1.14.0" ) var nc = &internal.NetConv{ NetHostNameKey: semconv.NetHostNameKey, NetHostPortKey: semconv.NetHostPortKey, NetPeerNameKey: semconv.NetPeerNameKey, NetPeerPortKey: semconv.NetPeerPortKey, NetSockFamilyKey: semconv.NetSockFamilyKey, NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, NetSockPeerPortKey: semconv.NetSockPeerPortKey, NetSockHostAddrKey: semconv.NetSockHostAddrKey, NetSockHostPortKey: semconv.NetSockHostPortKey, NetTransportOther: semconv.NetTransportOther, NetTransportTCP: semconv.NetTransportTCP, NetTransportUDP: semconv.NetTransportUDP, NetTransportInProc: semconv.NetTransportInProc, } // Transport returns a trace attribute describing the transport protocol of the // passed network. See the net.Dial for information about acceptable network // values. func Transport(network string) attribute.KeyValue { return nc.Transport(network) } // Client returns trace attributes for a client network connection to address. // See net.Dial for information about acceptable address values, address should // be the same as the one used to create conn. If conn is nil, only network // peer attributes will be returned that describe address. Otherwise, the // socket level information about conn will also be included. func Client(address string, conn net.Conn) []attribute.KeyValue { return nc.Client(address, conn) } // Server returns trace attributes for a network listener listening at address. // See net.Listen for information about acceptable address values, address // should be the same as the one used to create ln. If ln is nil, only network // host attributes will be returned that describe address. Otherwise, the // socket level information about ln will also be included. func Server(address string, ln net.Listener) []attribute.KeyValue { return nc.Server(address, ln) } opentelemetry-go-1.21.0/semconv/v1.14.0/resource.go000066400000000000000000001137671452547353200216620ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.14.0" import "go.opentelemetry.io/otel/attribute" // The web browser in which the application represented by the resource is running. The `browser.*` attributes MUST be used only for resources that represent applications running in a web browser (regardless of whether running on a mobile or desktop device). const ( // Array of brand name and version separated by a space // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (`navigator.userAgentData.brands`). BrowserBrandsKey = attribute.Key("browser.brands") // The platform on which the browser is running // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Windows', 'macOS', 'Android' // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (`navigator.userAgentData.platform`). If unavailable, the legacy // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD // be left unset in order for the values to be consistent. // The list of possible values is defined in the [W3C User-Agent Client Hints // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). // Note that some (but not all) of these values can overlap with values in the // [`os.type` and `os.name` attributes](./os.md). However, for consistency, the // values in the `browser.platform` attribute should capture the exact value that // the user agent provides. BrowserPlatformKey = attribute.Key("browser.platform") // A boolean that is true if the browser is running on a mobile device // // Type: boolean // RequirementLevel: Optional // Stability: stable // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (`navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be // left unset. BrowserMobileKey = attribute.Key("browser.mobile") // Full user-agent string provided by the browser // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 // (KHTML, ' // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' // Note: The user-agent value SHOULD be provided only from browsers that do not // have a mechanism to retrieve brands and platform individually from the User- // Agent Client Hints API. To retrieve the value, the legacy `navigator.userAgent` // API can be used. BrowserUserAgentKey = attribute.Key("browser.user_agent") // Preferred language of the user using the browser // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'en', 'en-US', 'fr', 'fr-FR' // Note: This value is intended to be taken from the Navigator API // `navigator.language`. 
BrowserLanguageKey = attribute.Key("browser.language") ) // A cloud environment (e.g. GCP, Azure, AWS) const ( // Name of the cloud provider. // // Type: Enum // RequirementLevel: Optional // Stability: stable CloudProviderKey = attribute.Key("cloud.provider") // The cloud account ID the resource is assigned to. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '111111111111', 'opentelemetry' CloudAccountIDKey = attribute.Key("cloud.account.id") // The geographical region the resource is running. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-central1', 'us-east-1' // Note: Refer to your provider's docs to see the available regions, for example // [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc- // detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global- // infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en- // us/global-infrastructure/geographies/), [Google Cloud // regions](https://cloud.google.com/about/locations), or [Tencent Cloud // regions](https://intl.cloud.tencent.com/document/product/213/6091). CloudRegionKey = attribute.Key("cloud.region") // Cloud regions often have multiple, isolated locations known as zones to // increase availability. Availability zone represents the zone where the resource // is running. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-east-1c' // Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud. CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") // The cloud platform in use. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: The prefix of the service SHOULD match the one specified in // `cloud.provider`. 
CloudPlatformKey = attribute.Key("cloud.platform") ) var ( // Alibaba Cloud CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") // Amazon Web Services CloudProviderAWS = CloudProviderKey.String("aws") // Microsoft Azure CloudProviderAzure = CloudProviderKey.String("azure") // Google Cloud Platform CloudProviderGCP = CloudProviderKey.String("gcp") // Tencent Cloud CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") ) var ( // Alibaba Cloud Elastic Compute Service CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") // Alibaba Cloud Function Compute CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") // AWS Elastic Compute Cloud CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") // AWS Elastic Container Service CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") // AWS Elastic Kubernetes Service CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") // AWS Lambda CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") // AWS Elastic Beanstalk CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") // AWS App Runner CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") // Azure Virtual Machines CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") // Azure Container Instances CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") // Azure Kubernetes Service CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") // Azure Functions CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") // Azure App Service CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") // Google Cloud Compute Engine (GCE) CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") // Google Cloud Run CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") // Google Cloud Kubernetes Engine (GKE) CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") // Google Cloud Functions (GCF) CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") // Google Cloud App Engine (GAE) CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") // Tencent Cloud Cloud Virtual Machine (CVM) CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") // Tencent Cloud Elastic Kubernetes Service (EKS) CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") // Tencent Cloud Serverless Cloud Function (SCF) CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") ) // Resources used by AWS Elastic Container Service (ECS). const ( // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws. // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:ecs:us- // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo // perguide/clusters.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l // aunch_types.html) for an ECS task. 
// // Type: Enum // RequirementLevel: Optional // Stability: stable AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates // t/developerguide/task_definitions.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:ecs:us- // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") // The task definition family this task definition is a member of. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-family' AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") // The revision for this task definition. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '8', '26' AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") ) var ( // ec2 AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") // fargate AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") ) // Resources used by AWS Elastic Kubernetes Service (EKS). const ( // The ARN of an EKS cluster. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") ) // Resources specific to Amazon Web Services. const ( // The name(s) of the AWS log group(s) an application is writing to. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '/aws/lambda/my-function', 'opentelemetry-service' // Note: Multiple log groups must be supported for cases like multi-container // applications, where a single application has sidecar containers, and each write // to their own log group. AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") // The Amazon Resource Name(s) (ARN) of the AWS log group(s). // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' // Note: See the [log group ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- // access-control-overview-cwl.html#CWL_ARN_Format). AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") // The name(s) of the AWS log stream(s) an application is writing to. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") // The ARN(s) of the AWS log stream(s). // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' // Note: See the [log stream ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain // several log streams, so these ARNs necessarily identify both a log group and a // log stream. AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") ) // A container instance. const ( // Container name used by container runtime. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-autoconf' ContainerNameKey = attribute.Key("container.name") // Container ID. 
Usually a UUID, as for example used to [identify Docker // containers](https://docs.docker.com/engine/reference/run/#container- // identification). The UUID might be abbreviated. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'a3bf90e006b2' ContainerIDKey = attribute.Key("container.id") // The container runtime managing this container. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'docker', 'containerd', 'rkt' ContainerRuntimeKey = attribute.Key("container.runtime") // Name of the image the container was built on. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'gcr.io/opentelemetry/operator' ContainerImageNameKey = attribute.Key("container.image.name") // Container image tag. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0.1' ContainerImageTagKey = attribute.Key("container.image.tag") ) // The software deployment. const ( // Name of the [deployment // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka // deployment tier). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'staging', 'production' DeploymentEnvironmentKey = attribute.Key("deployment.environment") ) // The device on which the process represented by this resource is running. const ( // A unique identifier representing the device // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' // Note: The device identifier MUST only be defined using the values outlined // below. This value is not an advertising identifier and MUST NOT be used as // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the // Firebase Installation ID or a globally unique UUID which is persisted across // sessions in your application. More information can be found // [here](https://developer.android.com/training/articles/user-data-ids) on best // practices and exact implementation details. Caution should be taken when // storing personal data or anything which can identify a user. GDPR and data // protection laws may apply, ensure you do your own due diligence. DeviceIDKey = attribute.Key("device.id") // The model identifier for the device // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iPhone3,4', 'SM-G920F' // Note: It's recommended this value represents a machine readable version of the // model identifier rather than the market or consumer-friendly name of the // device. DeviceModelIdentifierKey = attribute.Key("device.model.identifier") // The marketing name for the device model // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' // Note: It's recommended this value represents a human readable version of the // device model rather than a machine readable alternative. DeviceModelNameKey = attribute.Key("device.model.name") // The name of the device manufacturer // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Apple', 'Samsung' // Note: The Android OS provides this field via // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). // iOS apps SHOULD hardcode the value `Apple`. 
DeviceManufacturerKey = attribute.Key("device.manufacturer") ) // A serverless instance. const ( // The name of the single function that this runtime instance executes. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'my-function', 'myazurefunctionapp/some-function-name' // Note: This is the name of the function as configured/deployed on the FaaS // platform and is usually different from the name of the callback // function (which may be stored in the // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span- // general.md#source-code-attributes) // span attributes). // For some cloud providers, the above definition is ambiguous. The following // definition of function name MUST be used for this attribute // (and consequently the span name) for the listed cloud providers/products: // * **Azure:** The full name `/`, i.e., function app name // followed by a forward slash followed by the function name (this form // can also be seen in the resource JSON for the function). // This means that a span attribute MUST be used, as an Azure function // app can host multiple functions that would usually share // a TracerProvider (see also the `faas.id` attribute). FaaSNameKey = attribute.Key("faas.name") // The unique ID of the single function that this runtime instance executes. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' // Note: On some cloud providers, it may not be possible to determine the full ID // at startup, // so consider setting `faas.id` as a span attribute instead. // The exact value to use for `faas.id` depends on the cloud provider: // * **AWS Lambda:** The function // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and- // namespaces.html). // Take care not to use the "invoked ARN" directly but replace any // [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration- // aliases.html) // with the resolved function version, as the same runtime instance may be // invokable with // multiple different aliases. // * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full- // resource-names) // * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en- // us/rest/api/resources/resources/get-by-id) of the invoked function, // *not* the function app, having the form // `/subscriptions//resourceGroups//providers/Microsoft.We // b/sites//functions/`. // This means that a span attribute MUST be used, as an Azure function app can // host multiple functions that would usually share // a TracerProvider. FaaSIDKey = attribute.Key("faas.id") // The immutable version of the function being executed. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '26', 'pinkfroid-00002' // Note: Depending on the cloud provider and platform, use: // * **AWS Lambda:** The [function // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration- // versions.html) // (an integer represented as a decimal string). // * **Google Cloud Run:** The // [revision](https://cloud.google.com/run/docs/managing/revisions) // (i.e., the function name plus the revision suffix). // * **Google Cloud Functions:** The value of the // [`K_REVISION` environment // variable](https://cloud.google.com/functions/docs/env- // var#runtime_environment_variables_set_automatically). // * **Azure Functions:** Not applicable. Do not set this attribute. 
FaaSVersionKey = attribute.Key("faas.version") // The execution environment ID as a string, that will be potentially reused for // other invocations to the same function/function version. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' // Note: * **AWS Lambda:** Use the (full) log stream name. FaaSInstanceKey = attribute.Key("faas.instance") // The amount of memory available to the serverless function in MiB. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 128 // Note: It's recommended to set this attribute since e.g. too little memory can // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this // information. FaaSMaxMemoryKey = attribute.Key("faas.max_memory") ) // A host is defined as a general computing instance. const ( // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud // provider. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-test' HostIDKey = attribute.Key("host.id") // Name of the host. On Unix systems, it may contain what the hostname command // returns, or the fully qualified hostname, or another name specified by the // user. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-test' HostNameKey = attribute.Key("host.name") // Type of host. For Cloud, this must be the machine type. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'n1-standard-1' HostTypeKey = attribute.Key("host.type") // The CPU architecture the host system is running on. // // Type: Enum // RequirementLevel: Optional // Stability: stable HostArchKey = attribute.Key("host.arch") // Name of the VM image or OS install the host was instantiated from. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' HostImageNameKey = attribute.Key("host.image.name") // VM image ID. For Cloud, this value is from the provider. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'ami-07b06b442921831e5' HostImageIDKey = attribute.Key("host.image.id") // The version string of the VM image as defined in [Version // Attributes](README.md#version-attributes). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0.1' HostImageVersionKey = attribute.Key("host.image.version") ) var ( // AMD64 HostArchAMD64 = HostArchKey.String("amd64") // ARM32 HostArchARM32 = HostArchKey.String("arm32") // ARM64 HostArchARM64 = HostArchKey.String("arm64") // Itanium HostArchIA64 = HostArchKey.String("ia64") // 32-bit PowerPC HostArchPPC32 = HostArchKey.String("ppc32") // 64-bit PowerPC HostArchPPC64 = HostArchKey.String("ppc64") // IBM z/Architecture HostArchS390x = HostArchKey.String("s390x") // 32-bit x86 HostArchX86 = HostArchKey.String("x86") ) // A Kubernetes Cluster. const ( // The name of the cluster. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-cluster' K8SClusterNameKey = attribute.Key("k8s.cluster.name") ) // A Kubernetes Node object. const ( // The name of the Node. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'node-1' K8SNodeNameKey = attribute.Key("k8s.node.name") // The UID of the Node. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' K8SNodeUIDKey = attribute.Key("k8s.node.uid") ) // A Kubernetes Namespace. const ( // The name of the namespace that the pod is running in. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'default' K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") ) // A Kubernetes Pod object. const ( // The UID of the Pod. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SPodUIDKey = attribute.Key("k8s.pod.uid") // The name of the Pod. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-pod-autoconf' K8SPodNameKey = attribute.Key("k8s.pod.name") ) // A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). const ( // The name of the Container from Pod specification, must be unique within a Pod. // Container runtime usually uses different globally unique name // (`container.name`). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'redis' K8SContainerNameKey = attribute.Key("k8s.container.name") // Number of times the container was restarted. This attribute can be used to // identify a particular container (running or stopped) within a container spec. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 0, 2 K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") ) // A Kubernetes ReplicaSet object. const ( // The UID of the ReplicaSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") // The name of the ReplicaSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") ) // A Kubernetes Deployment object. const ( // The UID of the Deployment. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") // The name of the Deployment. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") ) // A Kubernetes StatefulSet object. const ( // The UID of the StatefulSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") // The name of the StatefulSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") ) // A Kubernetes DaemonSet object. const ( // The UID of the DaemonSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") // The name of the DaemonSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") ) // A Kubernetes Job object. const ( // The UID of the Job. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SJobUIDKey = attribute.Key("k8s.job.uid") // The name of the Job. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SJobNameKey = attribute.Key("k8s.job.name") ) // A Kubernetes CronJob object. const ( // The UID of the CronJob. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") // The name of the CronJob. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") ) // The operating system (OS) on which the process represented by this resource is running. const ( // The operating system type. // // Type: Enum // RequirementLevel: Required // Stability: stable OSTypeKey = attribute.Key("os.type") // Human readable (not intended to be parsed) OS version information, like e.g. // reported by `ver` or `lsb_release -a` commands. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' OSDescriptionKey = attribute.Key("os.description") // Human readable operating system name. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iOS', 'Android', 'Ubuntu' OSNameKey = attribute.Key("os.name") // The version string of the operating system as defined in [Version // Attributes](../../resource/semantic_conventions/README.md#version-attributes). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '14.2.1', '18.04.1' OSVersionKey = attribute.Key("os.version") ) var ( // Microsoft Windows OSTypeWindows = OSTypeKey.String("windows") // Linux OSTypeLinux = OSTypeKey.String("linux") // Apple Darwin OSTypeDarwin = OSTypeKey.String("darwin") // FreeBSD OSTypeFreeBSD = OSTypeKey.String("freebsd") // NetBSD OSTypeNetBSD = OSTypeKey.String("netbsd") // OpenBSD OSTypeOpenBSD = OSTypeKey.String("openbsd") // DragonFly BSD OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") // HP-UX (Hewlett Packard Unix) OSTypeHPUX = OSTypeKey.String("hpux") // AIX (Advanced Interactive eXecutive) OSTypeAIX = OSTypeKey.String("aix") // SunOS, Oracle Solaris OSTypeSolaris = OSTypeKey.String("solaris") // IBM z/OS OSTypeZOS = OSTypeKey.String("z_os") ) // An operating system process. const ( // Process identifier (PID). // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 1234 ProcessPIDKey = attribute.Key("process.pid") // Parent Process identifier (PID). // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 111 ProcessParentPIDKey = attribute.Key("process.parent_pid") // The name of the process executable. On Linux based systems, can be set to the // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of // `GetProcessImageFileNameW`. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes below.) // Stability: stable // Examples: 'otelcol' ProcessExecutableNameKey = attribute.Key("process.executable.name") // The full path to the process executable. On Linux based systems, can be set to // the target of `proc/[pid]/exe`. On Windows, can be set to the result of // `GetProcessImageFileNameW`. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes below.) 
// Stability: stable // Examples: '/usr/bin/cmd/otelcol' ProcessExecutablePathKey = attribute.Key("process.executable.path") // The command used to launch the process (i.e. the command name). On Linux based // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, // can be set to the first parameter extracted from `GetCommandLineW`. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes below.) // Stability: stable // Examples: 'cmd/otelcol' ProcessCommandKey = attribute.Key("process.command") // The full command used to launch the process as a single string representing the // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not // set this if you have to assemble it just for monitoring; use // `process.command_args` instead. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes below.) // Stability: stable // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' ProcessCommandLineKey = attribute.Key("process.command_line") // All the command arguments (including the command/executable itself) as received // by the process. On Linux-based systems (and some other Unixoid systems // supporting procfs), can be set according to the list of null-delimited strings // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be // the full argv vector passed to `main`. // // Type: string[] // RequirementLevel: ConditionallyRequired (See alternative attributes below.) // Stability: stable // Examples: 'cmd/otecol', '--config=config.yaml' ProcessCommandArgsKey = attribute.Key("process.command_args") // The username of the user that owns the process. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'root' ProcessOwnerKey = attribute.Key("process.owner") ) // The single (language) runtime instance which is monitored. const ( // The name of the runtime of this process. For compiled native binaries, this // SHOULD be the name of the compiler. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'OpenJDK Runtime Environment' ProcessRuntimeNameKey = attribute.Key("process.runtime.name") // The version of the runtime of this process, as returned by the runtime without // modification. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '14.0.2' ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") // An additional description about the runtime of the process, for example a // specific vendor customization of the runtime environment. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") ) // A service instance. const ( // Logical name of the service. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'shoppingcart' // Note: MUST be the same for all instances of horizontally scaled services. If // the value was not specified, SDKs MUST fallback to `unknown_service:` // concatenated with [`process.executable.name`](process.md#process), e.g. // `unknown_service:bash`. If `process.executable.name` is not available, the // value MUST be set to `unknown_service`. ServiceNameKey = attribute.Key("service.name") // A namespace for `service.name`. 
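// An illustrative, commented usage sketch (not generated from the
// specification): it describes the local process and runtime with the keys
// above, assuming this package is imported as `semconv` together with
// go.opentelemetry.io/otel/sdk/resource and the standard library `os` and
// `runtime` packages; the executable name is a hypothetical value.
//
//	res := resource.NewWithAttributes(
//		semconv.SchemaURL,
//		semconv.ProcessPIDKey.Int(os.Getpid()),
//		semconv.ProcessExecutableNameKey.String("otelcol"),
//		semconv.ProcessRuntimeNameKey.String(runtime.Compiler),
//		semconv.ProcessRuntimeVersionKey.String(runtime.Version()),
//		semconv.OSTypeLinux,
//	)
//	_ = res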
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Shop' // Note: A string value having a meaning that helps to distinguish a group of // services, for example the team name that owns a group of services. // `service.name` is expected to be unique within the same namespace. If // `service.namespace` is not specified in the Resource then `service.name` is // expected to be unique for all services that have no explicit namespace defined // (so the empty/unspecified namespace is simply one more valid namespace). Zero- // length namespace string is assumed equal to unspecified namespace. ServiceNamespaceKey = attribute.Key("service.namespace") // The string ID of the service instance. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '627cc493-f310-47de-96bd-71410b7dec09' // Note: MUST be unique for each instance of the same // `service.namespace,service.name` pair (in other words // `service.namespace,service.name,service.instance.id` triplet MUST be globally // unique). The ID helps to distinguish instances of the same service that exist // at the same time (e.g. instances of a horizontally scaled service). It is // preferable for the ID to be persistent and stay the same for the lifetime of // the service instance, however it is acceptable that the ID is ephemeral and // changes during important lifetime events for the service (e.g. service // restarts). If the service has no inherent unique ID that can be used as the // value of this attribute it is recommended to generate a random Version 1 or // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use // Version 5, see RFC 4122 for more recommendations). ServiceInstanceIDKey = attribute.Key("service.instance.id") // The version string of the service API or implementation. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2.0.0' ServiceVersionKey = attribute.Key("service.version") ) // The telemetry SDK used to capture data recorded by the instrumentation libraries. const ( // The name of the telemetry SDK as defined above. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") // The language of the telemetry SDK. // // Type: Enum // RequirementLevel: Optional // Stability: stable TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") // The version string of the telemetry SDK. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.2.3' TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") // The version string of the auto instrumentation agent, if used. 
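// An illustrative, commented usage sketch (not generated from the
// specification): the minimal service-identity resource most backends expect,
// built from the keys above with the example values from the comments and
// assuming go.opentelemetry.io/otel/sdk/resource.
//
//	res := resource.NewWithAttributes(
//		semconv.SchemaURL,
//		semconv.ServiceNameKey.String("shoppingcart"),
//		semconv.ServiceNamespaceKey.String("Shop"),
//		semconv.ServiceInstanceIDKey.String("627cc493-f310-47de-96bd-71410b7dec09"),
//		semconv.ServiceVersionKey.String("2.0.0"),
//	)
//	_ = res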
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.2.3' TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") ) var ( // cpp TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") // dotnet TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") // erlang TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") // go TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") // java TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") // nodejs TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") // php TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") // python TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") // ruby TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") // webjs TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") // swift TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") ) // Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. const ( // The name of the web engine. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'WildFly' WebEngineNameKey = attribute.Key("webengine.name") // The version of the web engine. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '21.0.0' WebEngineVersionKey = attribute.Key("webengine.version") // Additional description of the web engine (e.g. detailed version and edition // information). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' WebEngineDescriptionKey = attribute.Key("webengine.description") ) opentelemetry-go-1.21.0/semconv/v1.14.0/schema.go000066400000000000000000000017141452547353200212570ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.14.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ const SchemaURL = "https://opentelemetry.io/schemas/1.14.0" opentelemetry-go-1.21.0/semconv/v1.14.0/trace.go000066400000000000000000001751771452547353200211340ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.14.0" import "go.opentelemetry.io/otel/attribute" // This document defines the shared attributes used to report a single exception associated with a span or log. const ( // The type of the exception (its fully-qualified class name, if applicable). The // dynamic type of the exception should be preferred over the static type in // languages that support it. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'java.net.ConnectException', 'OSError' ExceptionTypeKey = attribute.Key("exception.type") // The exception message. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" ExceptionMessageKey = attribute.Key("exception.message") // A stacktrace as a string in the natural representation for the language // runtime. The representation is to be determined and documented by each language // SIG. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test // exception\\n at ' // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' ExceptionStacktraceKey = attribute.Key("exception.stacktrace") ) // This document defines attributes for Events represented using Log Records. const ( // The name identifies the event. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'click', 'exception' EventNameKey = attribute.Key("event.name") // The domain identifies the context in which an event happened. An event name is // unique only within a domain. // // Type: Enum // RequirementLevel: Required // Stability: stable // Note: An `event.name` is supposed to be unique only in the context of an // `event.domain`, so this allows for two events in different domains to // have same `event.name`, yet be unrelated events. EventDomainKey = attribute.Key("event.domain") ) var ( // Events from browser apps EventDomainBrowser = EventDomainKey.String("browser") // Events from mobile apps EventDomainDevice = EventDomainKey.String("device") // Events from Kubernetes EventDomainK8S = EventDomainKey.String("k8s") ) // Span attributes used by AWS Lambda (in addition to general `faas` attributes). const ( // The full invoked ARN as provided on the `Context` passed to the function // (`Lambda-Runtime-Invoked-Function-ARN` header on the `/runtime/invocation/next` // applicable). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' // Note: This may be different from `faas.id` if an alias is involved. AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") ) // This document defines attributes for CloudEvents. CloudEvents is a specification on how to define event data in a standard way. These attributes can be attached to spans when performing operations with CloudEvents, regardless of the protocol being used. const ( // The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec // .md#id) uniquely identifies the event. 
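// An illustrative, commented usage sketch (not generated from the
// specification): one way to record the exception attributes defined earlier
// in this file as a span event, assuming `span` is an active trace.Span and
// go.opentelemetry.io/otel/trace is imported; the error text is hypothetical.
// The Span.RecordError method records an equivalent event.
//
//	span.AddEvent("exception", trace.WithAttributes(
//		semconv.ExceptionTypeKey.String("*os.PathError"),
//		semconv.ExceptionMessageKey.String("open config.yaml: no such file or directory"),
//	))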
// // Type: string // RequirementLevel: Required // Stability: stable // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") // The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.m // d#source-1) identifies the context in which an event happened. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my- // service' CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") // The [version of the CloudEvents specification](https://github.com/cloudevents/s // pec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.0' CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") // The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/sp // ec.md#type) contains a value describing the type of event related to the // originating occurrence. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2' CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") // The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec. // md#subject) of the event in the context of the event producer (identified by // source). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'mynewfile.jpg' CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") ) // This document defines semantic conventions for the OpenTracing Shim const ( // Parent-child Reference type // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: The causal relationship between a child Span and a parent Span. OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") ) var ( // The parent Span depends on the child Span in some capacity OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") // The parent Span does not depend in any way on the result of the child Span OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") ) // This document defines the attributes used to perform database client calls. const ( // An identifier for the database management system (DBMS) product being used. See // below for a list of well-known identifiers. // // Type: Enum // RequirementLevel: Required // Stability: stable DBSystemKey = attribute.Key("db.system") // The connection string used to connect to the database. It is recommended to // remove embedded credentials. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' DBConnectionStringKey = attribute.Key("db.connection_string") // Username for accessing the database. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'readonly_user', 'reporting_user' DBUserKey = attribute.Key("db.user") // The fully-qualified class name of the [Java Database Connectivity // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver // used to connect. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'org.postgresql.Driver', // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") // This attribute is used to report the name of the database being accessed. For // commands that switch the database, this should be set to the target database // (even if the command fails). // // Type: string // RequirementLevel: ConditionallyRequired (If applicable.) // Stability: stable // Examples: 'customers', 'main' // Note: In some SQL databases, the database name to be used is called "schema // name". In case there are multiple layers that could be considered for database // name (e.g. Oracle instance name and schema name), the database name to be used // is the more specific layer (e.g. Oracle schema name). DBNameKey = attribute.Key("db.name") // The database statement being executed. // // Type: string // RequirementLevel: ConditionallyRequired (If applicable and not explicitly // disabled via instrumentation configuration.) // Stability: stable // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' // Note: The value may be sanitized to exclude sensitive information. DBStatementKey = attribute.Key("db.statement") // The name of the operation being executed, e.g. the [MongoDB command // name](https://docs.mongodb.com/manual/reference/command/#database-operations) // such as `findAndModify`, or the SQL keyword. // // Type: string // RequirementLevel: ConditionallyRequired (If `db.statement` is not applicable.) // Stability: stable // Examples: 'findAndModify', 'HMSET', 'SELECT' // Note: When setting this to an SQL keyword, it is not recommended to attempt any // client-side parsing of `db.statement` just to get this property, but it should // be set if the operation name is provided by the library being instrumented. If // the SQL statement has an ambiguous operation, or performs more than one // operation, this value may be omitted. DBOperationKey = attribute.Key("db.operation") ) var ( // Some other SQL database. Fallback only. 
See notes DBSystemOtherSQL = DBSystemKey.String("other_sql") // Microsoft SQL Server DBSystemMSSQL = DBSystemKey.String("mssql") // MySQL DBSystemMySQL = DBSystemKey.String("mysql") // Oracle Database DBSystemOracle = DBSystemKey.String("oracle") // IBM DB2 DBSystemDB2 = DBSystemKey.String("db2") // PostgreSQL DBSystemPostgreSQL = DBSystemKey.String("postgresql") // Amazon Redshift DBSystemRedshift = DBSystemKey.String("redshift") // Apache Hive DBSystemHive = DBSystemKey.String("hive") // Cloudscape DBSystemCloudscape = DBSystemKey.String("cloudscape") // HyperSQL DataBase DBSystemHSQLDB = DBSystemKey.String("hsqldb") // Progress Database DBSystemProgress = DBSystemKey.String("progress") // SAP MaxDB DBSystemMaxDB = DBSystemKey.String("maxdb") // SAP HANA DBSystemHanaDB = DBSystemKey.String("hanadb") // Ingres DBSystemIngres = DBSystemKey.String("ingres") // FirstSQL DBSystemFirstSQL = DBSystemKey.String("firstsql") // EnterpriseDB DBSystemEDB = DBSystemKey.String("edb") // InterSystems Caché DBSystemCache = DBSystemKey.String("cache") // Adabas (Adaptable Database System) DBSystemAdabas = DBSystemKey.String("adabas") // Firebird DBSystemFirebird = DBSystemKey.String("firebird") // Apache Derby DBSystemDerby = DBSystemKey.String("derby") // FileMaker DBSystemFilemaker = DBSystemKey.String("filemaker") // Informix DBSystemInformix = DBSystemKey.String("informix") // InstantDB DBSystemInstantDB = DBSystemKey.String("instantdb") // InterBase DBSystemInterbase = DBSystemKey.String("interbase") // MariaDB DBSystemMariaDB = DBSystemKey.String("mariadb") // Netezza DBSystemNetezza = DBSystemKey.String("netezza") // Pervasive PSQL DBSystemPervasive = DBSystemKey.String("pervasive") // PointBase DBSystemPointbase = DBSystemKey.String("pointbase") // SQLite DBSystemSqlite = DBSystemKey.String("sqlite") // Sybase DBSystemSybase = DBSystemKey.String("sybase") // Teradata DBSystemTeradata = DBSystemKey.String("teradata") // Vertica DBSystemVertica = DBSystemKey.String("vertica") // H2 DBSystemH2 = DBSystemKey.String("h2") // ColdFusion IMQ DBSystemColdfusion = DBSystemKey.String("coldfusion") // Apache Cassandra DBSystemCassandra = DBSystemKey.String("cassandra") // Apache HBase DBSystemHBase = DBSystemKey.String("hbase") // MongoDB DBSystemMongoDB = DBSystemKey.String("mongodb") // Redis DBSystemRedis = DBSystemKey.String("redis") // Couchbase DBSystemCouchbase = DBSystemKey.String("couchbase") // CouchDB DBSystemCouchDB = DBSystemKey.String("couchdb") // Microsoft Azure Cosmos DB DBSystemCosmosDB = DBSystemKey.String("cosmosdb") // Amazon DynamoDB DBSystemDynamoDB = DBSystemKey.String("dynamodb") // Neo4j DBSystemNeo4j = DBSystemKey.String("neo4j") // Apache Geode DBSystemGeode = DBSystemKey.String("geode") // Elasticsearch DBSystemElasticsearch = DBSystemKey.String("elasticsearch") // Memcached DBSystemMemcached = DBSystemKey.String("memcached") // CockroachDB DBSystemCockroachdb = DBSystemKey.String("cockroachdb") // OpenSearch DBSystemOpensearch = DBSystemKey.String("opensearch") ) // Connection-level attributes for Microsoft SQL Server const ( // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en- // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) // connecting to. This name is used to determine the port of a named instance. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MSSQLSERVER' // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer // required (but still recommended if non-standard). 
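// An illustrative, commented usage sketch (not generated from the
// specification): annotating a database client span with the db.* keys above,
// assuming a trace.Tracer from go.opentelemetry.io/otel and hypothetical
// connection details drawn from the example values in the comments.
//
//	ctx, span := tracer.Start(ctx, "SELECT customers",
//		trace.WithSpanKind(trace.SpanKindClient),
//		trace.WithAttributes(
//			semconv.DBSystemPostgreSQL,
//			semconv.DBNameKey.String("customers"),
//			semconv.DBUserKey.String("readonly_user"),
//			semconv.DBStatementKey.String("SELECT * FROM wuser_table"),
//			semconv.DBOperationKey.String("SELECT"),
//		))
//	defer span.End()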
DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") ) // Call-level attributes for Cassandra const ( // The fetch size used for paging, i.e. how many rows will be returned at once. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 5000 DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") // The consistency level of the query. Based on consistency values from // [CQL](https://docs.datastax.com/en/cassandra- // oss/3.0/cassandra/dml/dmlConfigConsistency.html). // // Type: Enum // RequirementLevel: Optional // Stability: stable DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") // The name of the primary table that the operation is acting upon, including the // keyspace name (if applicable). // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'mytable' // Note: This mirrors the db.sql.table attribute but references cassandra rather // than sql. It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting upon an // anonymous table, or more than one table, this value MUST NOT be set. DBCassandraTableKey = attribute.Key("db.cassandra.table") // Whether or not the query is idempotent. // // Type: boolean // RequirementLevel: Optional // Stability: stable DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") // The number of times a query was speculatively executed. Not set or `0` if the // query was not executed speculatively. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 0, 2 DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") // The ID of the coordinating node for a query. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") // The data center of the coordinating node for a query. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-west-2' DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") ) var ( // all DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") // each_quorum DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") // quorum DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") // local_quorum DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") // one DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") // two DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") // three DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") // local_one DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") // any DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") // serial DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") // local_serial DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") ) // Call-level attributes for Redis const ( // The index of the database being accessed as used in the [`SELECT` // command](https://redis.io/commands/select), provided as an integer. 
To be used // instead of the generic `db.name` attribute. // // Type: int // RequirementLevel: ConditionallyRequired (If other than the default database // (`0`).) // Stability: stable // Examples: 0, 1, 15 DBRedisDBIndexKey = attribute.Key("db.redis.database_index") ) // Call-level attributes for MongoDB const ( // The collection being accessed within the database stated in `db.name`. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'customers', 'products' DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") ) // Call-level attributes for SQL databases const ( // The name of the primary table that the operation is acting upon, including the // database name (if applicable). // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'public.users', 'customers' // Note: It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting upon an // anonymous table, or more than one table, this value MUST NOT be set. DBSQLTableKey = attribute.Key("db.sql.table") ) // This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. const ( // Type of the trigger which caused this function execution. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: For the server/consumer span on the incoming side, // `faas.trigger` MUST be set. // Clients invoking FaaS instances usually cannot set `faas.trigger`, // since they would typically need to look in the payload to determine // the event type. If clients set it, it should be the same as the // trigger that corresponding incoming would have (i.e., this has // nothing to do with the underlying transport used to make the API // call to invoke the lambda, which is often HTTP). FaaSTriggerKey = attribute.Key("faas.trigger") // The execution ID of the current function execution. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' FaaSExecutionKey = attribute.Key("faas.execution") ) var ( // A response to some data source operation such as a database or filesystem read/write FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") // To provide an answer to an inbound HTTP request FaaSTriggerHTTP = FaaSTriggerKey.String("http") // A function is set to be executed when messages are sent to a messaging system FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") // A function is scheduled to be executed regularly FaaSTriggerTimer = FaaSTriggerKey.String("timer") // If none of the others apply FaaSTriggerOther = FaaSTriggerKey.String("other") ) // Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. const ( // The name of the source on which the triggering operation was performed. For // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos // DB to the database name. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myBucketName', 'myDBName' FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") // Describes the type of the operation that was performed on the data. 
// // Type: Enum // RequirementLevel: Required // Stability: stable FaaSDocumentOperationKey = attribute.Key("faas.document.operation") // A string containing the time when the data was accessed in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed // in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSDocumentTimeKey = attribute.Key("faas.document.time") // The document name/table subjected to the operation. For example, in Cloud // Storage or S3 is the name of the file, and in Cosmos DB the table name. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'myFile.txt', 'myTableName' FaaSDocumentNameKey = attribute.Key("faas.document.name") ) var ( // When a new object is created FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") // When an object is modified FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") // When an object is deleted FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") ) // Semantic Convention for FaaS scheduled to be executed regularly. const ( // A string containing the function invocation time in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed // in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSTimeKey = attribute.Key("faas.time") // A string containing the schedule period as [Cron Expression](https://docs.oracl // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0/5 * * * ? *' FaaSCronKey = attribute.Key("faas.cron") ) // Contains additional attributes for incoming FaaS spans. const ( // A boolean that is true if the serverless function is executed for the first // time (aka cold-start). // // Type: boolean // RequirementLevel: Optional // Stability: stable FaaSColdstartKey = attribute.Key("faas.coldstart") ) // Contains additional attributes for outgoing FaaS spans. const ( // The name of the invoked function. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'my-function' // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked // function. FaaSInvokedNameKey = attribute.Key("faas.invoked_name") // The cloud provider of the invoked function. // // Type: Enum // RequirementLevel: Required // Stability: stable // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked // function. FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") // The cloud region of the invoked function. // // Type: string // RequirementLevel: ConditionallyRequired (For some cloud providers, like AWS or // GCP, the region in which a function is hosted is essential to uniquely identify // the function and also part of its endpoint. Since it's part of the endpoint // being called, the region is always known to clients. In these cases, // `faas.invoked_region` MUST be set accordingly. If the region is unknown to the // client or not required for identifying the invoked function, setting // `faas.invoked_region` is optional.) // Stability: stable // Examples: 'eu-central-1' // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked // function. 
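// An illustrative, commented usage sketch (not generated from the
// specification): the attributes an outgoing FaaS invocation span could
// carry, using the keys above with the example values from the comments.
//
//	span.SetAttributes(
//		semconv.FaaSInvokedNameKey.String("my-function"),
//		semconv.FaaSInvokedProviderAWS,
//		semconv.FaaSInvokedRegionKey.String("eu-central-1"),
//	)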
FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") ) var ( // Alibaba Cloud FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") // Amazon Web Services FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") // Microsoft Azure FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") // Google Cloud Platform FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") // Tencent Cloud FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") ) // These attributes may be used for any network related operation. const ( // Transport protocol used. See note below. // // Type: Enum // RequirementLevel: Optional // Stability: stable NetTransportKey = attribute.Key("net.transport") // Application layer protocol used. The value SHOULD be normalized to lowercase. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'amqp', 'http', 'mqtt' NetAppProtocolNameKey = attribute.Key("net.app.protocol.name") // Version of the application layer protocol used. See note below. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '3.1.1' // Note: `net.app.protocol.version` refers to the version of the protocol used and // might be different from the protocol client's version. If the HTTP client used // has a version of `0.27.2`, but sends HTTP version `1.1`, this attribute should // be set to `1.1`. NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version") // Remote socket peer name. // // Type: string // RequirementLevel: Recommended (If available and different from `net.peer.name` // and if `net.sock.peer.addr` is set.) // Stability: stable // Examples: 'proxy.example.com' NetSockPeerNameKey = attribute.Key("net.sock.peer.name") // Remote socket peer address: IPv4 or IPv6 for internet protocols, path for local // communication, [etc](https://man7.org/linux/man- // pages/man7/address_families.7.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '127.0.0.1', '/tmp/mysql.sock' NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") // Remote socket peer port. // // Type: int // RequirementLevel: Recommended (If defined for the address family and if // different than `net.peer.port` and if `net.sock.peer.addr` is set.) // Stability: stable // Examples: 16456 NetSockPeerPortKey = attribute.Key("net.sock.peer.port") // Protocol [address family](https://man7.org/linux/man- // pages/man7/address_families.7.html) which is used for communication. // // Type: Enum // RequirementLevel: ConditionallyRequired (If different than `inet` and if any of // `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers of telemetry // SHOULD accept both IPv4 and IPv6 formats for the address in // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support // instrumentations that follow previous versions of this document.) // Stability: stable // Examples: 'inet6', 'bluetooth' NetSockFamilyKey = attribute.Key("net.sock.family") // Logical remote hostname, see note below. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'example.com' // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an extra // DNS lookup. NetPeerNameKey = attribute.Key("net.peer.name") // Logical remote port number // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 80, 8080, 443 NetPeerPortKey = attribute.Key("net.peer.port") // Logical local hostname or similar, see note below. 
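// An illustrative, commented usage sketch (not generated from the
// specification): combining the logical (net.peer.*) and socket
// (net.sock.peer.*) keys above on a client span; the host, address and port
// values are hypothetical.
//
//	span.SetAttributes(
//		semconv.NetTransportTCP,
//		semconv.NetPeerNameKey.String("example.com"),
//		semconv.NetPeerPortKey.Int(443),
//		semconv.NetSockPeerAddrKey.String("93.184.216.34"),
//		semconv.NetSockPeerPortKey.Int(443),
//	)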
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'localhost' NetHostNameKey = attribute.Key("net.host.name") // Logical local port number, preferably the one that the peer used to connect // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 8080 NetHostPortKey = attribute.Key("net.host.port") // Local socket address. Useful in case of a multi-IP host. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '192.168.0.1' NetSockHostAddrKey = attribute.Key("net.sock.host.addr") // Local socket port number. // // Type: int // RequirementLevel: Recommended (If defined for the address family and if // different than `net.host.port` and if `net.sock.host.addr` is set.) // Stability: stable // Examples: 35555 NetSockHostPortKey = attribute.Key("net.sock.host.port") // The internet connection type currently being used by the host. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'wifi' NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") // This describes more details regarding the connection.type. It may be the type // of cell technology connection, but it could be used for describing details // about a wifi connection. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'LTE' NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") // The name of the mobile carrier. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'sprint' NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") // The mobile carrier country code. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '310' NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") // The mobile carrier network code. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '001' NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") // The ISO 3166-1 alpha-2 2-character country code associated with the mobile // carrier network. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'DE' NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") ) var ( // ip_tcp NetTransportTCP = NetTransportKey.String("ip_tcp") // ip_udp NetTransportUDP = NetTransportKey.String("ip_udp") // Named or anonymous pipe. 
See note below NetTransportPipe = NetTransportKey.String("pipe") // In-process communication NetTransportInProc = NetTransportKey.String("inproc") // Something else (non IP-based) NetTransportOther = NetTransportKey.String("other") ) var ( // IPv4 address NetSockFamilyInet = NetSockFamilyKey.String("inet") // IPv6 address NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") // Unix domain socket path NetSockFamilyUnix = NetSockFamilyKey.String("unix") ) var ( // wifi NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") // wired NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") // cell NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") // unavailable NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") // unknown NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") ) var ( // GPRS NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") // EDGE NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") // UMTS NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") // CDMA NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") // EVDO Rel. 0 NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") // EVDO Rev. A NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") // CDMA2000 1XRTT NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") // HSDPA NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") // HSUPA NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") // HSPA NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") // IDEN NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") // EVDO Rev. B NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") // LTE NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") // EHRPD NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") // HSPAP NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") // GSM NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") // TD-SCDMA NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") // IWLAN NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") // 5G NR (New Radio) NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") // 5G NRNSA (New Radio Non-Standalone) NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") // LTE CA NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") ) // Operations that access some remote service. const ( // The [`service.name`](../../resource/semantic_conventions/README.md#service) of // the remote service. SHOULD be equal to the actual `service.name` resource // attribute of the remote service if any. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'AuthTokenCache' PeerServiceKey = attribute.Key("peer.service") ) // These attributes may be used for any operation with an authenticated and/or authorized enduser. const ( // Username or client_id extracted from the access token or // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the // inbound request from outside the system. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'username' EnduserIDKey = attribute.Key("enduser.id") // Actual/assumed role the client is making the request under extracted from token // or application security context. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'admin' EnduserRoleKey = attribute.Key("enduser.role") // Scopes or granted authorities the client currently possesses extracted from // token or application security context. The value would come from the scope // associated with an [OAuth 2.0 Access // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value // in a [SAML 2.0 Assertion](http://docs.oasis- // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'read:message, write:files' EnduserScopeKey = attribute.Key("enduser.scope") ) // These attributes may be used for any operation to store information about a thread that started a span. const ( // Current "managed" thread ID (as opposed to OS thread ID). // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 42 ThreadIDKey = attribute.Key("thread.id") // Current thread name. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'main' ThreadNameKey = attribute.Key("thread.name") ) // These attributes allow to report this unit of code and therefore to provide more context about the span. const ( // The method or function name, or equivalent (usually rightmost part of the code // unit's name). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'serveRequest' CodeFunctionKey = attribute.Key("code.function") // The "namespace" within which `code.function` is defined. Usually the qualified // class or module name, such that `code.namespace` + some separator + // `code.function` form a unique identifier for the code unit. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'com.example.MyHTTPService' CodeNamespaceKey = attribute.Key("code.namespace") // The source code file name that identifies the code unit as uniquely as possible // (preferably an absolute file path). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '/usr/local/MyApplication/content_root/app/index.php' CodeFilepathKey = attribute.Key("code.filepath") // The line number in `code.filepath` best representing the operation. It SHOULD // point within the code unit named in `code.function`. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 42 CodeLineNumberKey = attribute.Key("code.lineno") ) // This document defines semantic conventions for HTTP client and server Spans. const ( // HTTP request method. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'GET', 'POST', 'HEAD' HTTPMethodKey = attribute.Key("http.method") // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). // // Type: int // RequirementLevel: ConditionallyRequired (If and only if one was received/sent.) // Stability: stable // Examples: 200 HTTPStatusCodeKey = attribute.Key("http.status_code") // Kind of HTTP protocol used. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP` // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed. 
HTTPFlavorKey = attribute.Key("http.flavor") // Value of the [HTTP User-Agent](https://www.rfc- // editor.org/rfc/rfc9110.html#field.user-agent) header sent by the client. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' HTTPUserAgentKey = attribute.Key("http.user_agent") // The size of the request payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content- // length) header. For requests using transport encoding, this should be the // compressed size. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 3495 HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") // The size of the response payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content- // length) header. For requests using transport encoding, this should be the // compressed size. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 3495 HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") ) var ( // HTTP/1.0 HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") // HTTP/1.1 HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") // HTTP/2 HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") // HTTP/3 HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") // SPDY protocol HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") // QUIC protocol HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") ) // Semantic Convention for HTTP Client const ( // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. // Usually the fragment is not transmitted over HTTP, but if it is known, it // should be included nevertheless. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' // Note: `http.url` MUST NOT contain credentials passed via URL in form of // `https://username:password@www.example.com/`. In such case the attribute's // value should be `https://www.example.com/`. HTTPURLKey = attribute.Key("http.url") // The ordinal number of request re-sending attempt. // // Type: int // RequirementLevel: Recommended (if and only if request was retried.) // Stability: stable // Examples: 3 HTTPRetryCountKey = attribute.Key("http.retry_count") ) // Semantic Convention for HTTP Server const ( // The URI scheme identifying the used protocol. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'http', 'https' HTTPSchemeKey = attribute.Key("http.scheme") // The full request target as passed in a HTTP request line or equivalent. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: '/path/12314/?q=ddds' HTTPTargetKey = attribute.Key("http.target") // The matched route (path template in the format used by the respective server // framework). See note below // // Type: string // RequirementLevel: ConditionallyRequired (If and only if it's available) // Stability: stable // Examples: '/users/:userID?', '{controller}/{action}/{id?}' // Note: 'http.route' MUST NOT be populated when this is not supported by the HTTP // server framework as the route attribute should have low-cardinality and the URI // path can NOT substitute it. 
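// An illustrative, commented usage sketch (not generated from the
// specification): a minimal HTTP client span built from the keys above,
// assuming a trace.Tracer and hypothetical request values.
//
//	ctx, span := tracer.Start(ctx, "HTTP GET",
//		trace.WithSpanKind(trace.SpanKindClient),
//		trace.WithAttributes(
//			semconv.HTTPMethodKey.String("GET"),
//			semconv.HTTPURLKey.String("https://www.foo.bar/search?q=OpenTelemetry"),
//			semconv.HTTPFlavorHTTP11,
//		))
//	// ... perform the request, then record the response status ...
//	span.SetAttributes(semconv.HTTPStatusCodeKey.Int(200))
//	span.End()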
HTTPRouteKey = attribute.Key("http.route") // The IP address of the original client behind all proxies, if known (e.g. from // [X-Forwarded-For](https://developer.mozilla.org/en- // US/docs/Web/HTTP/Headers/X-Forwarded-For)). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '83.164.160.102' // Note: This is not necessarily the same as `net.sock.peer.addr`, which would // identify the network-level peer, which may be a proxy. // This attribute should be set when a source of information different // from the one used for `net.sock.peer.addr`, is available even if that other // source just confirms the same value as `net.sock.peer.addr`. // Rationale: For `net.sock.peer.addr`, one typically does not know if it // comes from a proxy, reverse proxy, or the actual client. Setting // `http.client_ip` when it's the same as `net.sock.peer.addr` means that // one is at least somewhat confident that the address is not that of // the closest proxy. HTTPClientIPKey = attribute.Key("http.client_ip") ) // Attributes that exist for multiple DynamoDB request types. const ( // The keys in the `RequestItems` object field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'Users', 'Cats' AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") // The JSON-serialized value of each item in the `ConsumedCapacity` response // field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": // "string", "WriteCapacityUnits": number }' AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") // The JSON-serialized value of the `ItemCollectionMetrics` response field. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. // // Type: double // RequirementLevel: Optional // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. // // Type: double // RequirementLevel: Optional // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") // The value of the `ConsistentRead` request parameter. // // Type: boolean // RequirementLevel: Optional // Stability: stable AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") // The value of the `ProjectionExpression` request parameter. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, // ProductReviews' AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") // The value of the `Limit` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") // The value of the `AttributesToGet` request parameter. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'lives', 'id' AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") // The value of the `IndexName` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'name_to_group' AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") // The value of the `Select` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'ALL_ATTRIBUTES', 'COUNT' AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") ) // DynamoDB.CreateTable const ( // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request // field // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": // number, "WriteCapacityUnits": number } }' AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request // field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], // "ProjectionType": "string" } }' AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") ) // DynamoDB.ListTables const ( // The value of the `ExclusiveStartTableName` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Users', 'CatsTable' AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") // The the number of items in the `TableNames` response parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 20 AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") ) // DynamoDB.Query const ( // The value of the `ScanIndexForward` request parameter. // // Type: boolean // RequirementLevel: Optional // Stability: stable AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") ) // DynamoDB.Scan const ( // The value of the `Segment` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") // The value of the `TotalSegments` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 100 AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") // The value of the `Count` response parameter. 
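// An illustrative, commented usage sketch (not generated from the
// specification): annotating a DynamoDB Query span with the keys above; the
// table and index names reuse the example values from the comments.
//
//	span.SetAttributes(
//		semconv.DBSystemDynamoDB,
//		semconv.AWSDynamoDBTableNamesKey.StringSlice([]string{"Users"}),
//		semconv.AWSDynamoDBIndexNameKey.String("name_to_group"),
//		semconv.AWSDynamoDBLimitKey.Int(10),
//		semconv.AWSDynamoDBSelectKey.String("ALL_ATTRIBUTES"),
//		semconv.AWSDynamoDBScanForwardKey.Bool(true),
//	)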
// // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") // The value of the `ScannedCount` response parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 50 AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") ) // DynamoDB.UpdateTable const ( // The JSON-serialized value of each item in the `AttributeDefinitions` request // field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") // The JSON-serialized value of each item in the the `GlobalSecondaryIndexUpdates` // request field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { // "AttributeName": "string", "KeyType": "string" } ], "Projection": { // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": // number } }' AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") ) // This document defines semantic conventions to apply when instrumenting the GraphQL implementation. They map GraphQL operations to attributes on a Span. const ( // The name of the operation being executed. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'findBookByID' GraphqlOperationNameKey = attribute.Key("graphql.operation.name") // The type of the operation being executed. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'query', 'mutation', 'subscription' GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") // The GraphQL document being executed. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'query findBookByID { bookByID(id: ?) { name } }' // Note: The value may be sanitized to exclude sensitive information. GraphqlDocumentKey = attribute.Key("graphql.document") ) var ( // GraphQL query GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") // GraphQL mutation GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") // GraphQL subscription GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") ) // This document defines the attributes used in messaging systems. const ( // A string identifying the messaging system. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' MessagingSystemKey = attribute.Key("messaging.system") // The message destination name. This might be equal to the span name but is // required nevertheless. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'MyQueue', 'MyTopic' MessagingDestinationKey = attribute.Key("messaging.destination") // The kind of message destination // // Type: Enum // RequirementLevel: ConditionallyRequired (If the message destination is either a // `queue` or `topic`.) // Stability: stable MessagingDestinationKindKey = attribute.Key("messaging.destination_kind") // A boolean that is true if the message destination is temporary. // // Type: boolean // RequirementLevel: ConditionallyRequired (If value is `true`. When missing, the // value is assumed to be `false`.) 
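// An illustrative, commented usage sketch (not generated from the
// specification): the GraphQL keys defined above applied to a server span,
// reusing the example values from the comments.
//
//	span.SetAttributes(
//		semconv.GraphqlOperationTypeQuery,
//		semconv.GraphqlOperationNameKey.String("findBookByID"),
//		semconv.GraphqlDocumentKey.String("query findBookByID { bookByID(id: ?) { name } }"),
//	)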
// Stability: stable MessagingTempDestinationKey = attribute.Key("messaging.temp_destination") // The name of the transport protocol. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'AMQP', 'MQTT' MessagingProtocolKey = attribute.Key("messaging.protocol") // The version of the transport protocol. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0.9.1' MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version") // Connection string. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'tibjmsnaming://localhost:7222', // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' MessagingURLKey = attribute.Key("messaging.url") // A value used by the messaging system as an identifier for the message, // represented as a string. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '452a7c7c7c7048c2f887f61572b18fc2' MessagingMessageIDKey = attribute.Key("messaging.message_id") // The [conversation ID](#conversations) identifying the conversation to which the // message belongs, represented as a string. Sometimes called "Correlation ID". // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MyConversationID' MessagingConversationIDKey = attribute.Key("messaging.conversation_id") // The (uncompressed) size of the message payload in bytes. Also use this // attribute if it is unknown whether the compressed or uncompressed payload size // is reported. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2738 MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes") // The compressed size of the message payload in bytes. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2048 MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes") ) var ( // A message sent to a queue MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") // A message sent to a topic MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") ) // Semantic convention for a consumer of messages received from a messaging system const ( // A string identifying the kind of message consumption as defined in the // [Operation names](#operation-names) section above. If the operation is "send", // this attribute MUST NOT be set, since the operation can be inferred from the // span kind in that case. // // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingOperationKey = attribute.Key("messaging.operation") // The identifier for the consumer receiving a message. For Kafka, set it to // `{messaging.kafka.consumer_group} - {messaging.kafka.client_id}`, if both are // present, or only `messaging.kafka.consumer_group`. For brokers, such as // RabbitMQ and Artemis, set it to the `client_id` of the client consuming the // message. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'mygroup - client-6' MessagingConsumerIDKey = attribute.Key("messaging.consumer_id") ) var ( // receive MessagingOperationReceive = MessagingOperationKey.String("receive") // process MessagingOperationProcess = MessagingOperationKey.String("process") ) // Attributes for RabbitMQ const ( // RabbitMQ message routing key. // // Type: string // RequirementLevel: ConditionallyRequired (If not empty.) 
// Stability: stable // Examples: 'myKey' MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key") ) // Attributes for Apache Kafka const ( // Message keys in Kafka are used for grouping alike messages to ensure they're // processed on the same partition. They differ from `messaging.message_id` in // that they're not unique. If the key is `null`, the attribute MUST NOT be set. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'myKey' // Note: If the key type is not string, its string representation has to be // supplied for the attribute. If the key has no unambiguous, canonical string // form, don't include its value. MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key") // Name of the Kafka Consumer Group that is handling the message. Only applies to // consumers, not producers. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'my-group' MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group") // Client ID for the Consumer or Producer that is handling the message. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'client-5' MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") // Partition the message is sent to. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2 MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition") // A boolean that is true if the message is a tombstone. // // Type: boolean // RequirementLevel: ConditionallyRequired (If value is `true`. When missing, the // value is assumed to be `false`.) // Stability: stable MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone") ) // Attributes for Apache RocketMQ const ( // Namespace of RocketMQ resources, resources in different namespaces are // individual. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myNamespace' MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") // Name of the RocketMQ producer/consumer group that is handling the message. The // client type is identified by the SpanKind. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myConsumerGroup' MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") // The unique identifier for each client. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myhost@8742@s8083jm' MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") // Type of message. // // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message_type") // The secondary classifier of message besides topic. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'tagA' MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message_tag") // Key(s) of message, another way to mark message besides message id. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'keyA', 'keyB' MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message_keys") // Model of message consumption. This only applies to consumer spans.
// // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") ) var ( // Normal message MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") // FIFO message MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") // Delay message MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") // Transaction message MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") ) var ( // Clustering consumption model MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") // Broadcasting consumption model MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") ) // This document defines semantic conventions for remote procedure calls. const ( // A string identifying the remoting system. See below for a list of well-known // identifiers. // // Type: Enum // RequirementLevel: Required // Stability: stable RPCSystemKey = attribute.Key("rpc.system") // The full (logical) name of the service being called, including its package // name, if applicable. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'myservice.EchoService' // Note: This is the logical name of the service from the RPC interface // perspective, which can be different from the name of any implementing class. // The `code.namespace` attribute may be used to store the latter (despite the // attribute name, it may include a class name; e.g., class with method actually // executing the call on the server side, RPC client stub class on the client // side). RPCServiceKey = attribute.Key("rpc.service") // The name of the (logical) method being called, must be equal to the $method // part in the span name. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'exampleMethod' // Note: This is the logical name of the method from the RPC interface // perspective, which can be different from the name of any implementing // method/function. The `code.function` attribute may be used to store the latter // (e.g., method actually executing the call on the server side, RPC client stub // method on the client side). RPCMethodKey = attribute.Key("rpc.method") ) var ( // gRPC RPCSystemGRPC = RPCSystemKey.String("grpc") // Java RMI RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") // .NET WCF RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") // Apache Dubbo RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") ) // Tech-specific attributes for gRPC. const ( // The [numeric status // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC // request. 
// // Type: Enum // RequirementLevel: Required // Stability: stable RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ) var ( // OK RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) // CANCELLED RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) // UNKNOWN RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) // INVALID_ARGUMENT RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) // DEADLINE_EXCEEDED RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) // NOT_FOUND RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) // ALREADY_EXISTS RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) // PERMISSION_DENIED RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) // RESOURCE_EXHAUSTED RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) // FAILED_PRECONDITION RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) // ABORTED RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) // OUT_OF_RANGE RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) // UNIMPLEMENTED RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) // INTERNAL RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) // UNAVAILABLE RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) // DATA_LOSS RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) // UNAUTHENTICATED RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) ) // Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). const ( // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC // 1.0 does not specify this, the value can be omitted. // // Type: string // RequirementLevel: ConditionallyRequired (If other than the default version // (`1.0`)) // Stability: stable // Examples: '2.0', '1.0' RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") // `id` property of request or response. Since protocol allows id to be int, // string, `null` or missing (for notifications), value is expected to be cast to // string for simplicity. Use empty string in case of `null` value. Omit entirely // if this is a notification. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '10', 'request-7', '' RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") // `error.code` property of response if it is an error response. // // Type: int // RequirementLevel: ConditionallyRequired (If response is not successful.) // Stability: stable // Examples: -32700, 100 RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") // `error.message` property of response if it is an error response. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Parse error', 'User already exists' RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") ) opentelemetry-go-1.21.0/semconv/v1.15.0/000077500000000000000000000000001452547353200174665ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.15.0/doc.go000066400000000000000000000016641452547353200205710ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package semconv implements OpenTelemetry semantic conventions. // // OpenTelemetry semantic conventions are agreed standardized naming // patterns for OpenTelemetry things. This package represents the conventions // as of the v1.15.0 version of the OpenTelemetry specification. package semconv // import "go.opentelemetry.io/otel/semconv/v1.15.0" opentelemetry-go-1.21.0/semconv/v1.15.0/exception.go000066400000000000000000000014301452547353200220110ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.15.0" const ( // ExceptionEventName is the name of the Span event representing an exception. ExceptionEventName = "exception" ) opentelemetry-go-1.21.0/semconv/v1.15.0/http.go000066400000000000000000000014401452547353200207730ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.15.0" // HTTP scheme attributes. var ( HTTPSchemeHTTP = HTTPSchemeKey.String("http") HTTPSchemeHTTPS = HTTPSchemeKey.String("https") ) opentelemetry-go-1.21.0/semconv/v1.15.0/httpconv/000077500000000000000000000000001452547353200213335ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.15.0/httpconv/http.go000066400000000000000000000146571452547353200226560ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package httpconv provides OpenTelemetry HTTP semantic conventions for // tracing telemetry. 
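//
// The following is an illustrative, hedged sketch of how HTTP server
// instrumentation might combine the helpers exported by this package
// (ServerRequest and ServerStatus are defined below); the middleware wrapper,
// the statusRecorder type, and the tracer name "httpconv-example" are
// hypothetical names used only for this example and are not part of the
// package:
//
//	package main
//
//	import (
//		"net/http"
//
//		"go.opentelemetry.io/otel"
//		"go.opentelemetry.io/otel/semconv/v1.15.0/httpconv"
//		"go.opentelemetry.io/otel/trace"
//	)
//
//	// statusRecorder captures the status code written by the wrapped handler.
//	type statusRecorder struct {
//		http.ResponseWriter
//		status int
//	}
//
//	func (r *statusRecorder) WriteHeader(code int) {
//		r.status = code
//		r.ResponseWriter.WriteHeader(code)
//	}
//
//	// traceMiddleware starts a server span for each request, attaches the
//	// semantic convention attributes derived from the request, and maps the
//	// response status code to a span status when the handler returns.
//	func traceMiddleware(next http.Handler) http.Handler {
//		return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
//			// An empty server name lets the request Host identify the server.
//			ctx, span := otel.Tracer("httpconv-example").Start(req.Context(), req.Method,
//				trace.WithSpanKind(trace.SpanKindServer),
//				trace.WithAttributes(httpconv.ServerRequest("", req)...),
//			)
//			defer span.End()
//
//			rec := &statusRecorder{ResponseWriter: w, status: http.StatusOK}
//			next.ServeHTTP(rec, req.WithContext(ctx))
//
//			// ServerStatus does not treat 4xx responses as span errors.
//			span.SetStatus(httpconv.ServerStatus(rec.status))
//		})
//	}
//
//	func main() {
//		handler := traceMiddleware(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
//			_, _ = w.Write([]byte("ok"))
//		}))
//		_ = http.ListenAndServe(":8080", handler)
//	}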
package httpconv // import "go.opentelemetry.io/otel/semconv/v1.15.0/httpconv" import ( "net/http" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/semconv/internal/v2" semconv "go.opentelemetry.io/otel/semconv/v1.15.0" ) var ( nc = &internal.NetConv{ NetHostNameKey: semconv.NetHostNameKey, NetHostPortKey: semconv.NetHostPortKey, NetPeerNameKey: semconv.NetPeerNameKey, NetPeerPortKey: semconv.NetPeerPortKey, NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, NetSockPeerPortKey: semconv.NetSockPeerPortKey, NetTransportOther: semconv.NetTransportOther, NetTransportTCP: semconv.NetTransportTCP, NetTransportUDP: semconv.NetTransportUDP, NetTransportInProc: semconv.NetTransportInProc, } hc = &internal.HTTPConv{ NetConv: nc, EnduserIDKey: semconv.EnduserIDKey, HTTPClientIPKey: semconv.HTTPClientIPKey, HTTPFlavorKey: semconv.HTTPFlavorKey, HTTPMethodKey: semconv.HTTPMethodKey, HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey, HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey, HTTPRouteKey: semconv.HTTPRouteKey, HTTPSchemeHTTP: semconv.HTTPSchemeHTTP, HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS, HTTPStatusCodeKey: semconv.HTTPStatusCodeKey, HTTPTargetKey: semconv.HTTPTargetKey, HTTPURLKey: semconv.HTTPURLKey, HTTPUserAgentKey: semconv.HTTPUserAgentKey, } ) // ClientResponse returns trace attributes for an HTTP response received by a // client from a server. It will return the following attributes if the related // values are defined in resp: "http.status.code", // "http.response_content_length". // // This does not add all OpenTelemetry required attributes for an HTTP event, // it assumes ClientRequest was used to create the span with a complete set of // attributes. If a complete set of attributes can be generated using the // request contained in resp. For example: // // append(ClientResponse(resp), ClientRequest(resp.Request)...) func ClientResponse(resp *http.Response) []attribute.KeyValue { return hc.ClientResponse(resp) } // ClientRequest returns trace attributes for an HTTP request made by a client. // The following attributes are always returned: "http.url", "http.flavor", // "http.method", "net.peer.name". The following attributes are returned if the // related values are defined in req: "net.peer.port", "http.user_agent", // "http.request_content_length", "enduser.id". func ClientRequest(req *http.Request) []attribute.KeyValue { return hc.ClientRequest(req) } // ClientStatus returns a span status code and message for an HTTP status code // value received by a client. func ClientStatus(code int) (codes.Code, string) { return hc.ClientStatus(code) } // ServerRequest returns trace attributes for an HTTP request received by a // server. // // The server must be the primary server name if it is known. For example this // would be the ServerName directive // (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache // server, and the server_name directive // (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an // nginx server. More generically, the primary server name would be the host // header value that matches the default virtual host of an HTTP server. It // should include the host identifier and if a port is used to route to the // server that port identifier should be included as an appropriate port // suffix. // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. 
// // The following attributes are always returned: "http.method", "http.scheme", // "http.flavor", "http.target", "net.host.name". The following attributes are // returned if they related values are defined in req: "net.host.port", // "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", // "http.client_ip". func ServerRequest(server string, req *http.Request) []attribute.KeyValue { return hc.ServerRequest(server, req) } // ServerStatus returns a span status code and message for an HTTP status code // value returned by a server. Status codes in the 400-499 range are not // returned as errors. func ServerStatus(code int) (codes.Code, string) { return hc.ServerStatus(code) } // RequestHeader returns the contents of h as attributes. // // Instrumentation should require an explicit configuration of which headers to // captured and then prune what they pass here. Including all headers can be a // security risk - explicit configuration helps avoid leaking sensitive // information. // // The User-Agent header is already captured in the http.user_agent attribute // from ClientRequest and ServerRequest. Instrumentation may provide an option // to capture that header here even though it is not recommended. Otherwise, // instrumentation should filter that out of what is passed. func RequestHeader(h http.Header) []attribute.KeyValue { return hc.RequestHeader(h) } // ResponseHeader returns the contents of h as attributes. // // Instrumentation should require an explicit configuration of which headers to // captured and then prune what they pass here. Including all headers can be a // security risk - explicit configuration helps avoid leaking sensitive // information. // // The User-Agent header is already captured in the http.user_agent attribute // from ClientRequest and ServerRequest. Instrumentation may provide an option // to capture that header here even though it is not recommended. Otherwise, // instrumentation should filter that out of what is passed. func ResponseHeader(h http.Header) []attribute.KeyValue { return hc.ResponseHeader(h) } opentelemetry-go-1.21.0/semconv/v1.15.0/netconv/000077500000000000000000000000001452547353200211425ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.15.0/netconv/net.go000066400000000000000000000053211452547353200222600ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package netconv provides OpenTelemetry network semantic conventions for // tracing telemetry. 
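//
// The following is an illustrative, hedged sketch of how a client might use
// the Transport and Client helpers defined below; the tracedDial wrapper, the
// target address, and the tracer name "netconv-example" are hypothetical names
// used only for this example and are not part of the package:
//
//	package main
//
//	import (
//		"context"
//		"net"
//
//		"go.opentelemetry.io/otel"
//		"go.opentelemetry.io/otel/semconv/v1.15.0/netconv"
//		"go.opentelemetry.io/otel/trace"
//	)
//
//	// tracedDial opens a TCP connection and records the network semantic
//	// convention attributes describing it on a client span.
//	func tracedDial(ctx context.Context, address string) (net.Conn, error) {
//		ctx, span := otel.Tracer("netconv-example").Start(ctx, "dial "+address,
//			trace.WithSpanKind(trace.SpanKindClient),
//			trace.WithAttributes(netconv.Transport("tcp")),
//		)
//		defer span.End()
//
//		conn, err := (&net.Dialer{}).DialContext(ctx, "tcp", address)
//		if err != nil {
//			// Without a connection only the peer attributes parsed from
//			// address are recorded.
//			span.SetAttributes(netconv.Client(address, nil)...)
//			return nil, err
//		}
//		// With a live connection, socket-level peer attributes are included.
//		span.SetAttributes(netconv.Client(address, conn)...)
//		return conn, nil
//	}
//
//	func main() {
//		if conn, err := tracedDial(context.Background(), "example.com:80"); err == nil {
//			_ = conn.Close()
//		}
//	}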
package netconv // import "go.opentelemetry.io/otel/semconv/v1.15.0/netconv" import ( "net" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/semconv/internal/v2" semconv "go.opentelemetry.io/otel/semconv/v1.15.0" ) var nc = &internal.NetConv{ NetHostNameKey: semconv.NetHostNameKey, NetHostPortKey: semconv.NetHostPortKey, NetPeerNameKey: semconv.NetPeerNameKey, NetPeerPortKey: semconv.NetPeerPortKey, NetSockFamilyKey: semconv.NetSockFamilyKey, NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, NetSockPeerPortKey: semconv.NetSockPeerPortKey, NetSockHostAddrKey: semconv.NetSockHostAddrKey, NetSockHostPortKey: semconv.NetSockHostPortKey, NetTransportOther: semconv.NetTransportOther, NetTransportTCP: semconv.NetTransportTCP, NetTransportUDP: semconv.NetTransportUDP, NetTransportInProc: semconv.NetTransportInProc, } // Transport returns a trace attribute describing the transport protocol of the // passed network. See the net.Dial for information about acceptable network // values. func Transport(network string) attribute.KeyValue { return nc.Transport(network) } // Client returns trace attributes for a client network connection to address. // See net.Dial for information about acceptable address values, address should // be the same as the one used to create conn. If conn is nil, only network // peer attributes will be returned that describe address. Otherwise, the // socket level information about conn will also be included. func Client(address string, conn net.Conn) []attribute.KeyValue { return nc.Client(address, conn) } // Server returns trace attributes for a network listener listening at address. // See net.Listen for information about acceptable address values, address // should be the same as the one used to create ln. If ln is nil, only network // host attributes will be returned that describe address. Otherwise, the // socket level information about ln will also be included. func Server(address string, ln net.Listener) []attribute.KeyValue { return nc.Server(address, ln) } opentelemetry-go-1.21.0/semconv/v1.15.0/resource.go000066400000000000000000001162011452547353200216450ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.15.0" import "go.opentelemetry.io/otel/attribute" // The web browser in which the application represented by the resource is running. The `browser.*` attributes MUST be used only for resources that represent applications running in a web browser (regardless of whether running on a mobile or desktop device). const ( // Array of brand name and version separated by a space // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (`navigator.userAgentData.brands`). 
BrowserBrandsKey = attribute.Key("browser.brands") // The platform on which the browser is running // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Windows', 'macOS', 'Android' // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (`navigator.userAgentData.platform`). If unavailable, the legacy // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD // be left unset in order for the values to be consistent. // The list of possible values is defined in the [W3C User-Agent Client Hints // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). // Note that some (but not all) of these values can overlap with values in the // [`os.type` and `os.name` attributes](./os.md). However, for consistency, the // values in the `browser.platform` attribute should capture the exact value that // the user agent provides. BrowserPlatformKey = attribute.Key("browser.platform") // A boolean that is true if the browser is running on a mobile device // // Type: boolean // RequirementLevel: Optional // Stability: stable // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (`navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be // left unset. BrowserMobileKey = attribute.Key("browser.mobile") // Full user-agent string provided by the browser // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 // (KHTML, ' // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' // Note: The user-agent value SHOULD be provided only from browsers that do not // have a mechanism to retrieve brands and platform individually from the User- // Agent Client Hints API. To retrieve the value, the legacy `navigator.userAgent` // API can be used. BrowserUserAgentKey = attribute.Key("browser.user_agent") // Preferred language of the user using the browser // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'en', 'en-US', 'fr', 'fr-FR' // Note: This value is intended to be taken from the Navigator API // `navigator.language`. BrowserLanguageKey = attribute.Key("browser.language") ) // A cloud environment (e.g. GCP, Azure, AWS) const ( // Name of the cloud provider. // // Type: Enum // RequirementLevel: Optional // Stability: stable CloudProviderKey = attribute.Key("cloud.provider") // The cloud account ID the resource is assigned to. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '111111111111', 'opentelemetry' CloudAccountIDKey = attribute.Key("cloud.account.id") // The geographical region the resource is running. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-central1', 'us-east-1' // Note: Refer to your provider's docs to see the available regions, for example // [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc- // detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global- // infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en- // us/global-infrastructure/geographies/), [Google Cloud // regions](https://cloud.google.com/about/locations), or [Tencent Cloud // regions](https://intl.cloud.tencent.com/document/product/213/6091). 
CloudRegionKey = attribute.Key("cloud.region") // Cloud regions often have multiple, isolated locations known as zones to // increase availability. Availability zone represents the zone where the resource // is running. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-east-1c' // Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud. CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") // The cloud platform in use. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: The prefix of the service SHOULD match the one specified in // `cloud.provider`. CloudPlatformKey = attribute.Key("cloud.platform") ) var ( // Alibaba Cloud CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") // Amazon Web Services CloudProviderAWS = CloudProviderKey.String("aws") // Microsoft Azure CloudProviderAzure = CloudProviderKey.String("azure") // Google Cloud Platform CloudProviderGCP = CloudProviderKey.String("gcp") // Tencent Cloud CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") ) var ( // Alibaba Cloud Elastic Compute Service CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") // Alibaba Cloud Function Compute CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") // AWS Elastic Compute Cloud CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") // AWS Elastic Container Service CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") // AWS Elastic Kubernetes Service CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") // AWS Lambda CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") // AWS Elastic Beanstalk CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") // AWS App Runner CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") // Azure Virtual Machines CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") // Azure Container Instances CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") // Azure Kubernetes Service CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") // Azure Functions CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") // Azure App Service CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") // Google Cloud Compute Engine (GCE) CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") // Google Cloud Run CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") // Google Cloud Kubernetes Engine (GKE) CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") // Google Cloud Functions (GCF) CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") // Google Cloud App Engine (GAE) CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") // Tencent Cloud Cloud Virtual Machine (CVM) CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") // Tencent Cloud Elastic Kubernetes Service (EKS) CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") // Tencent Cloud Serverless Cloud Function (SCF) CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") ) // Resources used by AWS Elastic Container Service (ECS). const ( // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws. // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:ecs:us- // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo // perguide/clusters.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l // aunch_types.html) for an ECS task. // // Type: Enum // RequirementLevel: Optional // Stability: stable AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates // t/developerguide/task_definitions.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:ecs:us- // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") // The task definition family this task definition is a member of. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-family' AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") // The revision for this task definition. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '8', '26' AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") ) var ( // ec2 AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") // fargate AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") ) // Resources used by AWS Elastic Kubernetes Service (EKS). const ( // The ARN of an EKS cluster. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") ) // Resources specific to Amazon Web Services. const ( // The name(s) of the AWS log group(s) an application is writing to. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '/aws/lambda/my-function', 'opentelemetry-service' // Note: Multiple log groups must be supported for cases like multi-container // applications, where a single application has sidecar containers, and each write // to their own log group. AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") // The Amazon Resource Name(s) (ARN) of the AWS log group(s). // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' // Note: See the [log group ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- // access-control-overview-cwl.html#CWL_ARN_Format). AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") // The name(s) of the AWS log stream(s) an application is writing to. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") // The ARN(s) of the AWS log stream(s). 
// // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' // Note: See the [log stream ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain // several log streams, so these ARNs necessarily identify both a log group and a // log stream. AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") ) // A container instance. const ( // Container name used by container runtime. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-autoconf' ContainerNameKey = attribute.Key("container.name") // Container ID. Usually a UUID, as for example used to [identify Docker // containers](https://docs.docker.com/engine/reference/run/#container- // identification). The UUID might be abbreviated. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'a3bf90e006b2' ContainerIDKey = attribute.Key("container.id") // The container runtime managing this container. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'docker', 'containerd', 'rkt' ContainerRuntimeKey = attribute.Key("container.runtime") // Name of the image the container was built on. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'gcr.io/opentelemetry/operator' ContainerImageNameKey = attribute.Key("container.image.name") // Container image tag. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0.1' ContainerImageTagKey = attribute.Key("container.image.tag") ) // The software deployment. const ( // Name of the [deployment // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka // deployment tier). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'staging', 'production' DeploymentEnvironmentKey = attribute.Key("deployment.environment") ) // The device on which the process represented by this resource is running. const ( // A unique identifier representing the device // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' // Note: The device identifier MUST only be defined using the values outlined // below. This value is not an advertising identifier and MUST NOT be used as // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the // Firebase Installation ID or a globally unique UUID which is persisted across // sessions in your application. More information can be found // [here](https://developer.android.com/training/articles/user-data-ids) on best // practices and exact implementation details. Caution should be taken when // storing personal data or anything which can identify a user. GDPR and data // protection laws may apply, ensure you do your own due diligence. 
DeviceIDKey = attribute.Key("device.id") // The model identifier for the device // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iPhone3,4', 'SM-G920F' // Note: It's recommended this value represents a machine readable version of the // model identifier rather than the market or consumer-friendly name of the // device. DeviceModelIdentifierKey = attribute.Key("device.model.identifier") // The marketing name for the device model // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' // Note: It's recommended this value represents a human readable version of the // device model rather than a machine readable alternative. DeviceModelNameKey = attribute.Key("device.model.name") // The name of the device manufacturer // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Apple', 'Samsung' // Note: The Android OS provides this field via // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). // iOS apps SHOULD hardcode the value `Apple`. DeviceManufacturerKey = attribute.Key("device.manufacturer") ) // A serverless instance. const ( // The name of the single function that this runtime instance executes. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'my-function', 'myazurefunctionapp/some-function-name' // Note: This is the name of the function as configured/deployed on the FaaS // platform and is usually different from the name of the callback // function (which may be stored in the // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span- // general.md#source-code-attributes) // span attributes). // For some cloud providers, the above definition is ambiguous. The following // definition of function name MUST be used for this attribute // (and consequently the span name) for the listed cloud providers/products: // * **Azure:** The full name `/`, i.e., function app name // followed by a forward slash followed by the function name (this form // can also be seen in the resource JSON for the function). // This means that a span attribute MUST be used, as an Azure function // app can host multiple functions that would usually share // a TracerProvider (see also the `faas.id` attribute). FaaSNameKey = attribute.Key("faas.name") // The unique ID of the single function that this runtime instance executes. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' // Note: On some cloud providers, it may not be possible to determine the full ID // at startup, // so consider setting `faas.id` as a span attribute instead. // The exact value to use for `faas.id` depends on the cloud provider: // * **AWS Lambda:** The function // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and- // namespaces.html). // Take care not to use the "invoked ARN" directly but replace any // [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration- // aliases.html) // with the resolved function version, as the same runtime instance may be // invokable with // multiple different aliases. 
// * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full- // resource-names) // * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en- // us/rest/api/resources/resources/get-by-id) of the invoked function, // *not* the function app, having the form // `/subscriptions//resourceGroups//providers/Microsoft.We // b/sites//functions/`. // This means that a span attribute MUST be used, as an Azure function app can // host multiple functions that would usually share // a TracerProvider. FaaSIDKey = attribute.Key("faas.id") // The immutable version of the function being executed. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '26', 'pinkfroid-00002' // Note: Depending on the cloud provider and platform, use: // * **AWS Lambda:** The [function // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration- // versions.html) // (an integer represented as a decimal string). // * **Google Cloud Run:** The // [revision](https://cloud.google.com/run/docs/managing/revisions) // (i.e., the function name plus the revision suffix). // * **Google Cloud Functions:** The value of the // [`K_REVISION` environment // variable](https://cloud.google.com/functions/docs/env- // var#runtime_environment_variables_set_automatically). // * **Azure Functions:** Not applicable. Do not set this attribute. FaaSVersionKey = attribute.Key("faas.version") // The execution environment ID as a string, that will be potentially reused for // other invocations to the same function/function version. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' // Note: * **AWS Lambda:** Use the (full) log stream name. FaaSInstanceKey = attribute.Key("faas.instance") // The amount of memory available to the serverless function in MiB. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 128 // Note: It's recommended to set this attribute since e.g. too little memory can // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this // information. FaaSMaxMemoryKey = attribute.Key("faas.max_memory") ) // A host is defined as a general computing instance. const ( // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud // provider. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-test' HostIDKey = attribute.Key("host.id") // Name of the host. On Unix systems, it may contain what the hostname command // returns, or the fully qualified hostname, or another name specified by the // user. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-test' HostNameKey = attribute.Key("host.name") // Type of host. For Cloud, this must be the machine type. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'n1-standard-1' HostTypeKey = attribute.Key("host.type") // The CPU architecture the host system is running on. // // Type: Enum // RequirementLevel: Optional // Stability: stable HostArchKey = attribute.Key("host.arch") // Name of the VM image or OS install the host was instantiated from. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' HostImageNameKey = attribute.Key("host.image.name") // VM image ID. 
For Cloud, this value is from the provider. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'ami-07b06b442921831e5' HostImageIDKey = attribute.Key("host.image.id") // The version string of the VM image as defined in [Version // Attributes](README.md#version-attributes). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0.1' HostImageVersionKey = attribute.Key("host.image.version") ) var ( // AMD64 HostArchAMD64 = HostArchKey.String("amd64") // ARM32 HostArchARM32 = HostArchKey.String("arm32") // ARM64 HostArchARM64 = HostArchKey.String("arm64") // Itanium HostArchIA64 = HostArchKey.String("ia64") // 32-bit PowerPC HostArchPPC32 = HostArchKey.String("ppc32") // 64-bit PowerPC HostArchPPC64 = HostArchKey.String("ppc64") // IBM z/Architecture HostArchS390x = HostArchKey.String("s390x") // 32-bit x86 HostArchX86 = HostArchKey.String("x86") ) // A Kubernetes Cluster. const ( // The name of the cluster. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-cluster' K8SClusterNameKey = attribute.Key("k8s.cluster.name") ) // A Kubernetes Node object. const ( // The name of the Node. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'node-1' K8SNodeNameKey = attribute.Key("k8s.node.name") // The UID of the Node. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' K8SNodeUIDKey = attribute.Key("k8s.node.uid") ) // A Kubernetes Namespace. const ( // The name of the namespace that the pod is running in. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'default' K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") ) // A Kubernetes Pod object. const ( // The UID of the Pod. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SPodUIDKey = attribute.Key("k8s.pod.uid") // The name of the Pod. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-pod-autoconf' K8SPodNameKey = attribute.Key("k8s.pod.name") ) // A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). const ( // The name of the Container from Pod specification, must be unique within a Pod. // Container runtime usually uses different globally unique name // (`container.name`). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'redis' K8SContainerNameKey = attribute.Key("k8s.container.name") // Number of times the container was restarted. This attribute can be used to // identify a particular container (running or stopped) within a container spec. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 0, 2 K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") ) // A Kubernetes ReplicaSet object. const ( // The UID of the ReplicaSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") // The name of the ReplicaSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") ) // A Kubernetes Deployment object. const ( // The UID of the Deployment. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") // The name of the Deployment. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") ) // A Kubernetes StatefulSet object. const ( // The UID of the StatefulSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") // The name of the StatefulSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") ) // A Kubernetes DaemonSet object. const ( // The UID of the DaemonSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") // The name of the DaemonSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") ) // A Kubernetes Job object. const ( // The UID of the Job. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SJobUIDKey = attribute.Key("k8s.job.uid") // The name of the Job. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SJobNameKey = attribute.Key("k8s.job.name") ) // A Kubernetes CronJob object. const ( // The UID of the CronJob. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") // The name of the CronJob. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") ) // The operating system (OS) on which the process represented by this resource is running. const ( // The operating system type. // // Type: Enum // RequirementLevel: Required // Stability: stable OSTypeKey = attribute.Key("os.type") // Human readable (not intended to be parsed) OS version information, like e.g. // reported by `ver` or `lsb_release -a` commands. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' OSDescriptionKey = attribute.Key("os.description") // Human readable operating system name. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iOS', 'Android', 'Ubuntu' OSNameKey = attribute.Key("os.name") // The version string of the operating system as defined in [Version // Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '14.2.1', '18.04.1' OSVersionKey = attribute.Key("os.version") ) var ( // Microsoft Windows OSTypeWindows = OSTypeKey.String("windows") // Linux OSTypeLinux = OSTypeKey.String("linux") // Apple Darwin OSTypeDarwin = OSTypeKey.String("darwin") // FreeBSD OSTypeFreeBSD = OSTypeKey.String("freebsd") // NetBSD OSTypeNetBSD = OSTypeKey.String("netbsd") // OpenBSD OSTypeOpenBSD = OSTypeKey.String("openbsd") // DragonFly BSD OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") // HP-UX (Hewlett Packard Unix) OSTypeHPUX = OSTypeKey.String("hpux") // AIX (Advanced Interactive eXecutive) OSTypeAIX = OSTypeKey.String("aix") // SunOS, Oracle Solaris OSTypeSolaris = OSTypeKey.String("solaris") // IBM z/OS OSTypeZOS = OSTypeKey.String("z_os") ) // An operating system process. const ( // Process identifier (PID). // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 1234 ProcessPIDKey = attribute.Key("process.pid") // Parent Process identifier (PID). // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 111 ProcessParentPIDKey = attribute.Key("process.parent_pid") // The name of the process executable. On Linux based systems, can be set to the // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of // `GetProcessImageFileNameW`. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes below.) // Stability: stable // Examples: 'otelcol' ProcessExecutableNameKey = attribute.Key("process.executable.name") // The full path to the process executable. On Linux based systems, can be set to // the target of `proc/[pid]/exe`. On Windows, can be set to the result of // `GetProcessImageFileNameW`. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes below.) // Stability: stable // Examples: '/usr/bin/cmd/otelcol' ProcessExecutablePathKey = attribute.Key("process.executable.path") // The command used to launch the process (i.e. the command name). On Linux based // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, // can be set to the first parameter extracted from `GetCommandLineW`. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes below.) // Stability: stable // Examples: 'cmd/otelcol' ProcessCommandKey = attribute.Key("process.command") // The full command used to launch the process as a single string representing the // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not // set this if you have to assemble it just for monitoring; use // `process.command_args` instead. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes below.) // Stability: stable // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' ProcessCommandLineKey = attribute.Key("process.command_line") // All the command arguments (including the command/executable itself) as received // by the process. On Linux-based systems (and some other Unixoid systems // supporting procfs), can be set according to the list of null-delimited strings // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be // the full argv vector passed to `main`. // // Type: string[] // RequirementLevel: ConditionallyRequired (See alternative attributes below.) 
// Stability: stable // Examples: 'cmd/otecol', '--config=config.yaml' ProcessCommandArgsKey = attribute.Key("process.command_args") // The username of the user that owns the process. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'root' ProcessOwnerKey = attribute.Key("process.owner") ) // The single (language) runtime instance which is monitored. const ( // The name of the runtime of this process. For compiled native binaries, this // SHOULD be the name of the compiler. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'OpenJDK Runtime Environment' ProcessRuntimeNameKey = attribute.Key("process.runtime.name") // The version of the runtime of this process, as returned by the runtime without // modification. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '14.0.2' ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") // An additional description about the runtime of the process, for example a // specific vendor customization of the runtime environment. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") ) // A service instance. const ( // Logical name of the service. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'shoppingcart' // Note: MUST be the same for all instances of horizontally scaled services. If // the value was not specified, SDKs MUST fallback to `unknown_service:` // concatenated with [`process.executable.name`](process.md#process), e.g. // `unknown_service:bash`. If `process.executable.name` is not available, the // value MUST be set to `unknown_service`. ServiceNameKey = attribute.Key("service.name") // A namespace for `service.name`. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Shop' // Note: A string value having a meaning that helps to distinguish a group of // services, for example the team name that owns a group of services. // `service.name` is expected to be unique within the same namespace. If // `service.namespace` is not specified in the Resource then `service.name` is // expected to be unique for all services that have no explicit namespace defined // (so the empty/unspecified namespace is simply one more valid namespace). Zero- // length namespace string is assumed equal to unspecified namespace. ServiceNamespaceKey = attribute.Key("service.namespace") // The string ID of the service instance. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '627cc493-f310-47de-96bd-71410b7dec09' // Note: MUST be unique for each instance of the same // `service.namespace,service.name` pair (in other words // `service.namespace,service.name,service.instance.id` triplet MUST be globally // unique). The ID helps to distinguish instances of the same service that exist // at the same time (e.g. instances of a horizontally scaled service). It is // preferable for the ID to be persistent and stay the same for the lifetime of // the service instance, however it is acceptable that the ID is ephemeral and // changes during important lifetime events for the service (e.g. service // restarts). 
If the service has no inherent unique ID that can be used as the // value of this attribute it is recommended to generate a random Version 1 or // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use // Version 5, see RFC 4122 for more recommendations). ServiceInstanceIDKey = attribute.Key("service.instance.id") // The version string of the service API or implementation. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2.0.0' ServiceVersionKey = attribute.Key("service.version") ) // The telemetry SDK used to capture data recorded by the instrumentation libraries. const ( // The name of the telemetry SDK as defined above. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") // The language of the telemetry SDK. // // Type: Enum // RequirementLevel: Optional // Stability: stable TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") // The version string of the telemetry SDK. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.2.3' TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") // The version string of the auto instrumentation agent, if used. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.2.3' TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") ) var ( // cpp TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") // dotnet TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") // erlang TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") // go TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") // java TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") // nodejs TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") // php TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") // python TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") // ruby TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") // webjs TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") // swift TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") ) // Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. const ( // The name of the web engine. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'WildFly' WebEngineNameKey = attribute.Key("webengine.name") // The version of the web engine. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '21.0.0' WebEngineVersionKey = attribute.Key("webengine.version") // Additional description of the web engine (e.g. detailed version and edition // information). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' WebEngineDescriptionKey = attribute.Key("webengine.description") ) // Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's concepts. const ( // The name of the instrumentation scope - (`InstrumentationScope.Name` in OTLP). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'io.opentelemetry.contrib.mongodb' OtelScopeNameKey = attribute.Key("otel.scope.name") // The version of the instrumentation scope - (`InstrumentationScope.Version` in // OTLP). 
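// NOTE: illustrative sketch, not part of the generated code. It shows one way the
// service.* attributes above might be combined with this package's SchemaURL to
// build an SDK resource; the function name and literal values are hypothetical
// examples taken from the comments in this section.
//
//	import (
//		"go.opentelemetry.io/otel/sdk/resource"
//		semconv "go.opentelemetry.io/otel/semconv/v1.15.0"
//	)
//
//	// serviceResource describes the instrumented service.
//	func serviceResource() *resource.Resource {
//		return resource.NewWithAttributes(
//			semconv.SchemaURL, // schema URL matching this semconv package
//			semconv.ServiceNameKey.String("shoppingcart"),
//			semconv.ServiceNamespaceKey.String("Shop"),
//			semconv.ServiceVersionKey.String("2.0.0"),
//		)
//	}
//
// A resource built this way is typically handed to the tracer provider through
// the SDK's WithResource option when the SDK is configured.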
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.0.0' OtelScopeVersionKey = attribute.Key("otel.scope.version") ) // Span attributes used by non-OTLP exporters to represent OpenTelemetry Scope's concepts. const ( // Deprecated, use the `otel.scope.name` attribute. // // Type: string // RequirementLevel: Optional // Stability: deprecated // Examples: 'io.opentelemetry.contrib.mongodb' OtelLibraryNameKey = attribute.Key("otel.library.name") // Deprecated, use the `otel.scope.version` attribute. // // Type: string // RequirementLevel: Optional // Stability: deprecated // Examples: '1.0.0' OtelLibraryVersionKey = attribute.Key("otel.library.version") ) opentelemetry-go-1.21.0/semconv/v1.15.0/schema.go000066400000000000000000000017141452547353200212600ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.15.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ const SchemaURL = "https://opentelemetry.io/schemas/1.15.0" opentelemetry-go-1.21.0/semconv/v1.15.0/trace.go000066400000000000000000002015211452547353200211140ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.15.0" import "go.opentelemetry.io/otel/attribute" // This document defines the shared attributes used to report a single exception associated with a span or log. const ( // The type of the exception (its fully-qualified class name, if applicable). The // dynamic type of the exception should be preferred over the static type in // languages that support it. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'java.net.ConnectException', 'OSError' ExceptionTypeKey = attribute.Key("exception.type") // The exception message. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" ExceptionMessageKey = attribute.Key("exception.message") // A stacktrace as a string in the natural representation for the language // runtime. 
The representation is to be determined and documented by each language // SIG. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test // exception\\n at ' // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' ExceptionStacktraceKey = attribute.Key("exception.stacktrace") ) // This document defines attributes for Events represented using Log Records. const ( // The name identifies the event. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'click', 'exception' EventNameKey = attribute.Key("event.name") // The domain identifies the context in which an event happened. An event name is // unique only within a domain. // // Type: Enum // RequirementLevel: Required // Stability: stable // Note: An `event.name` is supposed to be unique only in the context of an // `event.domain`, so this allows for two events in different domains to // have same `event.name`, yet be unrelated events. EventDomainKey = attribute.Key("event.domain") ) var ( // Events from browser apps EventDomainBrowser = EventDomainKey.String("browser") // Events from mobile apps EventDomainDevice = EventDomainKey.String("device") // Events from Kubernetes EventDomainK8S = EventDomainKey.String("k8s") ) // Span attributes used by AWS Lambda (in addition to general `faas` attributes). const ( // The full invoked ARN as provided on the `Context` passed to the function // (`Lambda-Runtime-Invoked-Function-ARN` header on the `/runtime/invocation/next` // applicable). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' // Note: This may be different from `faas.id` if an alias is involved. AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") ) // This document defines attributes for CloudEvents. CloudEvents is a specification on how to define event data in a standard way. These attributes can be attached to spans when performing operations with CloudEvents, regardless of the protocol being used. const ( // The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec // .md#id) uniquely identifies the event. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") // The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.m // d#source-1) identifies the context in which an event happened. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my- // service' CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") // The [version of the CloudEvents specification](https://github.com/cloudevents/s // pec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.0' CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") // The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/sp // ec.md#type) contains a value describing the type of event related to the // originating occurrence. 
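// NOTE: illustrative sketch, not generated from the specification. It records an
// "exception" event on a span using the exception.* attributes defined above; the
// tracer name, error, and type string are hypothetical, and in practice
// trace.Span.RecordError offers a shorthand that sets the same attributes.
//
//	import (
//		"context"
//		"fmt"
//
//		"go.opentelemetry.io/otel"
//		semconv "go.opentelemetry.io/otel/semconv/v1.15.0"
//		"go.opentelemetry.io/otel/trace"
//	)
//
//	func doWork(ctx context.Context) {
//		_, span := otel.Tracer("example").Start(ctx, "doWork")
//		defer span.End()
//		if err := fmt.Errorf("division by zero"); err != nil {
//			span.AddEvent("exception", trace.WithAttributes(
//				semconv.ExceptionTypeKey.String("*errors.errorString"),
//				semconv.ExceptionMessageKey.String(err.Error()),
//			))
//		}
//	}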
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2' CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") // The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec. // md#subject) of the event in the context of the event producer (identified by // source). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'mynewfile.jpg' CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") ) // This document defines semantic conventions for the OpenTracing Shim const ( // Parent-child Reference type // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: The causal relationship between a child Span and a parent Span. OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") ) var ( // The parent Span depends on the child Span in some capacity OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") // The parent Span does not depend in any way on the result of the child Span OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") ) // This document defines the attributes used to perform database client calls. const ( // An identifier for the database management system (DBMS) product being used. See // below for a list of well-known identifiers. // // Type: Enum // RequirementLevel: Required // Stability: stable DBSystemKey = attribute.Key("db.system") // The connection string used to connect to the database. It is recommended to // remove embedded credentials. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' DBConnectionStringKey = attribute.Key("db.connection_string") // Username for accessing the database. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'readonly_user', 'reporting_user' DBUserKey = attribute.Key("db.user") // The fully-qualified class name of the [Java Database Connectivity // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver // used to connect. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'org.postgresql.Driver', // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") // This attribute is used to report the name of the database being accessed. For // commands that switch the database, this should be set to the target database // (even if the command fails). // // Type: string // RequirementLevel: ConditionallyRequired (If applicable.) // Stability: stable // Examples: 'customers', 'main' // Note: In some SQL databases, the database name to be used is called "schema // name". In case there are multiple layers that could be considered for database // name (e.g. Oracle instance name and schema name), the database name to be used // is the more specific layer (e.g. Oracle schema name). DBNameKey = attribute.Key("db.name") // The database statement being executed. // // Type: string // RequirementLevel: ConditionallyRequired (If applicable and not explicitly // disabled via instrumentation configuration.) // Stability: stable // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' // Note: The value may be sanitized to exclude sensitive information. DBStatementKey = attribute.Key("db.statement") // The name of the operation being executed, e.g. 
the [MongoDB command // name](https://docs.mongodb.com/manual/reference/command/#database-operations) // such as `findAndModify`, or the SQL keyword. // // Type: string // RequirementLevel: ConditionallyRequired (If `db.statement` is not applicable.) // Stability: stable // Examples: 'findAndModify', 'HMSET', 'SELECT' // Note: When setting this to an SQL keyword, it is not recommended to attempt any // client-side parsing of `db.statement` just to get this property, but it should // be set if the operation name is provided by the library being instrumented. If // the SQL statement has an ambiguous operation, or performs more than one // operation, this value may be omitted. DBOperationKey = attribute.Key("db.operation") ) var ( // Some other SQL database. Fallback only. See notes DBSystemOtherSQL = DBSystemKey.String("other_sql") // Microsoft SQL Server DBSystemMSSQL = DBSystemKey.String("mssql") // MySQL DBSystemMySQL = DBSystemKey.String("mysql") // Oracle Database DBSystemOracle = DBSystemKey.String("oracle") // IBM DB2 DBSystemDB2 = DBSystemKey.String("db2") // PostgreSQL DBSystemPostgreSQL = DBSystemKey.String("postgresql") // Amazon Redshift DBSystemRedshift = DBSystemKey.String("redshift") // Apache Hive DBSystemHive = DBSystemKey.String("hive") // Cloudscape DBSystemCloudscape = DBSystemKey.String("cloudscape") // HyperSQL DataBase DBSystemHSQLDB = DBSystemKey.String("hsqldb") // Progress Database DBSystemProgress = DBSystemKey.String("progress") // SAP MaxDB DBSystemMaxDB = DBSystemKey.String("maxdb") // SAP HANA DBSystemHanaDB = DBSystemKey.String("hanadb") // Ingres DBSystemIngres = DBSystemKey.String("ingres") // FirstSQL DBSystemFirstSQL = DBSystemKey.String("firstsql") // EnterpriseDB DBSystemEDB = DBSystemKey.String("edb") // InterSystems Caché DBSystemCache = DBSystemKey.String("cache") // Adabas (Adaptable Database System) DBSystemAdabas = DBSystemKey.String("adabas") // Firebird DBSystemFirebird = DBSystemKey.String("firebird") // Apache Derby DBSystemDerby = DBSystemKey.String("derby") // FileMaker DBSystemFilemaker = DBSystemKey.String("filemaker") // Informix DBSystemInformix = DBSystemKey.String("informix") // InstantDB DBSystemInstantDB = DBSystemKey.String("instantdb") // InterBase DBSystemInterbase = DBSystemKey.String("interbase") // MariaDB DBSystemMariaDB = DBSystemKey.String("mariadb") // Netezza DBSystemNetezza = DBSystemKey.String("netezza") // Pervasive PSQL DBSystemPervasive = DBSystemKey.String("pervasive") // PointBase DBSystemPointbase = DBSystemKey.String("pointbase") // SQLite DBSystemSqlite = DBSystemKey.String("sqlite") // Sybase DBSystemSybase = DBSystemKey.String("sybase") // Teradata DBSystemTeradata = DBSystemKey.String("teradata") // Vertica DBSystemVertica = DBSystemKey.String("vertica") // H2 DBSystemH2 = DBSystemKey.String("h2") // ColdFusion IMQ DBSystemColdfusion = DBSystemKey.String("coldfusion") // Apache Cassandra DBSystemCassandra = DBSystemKey.String("cassandra") // Apache HBase DBSystemHBase = DBSystemKey.String("hbase") // MongoDB DBSystemMongoDB = DBSystemKey.String("mongodb") // Redis DBSystemRedis = DBSystemKey.String("redis") // Couchbase DBSystemCouchbase = DBSystemKey.String("couchbase") // CouchDB DBSystemCouchDB = DBSystemKey.String("couchdb") // Microsoft Azure Cosmos DB DBSystemCosmosDB = DBSystemKey.String("cosmosdb") // Amazon DynamoDB DBSystemDynamoDB = DBSystemKey.String("dynamodb") // Neo4j DBSystemNeo4j = DBSystemKey.String("neo4j") // Apache Geode DBSystemGeode = DBSystemKey.String("geode") // Elasticsearch 
DBSystemElasticsearch = DBSystemKey.String("elasticsearch") // Memcached DBSystemMemcached = DBSystemKey.String("memcached") // CockroachDB DBSystemCockroachdb = DBSystemKey.String("cockroachdb") // OpenSearch DBSystemOpensearch = DBSystemKey.String("opensearch") ) // Connection-level attributes for Microsoft SQL Server const ( // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en- // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) // connecting to. This name is used to determine the port of a named instance. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MSSQLSERVER' // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer // required (but still recommended if non-standard). DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") ) // Call-level attributes for Cassandra const ( // The fetch size used for paging, i.e. how many rows will be returned at once. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 5000 DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") // The consistency level of the query. Based on consistency values from // [CQL](https://docs.datastax.com/en/cassandra- // oss/3.0/cassandra/dml/dmlConfigConsistency.html). // // Type: Enum // RequirementLevel: Optional // Stability: stable DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") // The name of the primary table that the operation is acting upon, including the // keyspace name (if applicable). // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'mytable' // Note: This mirrors the db.sql.table attribute but references cassandra rather // than sql. It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting upon an // anonymous table, or more than one table, this value MUST NOT be set. DBCassandraTableKey = attribute.Key("db.cassandra.table") // Whether or not the query is idempotent. // // Type: boolean // RequirementLevel: Optional // Stability: stable DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") // The number of times a query was speculatively executed. Not set or `0` if the // query was not executed speculatively. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 0, 2 DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") // The ID of the coordinating node for a query. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") // The data center of the coordinating node for a query. 
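// NOTE: illustrative sketch, not part of the generated code. It attaches the db.*
// attributes defined above to a client span for a SQL query; the tracer name, span
// name, and statement are hypothetical.
//
//	import (
//		"context"
//
//		"go.opentelemetry.io/otel"
//		semconv "go.opentelemetry.io/otel/semconv/v1.15.0"
//		"go.opentelemetry.io/otel/trace"
//	)
//
//	func queryUsers(ctx context.Context) {
//		_, span := otel.Tracer("db-example").Start(ctx, "SELECT customers.wuser_table",
//			trace.WithSpanKind(trace.SpanKindClient),
//			trace.WithAttributes(
//				semconv.DBSystemPostgreSQL,
//				semconv.DBNameKey.String("customers"),
//				semconv.DBStatementKey.String("SELECT * FROM wuser_table"),
//				semconv.DBOperationKey.String("SELECT"),
//			),
//		)
//		defer span.End()
//		// ... execute the query ...
//	}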
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-west-2' DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") ) var ( // all DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") // each_quorum DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") // quorum DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") // local_quorum DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") // one DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") // two DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") // three DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") // local_one DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") // any DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") // serial DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") // local_serial DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") ) // Call-level attributes for Redis const ( // The index of the database being accessed as used in the [`SELECT` // command](https://redis.io/commands/select), provided as an integer. To be used // instead of the generic `db.name` attribute. // // Type: int // RequirementLevel: ConditionallyRequired (If other than the default database // (`0`).) // Stability: stable // Examples: 0, 1, 15 DBRedisDBIndexKey = attribute.Key("db.redis.database_index") ) // Call-level attributes for MongoDB const ( // The collection being accessed within the database stated in `db.name`. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'customers', 'products' DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") ) // Call-level attributes for SQL databases const ( // The name of the primary table that the operation is acting upon, including the // database name (if applicable). // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'public.users', 'customers' // Note: It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting upon an // anonymous table, or more than one table, this value MUST NOT be set. DBSQLTableKey = attribute.Key("db.sql.table") ) // Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's concepts. const ( // Name of the code, either "OK" or "ERROR". MUST NOT be set if the status code is // UNSET. // // Type: Enum // RequirementLevel: Optional // Stability: stable OtelStatusCodeKey = attribute.Key("otel.status_code") // Description of the Status if it has a value, otherwise not set. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'resource not found' OtelStatusDescriptionKey = attribute.Key("otel.status_description") ) var ( // The operation has been validated by an Application developer or Operator to have completed successfully OtelStatusCodeOk = OtelStatusCodeKey.String("OK") // The operation contains an error OtelStatusCodeError = OtelStatusCodeKey.String("ERROR") ) // This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. const ( // Type of the trigger which caused this function execution. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: For the server/consumer span on the incoming side, // `faas.trigger` MUST be set. // Clients invoking FaaS instances usually cannot set `faas.trigger`, // since they would typically need to look in the payload to determine // the event type. If clients set it, it should be the same as the // trigger that corresponding incoming would have (i.e., this has // nothing to do with the underlying transport used to make the API // call to invoke the lambda, which is often HTTP). FaaSTriggerKey = attribute.Key("faas.trigger") // The execution ID of the current function execution. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' FaaSExecutionKey = attribute.Key("faas.execution") ) var ( // A response to some data source operation such as a database or filesystem read/write FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") // To provide an answer to an inbound HTTP request FaaSTriggerHTTP = FaaSTriggerKey.String("http") // A function is set to be executed when messages are sent to a messaging system FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") // A function is scheduled to be executed regularly FaaSTriggerTimer = FaaSTriggerKey.String("timer") // If none of the others apply FaaSTriggerOther = FaaSTriggerKey.String("other") ) // Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. const ( // The name of the source on which the triggering operation was performed. For // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos // DB to the database name. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myBucketName', 'myDBName' FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") // Describes the type of the operation that was performed on the data. // // Type: Enum // RequirementLevel: Required // Stability: stable FaaSDocumentOperationKey = attribute.Key("faas.document.operation") // A string containing the time when the data was accessed in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed // in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSDocumentTimeKey = attribute.Key("faas.document.time") // The document name/table subjected to the operation. For example, in Cloud // Storage or S3 is the name of the file, and in Cosmos DB the table name. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'myFile.txt', 'myTableName' FaaSDocumentNameKey = attribute.Key("faas.document.name") ) var ( // When a new object is created FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") // When an object is modified FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") // When an object is deleted FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") ) // Semantic Convention for FaaS scheduled to be executed regularly. const ( // A string containing the function invocation time in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed // in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSTimeKey = attribute.Key("faas.time") // A string containing the schedule period as [Cron Expression](https://docs.oracl // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0/5 * * * ? *' FaaSCronKey = attribute.Key("faas.cron") ) // Contains additional attributes for incoming FaaS spans. const ( // A boolean that is true if the serverless function is executed for the first // time (aka cold-start). // // Type: boolean // RequirementLevel: Optional // Stability: stable FaaSColdstartKey = attribute.Key("faas.coldstart") ) // Contains additional attributes for outgoing FaaS spans. const ( // The name of the invoked function. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'my-function' // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked // function. FaaSInvokedNameKey = attribute.Key("faas.invoked_name") // The cloud provider of the invoked function. // // Type: Enum // RequirementLevel: Required // Stability: stable // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked // function. FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") // The cloud region of the invoked function. // // Type: string // RequirementLevel: ConditionallyRequired (For some cloud providers, like AWS or // GCP, the region in which a function is hosted is essential to uniquely identify // the function and also part of its endpoint. Since it's part of the endpoint // being called, the region is always known to clients. In these cases, // `faas.invoked_region` MUST be set accordingly. If the region is unknown to the // client or not required for identifying the invoked function, setting // `faas.invoked_region` is optional.) // Stability: stable // Examples: 'eu-central-1' // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked // function. FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") ) var ( // Alibaba Cloud FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") // Amazon Web Services FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") // Microsoft Azure FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") // Google Cloud Platform FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") // Tencent Cloud FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") ) // These attributes may be used for any network related operation. const ( // Transport protocol used. See note below. 
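// NOTE: illustrative sketch, not generated from the specification. It shows faas.*
// attributes on an incoming (server) FaaS invocation span; the tracer name, span
// name, and parameter values are hypothetical.
//
//	import (
//		"context"
//
//		"go.opentelemetry.io/otel"
//		semconv "go.opentelemetry.io/otel/semconv/v1.15.0"
//		"go.opentelemetry.io/otel/trace"
//	)
//
//	func handleInvocation(ctx context.Context, executionID string, coldStart bool) {
//		_, span := otel.Tracer("faas-example").Start(ctx, "my-function",
//			trace.WithSpanKind(trace.SpanKindServer),
//			trace.WithAttributes(
//				semconv.FaaSTriggerHTTP,
//				semconv.FaaSExecutionKey.String(executionID),
//				semconv.FaaSColdstartKey.Bool(coldStart),
//			),
//		)
//		defer span.End()
//		// ... function body ...
//	}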
// // Type: Enum // RequirementLevel: Optional // Stability: stable NetTransportKey = attribute.Key("net.transport") // Application layer protocol used. The value SHOULD be normalized to lowercase. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'amqp', 'http', 'mqtt' NetAppProtocolNameKey = attribute.Key("net.app.protocol.name") // Version of the application layer protocol used. See note below. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '3.1.1' // Note: `net.app.protocol.version` refers to the version of the protocol used and // might be different from the protocol client's version. If the HTTP client used // has a version of `0.27.2`, but sends HTTP version `1.1`, this attribute should // be set to `1.1`. NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version") // Remote socket peer name. // // Type: string // RequirementLevel: Recommended (If available and different from `net.peer.name` // and if `net.sock.peer.addr` is set.) // Stability: stable // Examples: 'proxy.example.com' NetSockPeerNameKey = attribute.Key("net.sock.peer.name") // Remote socket peer address: IPv4 or IPv6 for internet protocols, path for local // communication, [etc](https://man7.org/linux/man- // pages/man7/address_families.7.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '127.0.0.1', '/tmp/mysql.sock' NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") // Remote socket peer port. // // Type: int // RequirementLevel: Recommended (If defined for the address family and if // different than `net.peer.port` and if `net.sock.peer.addr` is set.) // Stability: stable // Examples: 16456 NetSockPeerPortKey = attribute.Key("net.sock.peer.port") // Protocol [address family](https://man7.org/linux/man- // pages/man7/address_families.7.html) which is used for communication. // // Type: Enum // RequirementLevel: ConditionallyRequired (If different than `inet` and if any of // `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers of telemetry // SHOULD accept both IPv4 and IPv6 formats for the address in // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support // instrumentations that follow previous versions of this document.) // Stability: stable // Examples: 'inet6', 'bluetooth' NetSockFamilyKey = attribute.Key("net.sock.family") // Logical remote hostname, see note below. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'example.com' // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an extra // DNS lookup. NetPeerNameKey = attribute.Key("net.peer.name") // Logical remote port number // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 80, 8080, 443 NetPeerPortKey = attribute.Key("net.peer.port") // Logical local hostname or similar, see note below. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'localhost' NetHostNameKey = attribute.Key("net.host.name") // Logical local port number, preferably the one that the peer used to connect // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 8080 NetHostPortKey = attribute.Key("net.host.port") // Local socket address. Useful in case of a multi-IP host. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '192.168.0.1' NetSockHostAddrKey = attribute.Key("net.sock.host.addr") // Local socket port number. 
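// NOTE: illustrative sketch, not part of the generated code. It sets a few of the
// net.* attributes defined in this block (NetTransportTCP is declared just below)
// on an existing client span; the host name, port, and protocol values are
// hypothetical.
//
//	import (
//		semconv "go.opentelemetry.io/otel/semconv/v1.15.0"
//		"go.opentelemetry.io/otel/trace"
//	)
//
//	func annotateConnection(span trace.Span) {
//		span.SetAttributes(
//			semconv.NetTransportTCP,
//			semconv.NetAppProtocolNameKey.String("http"),
//			semconv.NetAppProtocolVersionKey.String("1.1"),
//			semconv.NetPeerNameKey.String("example.com"),
//			semconv.NetPeerPortKey.Int(443),
//		)
//	}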
// // Type: int // RequirementLevel: Recommended (If defined for the address family and if // different than `net.host.port` and if `net.sock.host.addr` is set.) // Stability: stable // Examples: 35555 NetSockHostPortKey = attribute.Key("net.sock.host.port") // The internet connection type currently being used by the host. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'wifi' NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") // This describes more details regarding the connection.type. It may be the type // of cell technology connection, but it could be used for describing details // about a wifi connection. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'LTE' NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") // The name of the mobile carrier. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'sprint' NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") // The mobile carrier country code. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '310' NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") // The mobile carrier network code. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '001' NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") // The ISO 3166-1 alpha-2 2-character country code associated with the mobile // carrier network. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'DE' NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") ) var ( // ip_tcp NetTransportTCP = NetTransportKey.String("ip_tcp") // ip_udp NetTransportUDP = NetTransportKey.String("ip_udp") // Named or anonymous pipe. See note below NetTransportPipe = NetTransportKey.String("pipe") // In-process communication NetTransportInProc = NetTransportKey.String("inproc") // Something else (non IP-based) NetTransportOther = NetTransportKey.String("other") ) var ( // IPv4 address NetSockFamilyInet = NetSockFamilyKey.String("inet") // IPv6 address NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") // Unix domain socket path NetSockFamilyUnix = NetSockFamilyKey.String("unix") ) var ( // wifi NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") // wired NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") // cell NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") // unavailable NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") // unknown NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") ) var ( // GPRS NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") // EDGE NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") // UMTS NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") // CDMA NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") // EVDO Rel. 0 NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") // EVDO Rev. 
A NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") // CDMA2000 1XRTT NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") // HSDPA NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") // HSUPA NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") // HSPA NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") // IDEN NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") // EVDO Rev. B NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") // LTE NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") // EHRPD NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") // HSPAP NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") // GSM NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") // TD-SCDMA NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") // IWLAN NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") // 5G NR (New Radio) NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") // 5G NRNSA (New Radio Non-Standalone) NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") // LTE CA NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") ) // Operations that access some remote service. const ( // The [`service.name`](../../resource/semantic_conventions/README.md#service) of // the remote service. SHOULD be equal to the actual `service.name` resource // attribute of the remote service if any. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'AuthTokenCache' PeerServiceKey = attribute.Key("peer.service") ) // These attributes may be used for any operation with an authenticated and/or authorized enduser. const ( // Username or client_id extracted from the access token or // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the // inbound request from outside the system. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'username' EnduserIDKey = attribute.Key("enduser.id") // Actual/assumed role the client is making the request under extracted from token // or application security context. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'admin' EnduserRoleKey = attribute.Key("enduser.role") // Scopes or granted authorities the client currently possesses extracted from // token or application security context. The value would come from the scope // associated with an [OAuth 2.0 Access // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value // in a [SAML 2.0 Assertion](http://docs.oasis- // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'read:message, write:files' EnduserScopeKey = attribute.Key("enduser.scope") ) // These attributes may be used for any operation to store information about a thread that started a span. const ( // Current "managed" thread ID (as opposed to OS thread ID). // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 42 ThreadIDKey = attribute.Key("thread.id") // Current thread name. 
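// NOTE: illustrative sketch, not generated from the specification. It combines the
// peer.service, enduser.*, and thread.* attributes described nearby; the function
// name and values are hypothetical and would normally come from the request context.
//
//	import (
//		semconv "go.opentelemetry.io/otel/semconv/v1.15.0"
//		"go.opentelemetry.io/otel/trace"
//	)
//
//	func annotateCall(span trace.Span) {
//		span.SetAttributes(
//			semconv.PeerServiceKey.String("AuthTokenCache"),
//			semconv.EnduserIDKey.String("username"),
//			semconv.EnduserRoleKey.String("admin"),
//			semconv.ThreadIDKey.Int64(42),
//			semconv.ThreadNameKey.String("main"),
//		)
//	}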
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'main' ThreadNameKey = attribute.Key("thread.name") ) // These attributes allow to report this unit of code and therefore to provide more context about the span. const ( // The method or function name, or equivalent (usually rightmost part of the code // unit's name). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'serveRequest' CodeFunctionKey = attribute.Key("code.function") // The "namespace" within which `code.function` is defined. Usually the qualified // class or module name, such that `code.namespace` + some separator + // `code.function` form a unique identifier for the code unit. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'com.example.MyHTTPService' CodeNamespaceKey = attribute.Key("code.namespace") // The source code file name that identifies the code unit as uniquely as possible // (preferably an absolute file path). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '/usr/local/MyApplication/content_root/app/index.php' CodeFilepathKey = attribute.Key("code.filepath") // The line number in `code.filepath` best representing the operation. It SHOULD // point within the code unit named in `code.function`. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 42 CodeLineNumberKey = attribute.Key("code.lineno") ) // This document defines semantic conventions for HTTP client and server Spans. const ( // HTTP request method. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'GET', 'POST', 'HEAD' HTTPMethodKey = attribute.Key("http.method") // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). // // Type: int // RequirementLevel: ConditionallyRequired (If and only if one was received/sent.) // Stability: stable // Examples: 200 HTTPStatusCodeKey = attribute.Key("http.status_code") // Kind of HTTP protocol used. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP` // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed. HTTPFlavorKey = attribute.Key("http.flavor") // Value of the [HTTP User-Agent](https://www.rfc- // editor.org/rfc/rfc9110.html#field.user-agent) header sent by the client. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' HTTPUserAgentKey = attribute.Key("http.user_agent") // The size of the request payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content- // length) header. For requests using transport encoding, this should be the // compressed size. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 3495 HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") // The size of the response payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content- // length) header. For requests using transport encoding, this should be the // compressed size. 
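// NOTE: illustrative sketch, not part of the generated code. It uses the http.*
// attributes defined in this section (http.url is defined a little further below)
// for an outgoing client request; the URL, tracer name, and status code are
// hypothetical.
//
//	import (
//		"context"
//
//		"go.opentelemetry.io/otel"
//		semconv "go.opentelemetry.io/otel/semconv/v1.15.0"
//		"go.opentelemetry.io/otel/trace"
//	)
//
//	func fetch(ctx context.Context) {
//		_, span := otel.Tracer("http-example").Start(ctx, "HTTP GET",
//			trace.WithSpanKind(trace.SpanKindClient),
//			trace.WithAttributes(
//				semconv.HTTPMethodKey.String("GET"),
//				semconv.HTTPURLKey.String("https://www.foo.bar/search?q=OpenTelemetry"),
//				semconv.HTTPFlavorHTTP11,
//			),
//		)
//		defer span.End()
//		// ... perform the request, then record the response ...
//		span.SetAttributes(semconv.HTTPStatusCodeKey.Int(200))
//	}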
// // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 3495 HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") ) var ( // HTTP/1.0 HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") // HTTP/1.1 HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") // HTTP/2 HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") // HTTP/3 HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") // SPDY protocol HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") // QUIC protocol HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") ) // Semantic Convention for HTTP Client const ( // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. // Usually the fragment is not transmitted over HTTP, but if it is known, it // should be included nevertheless. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' // Note: `http.url` MUST NOT contain credentials passed via URL in form of // `https://username:password@www.example.com/`. In such case the attribute's // value should be `https://www.example.com/`. HTTPURLKey = attribute.Key("http.url") // The ordinal number of request resending attempt (for any reason, including // redirects). // // Type: int // RequirementLevel: Recommended (if and only if request was retried.) // Stability: stable // Examples: 3 // Note: The resend count SHOULD be updated each time an HTTP request gets resent // by the client, regardless of what was the cause of the resending (e.g. // redirection, authorization failure, 503 Server Unavailable, network issues, or // any other). HTTPResendCountKey = attribute.Key("http.resend_count") ) // Semantic Convention for HTTP Server const ( // The URI scheme identifying the used protocol. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'http', 'https' HTTPSchemeKey = attribute.Key("http.scheme") // The full request target as passed in a HTTP request line or equivalent. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: '/path/12314/?q=ddds' HTTPTargetKey = attribute.Key("http.target") // The matched route (path template in the format used by the respective server // framework). See note below // // Type: string // RequirementLevel: ConditionallyRequired (If and only if it's available) // Stability: stable // Examples: '/users/:userID?', '{controller}/{action}/{id?}' // Note: 'http.route' MUST NOT be populated when this is not supported by the HTTP // server framework as the route attribute should have low-cardinality and the URI // path can NOT substitute it. HTTPRouteKey = attribute.Key("http.route") // The IP address of the original client behind all proxies, if known (e.g. from // [X-Forwarded-For](https://developer.mozilla.org/en- // US/docs/Web/HTTP/Headers/X-Forwarded-For)). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '83.164.160.102' // Note: This is not necessarily the same as `net.sock.peer.addr`, which would // identify the network-level peer, which may be a proxy. // This attribute should be set when a source of information different // from the one used for `net.sock.peer.addr`, is available even if that other // source just confirms the same value as `net.sock.peer.addr`. // Rationale: For `net.sock.peer.addr`, one typically does not know if it // comes from a proxy, reverse proxy, or the actual client. 
Setting // `http.client_ip` when it's the same as `net.sock.peer.addr` means that // one is at least somewhat confident that the address is not that of // the closest proxy. HTTPClientIPKey = attribute.Key("http.client_ip") ) // Attributes that exist for multiple DynamoDB request types. const ( // The keys in the `RequestItems` object field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'Users', 'Cats' AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") // The JSON-serialized value of each item in the `ConsumedCapacity` response // field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": // "string", "WriteCapacityUnits": number }' AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") // The JSON-serialized value of the `ItemCollectionMetrics` response field. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. // // Type: double // RequirementLevel: Optional // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. // // Type: double // RequirementLevel: Optional // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") // The value of the `ConsistentRead` request parameter. // // Type: boolean // RequirementLevel: Optional // Stability: stable AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") // The value of the `ProjectionExpression` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, // ProductReviews' AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") // The value of the `Limit` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") // The value of the `AttributesToGet` request parameter. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'lives', 'id' AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") // The value of the `IndexName` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'name_to_group' AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") // The value of the `Select` request parameter. 
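// NOTE: illustrative sketch, not generated from the specification. It shows the
// aws.dynamodb.* request attributes above on a DynamoDB client span; the tracer
// name, table, and parameter values are hypothetical.
//
//	import (
//		"context"
//
//		"go.opentelemetry.io/otel"
//		semconv "go.opentelemetry.io/otel/semconv/v1.15.0"
//		"go.opentelemetry.io/otel/trace"
//	)
//
//	func queryUsersTable(ctx context.Context) {
//		_, span := otel.Tracer("dynamodb-example").Start(ctx, "DynamoDB.Query",
//			trace.WithSpanKind(trace.SpanKindClient),
//			trace.WithAttributes(
//				semconv.DBSystemDynamoDB,
//				semconv.AWSDynamoDBTableNamesKey.StringSlice([]string{"Users"}),
//				semconv.AWSDynamoDBLimitKey.Int(10),
//				semconv.AWSDynamoDBConsistentReadKey.Bool(true),
//			),
//		)
//		defer span.End()
//	}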
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'ALL_ATTRIBUTES', 'COUNT' AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") ) // DynamoDB.CreateTable const ( // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request // field // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": // number, "WriteCapacityUnits": number } }' AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request // field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], // "ProjectionType": "string" } }' AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") ) // DynamoDB.ListTables const ( // The value of the `ExclusiveStartTableName` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Users', 'CatsTable' AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") // The the number of items in the `TableNames` response parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 20 AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") ) // DynamoDB.Query const ( // The value of the `ScanIndexForward` request parameter. // // Type: boolean // RequirementLevel: Optional // Stability: stable AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") ) // DynamoDB.Scan const ( // The value of the `Segment` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") // The value of the `TotalSegments` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 100 AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") // The value of the `Count` response parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") // The value of the `ScannedCount` response parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 50 AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") ) // DynamoDB.UpdateTable const ( // The JSON-serialized value of each item in the `AttributeDefinitions` request // field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") // The JSON-serialized value of each item in the the `GlobalSecondaryIndexUpdates` // request field. 
// // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { // "AttributeName": "string", "KeyType": "string" } ], "Projection": { // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": // number } }' AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") ) // This document defines semantic conventions to apply when instrumenting the GraphQL implementation. They map GraphQL operations to attributes on a Span. const ( // The name of the operation being executed. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'findBookByID' GraphqlOperationNameKey = attribute.Key("graphql.operation.name") // The type of the operation being executed. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'query', 'mutation', 'subscription' GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") // The GraphQL document being executed. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'query findBookByID { bookByID(id: ?) { name } }' // Note: The value may be sanitized to exclude sensitive information. GraphqlDocumentKey = attribute.Key("graphql.document") ) var ( // GraphQL query GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") // GraphQL mutation GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") // GraphQL subscription GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") ) // This document defines the attributes used in messaging systems. const ( // A string identifying the messaging system. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' MessagingSystemKey = attribute.Key("messaging.system") // The message destination name. This might be equal to the span name but is // required nevertheless. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'MyQueue', 'MyTopic' MessagingDestinationKey = attribute.Key("messaging.destination") // The kind of message destination // // Type: Enum // RequirementLevel: ConditionallyRequired (If the message destination is either a // `queue` or `topic`.) // Stability: stable MessagingDestinationKindKey = attribute.Key("messaging.destination_kind") // A boolean that is true if the message destination is temporary. // // Type: boolean // RequirementLevel: ConditionallyRequired (If value is `true`. When missing, the // value is assumed to be `false`.) // Stability: stable MessagingTempDestinationKey = attribute.Key("messaging.temp_destination") // The name of the transport protocol. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'AMQP', 'MQTT' MessagingProtocolKey = attribute.Key("messaging.protocol") // The version of the transport protocol. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0.9.1' MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version") // Connection string. 
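// NOTE: illustrative sketch, not part of the generated code. It applies the
// messaging.* attributes defined in this section to a producer span for a Kafka
// topic; the topic name, tracer name, and message ID are hypothetical.
//
//	import (
//		"context"
//
//		"go.opentelemetry.io/otel"
//		semconv "go.opentelemetry.io/otel/semconv/v1.15.0"
//		"go.opentelemetry.io/otel/trace"
//	)
//
//	func publish(ctx context.Context, msgID string) {
//		_, span := otel.Tracer("messaging-example").Start(ctx, "MyTopic send",
//			trace.WithSpanKind(trace.SpanKindProducer),
//			trace.WithAttributes(
//				semconv.MessagingSystemKey.String("kafka"),
//				semconv.MessagingDestinationKey.String("MyTopic"),
//				semconv.MessagingDestinationKindTopic,
//				semconv.MessagingMessageIDKey.String(msgID),
//			),
//		)
//		defer span.End()
//	}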
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'tibjmsnaming://localhost:7222', // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' MessagingURLKey = attribute.Key("messaging.url") // A value used by the messaging system as an identifier for the message, // represented as a string. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '452a7c7c7c7048c2f887f61572b18fc2' MessagingMessageIDKey = attribute.Key("messaging.message_id") // The [conversation ID](#conversations) identifying the conversation to which the // message belongs, represented as a string. Sometimes called "Correlation ID". // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MyConversationID' MessagingConversationIDKey = attribute.Key("messaging.conversation_id") // The (uncompressed) size of the message payload in bytes. Also use this // attribute if it is unknown whether the compressed or uncompressed payload size // is reported. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2738 MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes") // The compressed size of the message payload in bytes. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2048 MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes") ) var ( // A message sent to a queue MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") // A message sent to a topic MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") ) // Semantic convention for a consumer of messages received from a messaging system const ( // A string identifying the kind of message consumption as defined in the // [Operation names](#operation-names) section above. If the operation is "send", // this attribute MUST NOT be set, since the operation can be inferred from the // span kind in that case. // // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingOperationKey = attribute.Key("messaging.operation") // The identifier for the consumer receiving a message. For Kafka, set it to // `{messaging.kafka.consumer_group} - {messaging.kafka.client_id}`, if both are // present, or only `messaging.kafka.consumer_group`. For brokers, such as // RabbitMQ and Artemis, set it to the `client_id` of the client consuming the // message. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'mygroup - client-6' MessagingConsumerIDKey = attribute.Key("messaging.consumer_id") ) var ( // receive MessagingOperationReceive = MessagingOperationKey.String("receive") // process MessagingOperationProcess = MessagingOperationKey.String("process") ) // Attributes for RabbitMQ const ( // RabbitMQ message routing key. // // Type: string // RequirementLevel: ConditionallyRequired (If not empty.) // Stability: stable // Examples: 'myKey' MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key") ) // Attributes for Apache Kafka const ( // Message keys in Kafka are used for grouping alike messages to ensure they're // processed on the same partition. They differ from `messaging.message_id` in // that they're not unique. If the key is `null`, the attribute MUST NOT be set. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'myKey' // Note: If the key type is not string, its string representation has to be // supplied for the attribute.
If the key has no unambiguous, canonical string // form, don't include its value. MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key") // Name of the Kafka Consumer Group that is handling the message. Only applies to // consumers, not producers. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'my-group' MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group") // Client ID for the Consumer or Producer that is handling the message. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'client-5' MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") // Partition the message is sent to. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2 MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition") // A boolean that is true if the message is a tombstone. // // Type: boolean // RequirementLevel: ConditionallyRequired (If value is `true`. When missing, the // value is assumed to be `false`.) // Stability: stable MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone") ) // Attributes for Apache RocketMQ const ( // Namespace of RocketMQ resources; resources in different namespaces are // individual. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myNamespace' MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") // Name of the RocketMQ producer/consumer group that is handling the message. The // client type is identified by the SpanKind. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myConsumerGroup' MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") // The unique identifier for each client. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myhost@8742@s8083jm' MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") // The timestamp in milliseconds that the delay message is expected to be // delivered to the consumer. // // Type: int // RequirementLevel: ConditionallyRequired (If the message type is delay and delay // time level is not specified.) // Stability: stable // Examples: 1665987217045 MessagingRocketmqDeliveryTimestampKey = attribute.Key("messaging.rocketmq.delivery_timestamp") // The delay time level for a delay message, which determines the message delay // time. // // Type: int // RequirementLevel: ConditionallyRequired (If the message type is delay and // delivery timestamp is not specified.) // Stability: stable // Examples: 3 MessagingRocketmqDelayTimeLevelKey = attribute.Key("messaging.rocketmq.delay_time_level") // It is essential for FIFO messages. Messages that belong to the same message // group are always processed one by one within the same consumer group. // // Type: string // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) // Stability: stable // Examples: 'myMessageGroup' MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message_group") // Type of message. // // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message_type") // The secondary classifier of message besides topic.
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'tagA' MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message_tag") // Key(s) of message, another way to mark message besides message id. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'keyA', 'keyB' MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message_keys") // Model of message consumption. This only applies to consumer spans. // // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") ) var ( // Normal message MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") // FIFO message MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") // Delay message MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") // Transaction message MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") ) var ( // Clustering consumption model MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") // Broadcasting consumption model MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") ) // This document defines semantic conventions for remote procedure calls. const ( // A string identifying the remoting system. See below for a list of well-known // identifiers. // // Type: Enum // RequirementLevel: Required // Stability: stable RPCSystemKey = attribute.Key("rpc.system") // The full (logical) name of the service being called, including its package // name, if applicable. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'myservice.EchoService' // Note: This is the logical name of the service from the RPC interface // perspective, which can be different from the name of any implementing class. // The `code.namespace` attribute may be used to store the latter (despite the // attribute name, it may include a class name; e.g., class with method actually // executing the call on the server side, RPC client stub class on the client // side). RPCServiceKey = attribute.Key("rpc.service") // The name of the (logical) method being called, must be equal to the $method // part in the span name. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'exampleMethod' // Note: This is the logical name of the method from the RPC interface // perspective, which can be different from the name of any implementing // method/function. The `code.function` attribute may be used to store the latter // (e.g., method actually executing the call on the server side, RPC client stub // method on the client side). RPCMethodKey = attribute.Key("rpc.method") ) var ( // gRPC RPCSystemGRPC = RPCSystemKey.String("grpc") // Java RMI RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") // .NET WCF RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") // Apache Dubbo RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") ) // Tech-specific attributes for gRPC. const ( // The [numeric status // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC // request. 
// // Type: Enum // RequirementLevel: Required // Stability: stable RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ) var ( // OK RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) // CANCELLED RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) // UNKNOWN RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) // INVALID_ARGUMENT RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) // DEADLINE_EXCEEDED RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) // NOT_FOUND RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) // ALREADY_EXISTS RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) // PERMISSION_DENIED RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) // RESOURCE_EXHAUSTED RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) // FAILED_PRECONDITION RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) // ABORTED RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) // OUT_OF_RANGE RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) // UNIMPLEMENTED RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) // INTERNAL RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) // UNAVAILABLE RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) // DATA_LOSS RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) // UNAUTHENTICATED RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) ) // Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). const ( // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC // 1.0 does not specify this, the value can be omitted. // // Type: string // RequirementLevel: ConditionallyRequired (If other than the default version // (`1.0`)) // Stability: stable // Examples: '2.0', '1.0' RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") // `id` property of request or response. Since protocol allows id to be int, // string, `null` or missing (for notifications), value is expected to be cast to // string for simplicity. Use empty string in case of `null` value. Omit entirely // if this is a notification. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '10', 'request-7', '' RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") // `error.code` property of response if it is an error response. // // Type: int // RequirementLevel: ConditionallyRequired (If response is not successful.) // Stability: stable // Examples: -32700, 100 RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") // `error.message` property of response if it is an error response. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Parse error', 'User already exists' RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") ) opentelemetry-go-1.21.0/semconv/v1.16.0/000077500000000000000000000000001452547353200174675ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.16.0/doc.go000066400000000000000000000016641452547353200205720ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package semconv implements OpenTelemetry semantic conventions. // // OpenTelemetry semantic conventions are agreed standardized naming // patterns for OpenTelemetry things. This package represents the conventions // as of the v1.16.0 version of the OpenTelemetry specification. package semconv // import "go.opentelemetry.io/otel/semconv/v1.16.0" opentelemetry-go-1.21.0/semconv/v1.16.0/exception.go000066400000000000000000000014301452547353200220120ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.16.0" const ( // ExceptionEventName is the name of the Span event representing an exception. ExceptionEventName = "exception" ) opentelemetry-go-1.21.0/semconv/v1.16.0/http.go000066400000000000000000000014401452547353200207740ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.16.0" // HTTP scheme attributes. var ( HTTPSchemeHTTP = HTTPSchemeKey.String("http") HTTPSchemeHTTPS = HTTPSchemeKey.String("https") ) opentelemetry-go-1.21.0/semconv/v1.16.0/httpconv/000077500000000000000000000000001452547353200213345ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.16.0/httpconv/http.go000066400000000000000000000146571452547353200226570ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package httpconv provides OpenTelemetry HTTP semantic conventions for // tracing telemetry. 
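//
// A minimal, hedged usage sketch (illustrative only, not part of this package's
// API; span is assumed to be a recording trace span and req an *http.Request
// built by the caller, and the server name "my-server" is hypothetical):
//
//	// Client side: record request attributes, then response attributes and a
//	// span status derived from the HTTP status code.
//	span.SetAttributes(httpconv.ClientRequest(req)...)
//	resp, err := http.DefaultClient.Do(req)
//	if err == nil {
//		span.SetAttributes(httpconv.ClientResponse(resp)...)
//		span.SetStatus(httpconv.ClientStatus(resp.StatusCode))
//	}
//
//	// Server side: record attributes for an incoming request and a status for
//	// the returned code.
//	span.SetAttributes(httpconv.ServerRequest("my-server", req)...)
//	span.SetStatus(httpconv.ServerStatus(200))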
package httpconv // import "go.opentelemetry.io/otel/semconv/v1.16.0/httpconv" import ( "net/http" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/semconv/internal/v2" semconv "go.opentelemetry.io/otel/semconv/v1.16.0" ) var ( nc = &internal.NetConv{ NetHostNameKey: semconv.NetHostNameKey, NetHostPortKey: semconv.NetHostPortKey, NetPeerNameKey: semconv.NetPeerNameKey, NetPeerPortKey: semconv.NetPeerPortKey, NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, NetSockPeerPortKey: semconv.NetSockPeerPortKey, NetTransportOther: semconv.NetTransportOther, NetTransportTCP: semconv.NetTransportTCP, NetTransportUDP: semconv.NetTransportUDP, NetTransportInProc: semconv.NetTransportInProc, } hc = &internal.HTTPConv{ NetConv: nc, EnduserIDKey: semconv.EnduserIDKey, HTTPClientIPKey: semconv.HTTPClientIPKey, HTTPFlavorKey: semconv.HTTPFlavorKey, HTTPMethodKey: semconv.HTTPMethodKey, HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey, HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey, HTTPRouteKey: semconv.HTTPRouteKey, HTTPSchemeHTTP: semconv.HTTPSchemeHTTP, HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS, HTTPStatusCodeKey: semconv.HTTPStatusCodeKey, HTTPTargetKey: semconv.HTTPTargetKey, HTTPURLKey: semconv.HTTPURLKey, HTTPUserAgentKey: semconv.HTTPUserAgentKey, } ) // ClientResponse returns trace attributes for an HTTP response received by a // client from a server. It will return the following attributes if the related // values are defined in resp: "http.status.code", // "http.response_content_length". // // This does not add all OpenTelemetry required attributes for an HTTP event; // it assumes ClientRequest was used to create the span with a complete set of // attributes. A complete set of attributes can be generated by combining this // with ClientRequest applied to the request contained in resp. For example: // // append(ClientResponse(resp), ClientRequest(resp.Request)...) func ClientResponse(resp *http.Response) []attribute.KeyValue { return hc.ClientResponse(resp) } // ClientRequest returns trace attributes for an HTTP request made by a client. // The following attributes are always returned: "http.url", "http.flavor", // "http.method", "net.peer.name". The following attributes are returned if the // related values are defined in req: "net.peer.port", "http.user_agent", // "http.request_content_length", "enduser.id". func ClientRequest(req *http.Request) []attribute.KeyValue { return hc.ClientRequest(req) } // ClientStatus returns a span status code and message for an HTTP status code // value received by a client. func ClientStatus(code int) (codes.Code, string) { return hc.ClientStatus(code) } // ServerRequest returns trace attributes for an HTTP request received by a // server. // // The server must be the primary server name if it is known. For example, this // would be the ServerName directive // (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache // server, and the server_name directive // (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an // nginx server. More generically, the primary server name would be the host // header value that matches the default virtual host of an HTTP server. It // should include the host identifier and, if a port is used to route to the // server, that port identifier should be included as an appropriate port // suffix. // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead.
// // The following attributes are always returned: "http.method", "http.scheme", // "http.flavor", "http.target", "net.host.name". The following attributes are // returned if the related values are defined in req: "net.host.port", // "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", // "http.client_ip". func ServerRequest(server string, req *http.Request) []attribute.KeyValue { return hc.ServerRequest(server, req) } // ServerStatus returns a span status code and message for an HTTP status code // value returned by a server. Status codes in the 400-499 range are not // returned as errors. func ServerStatus(code int) (codes.Code, string) { return hc.ServerStatus(code) } // RequestHeader returns the contents of h as attributes. // // Instrumentation should require an explicit configuration of which headers to // capture and then prune what they pass here. Including all headers can be a // security risk - explicit configuration helps avoid leaking sensitive // information. // // The User-Agent header is already captured in the http.user_agent attribute // from ClientRequest and ServerRequest. Instrumentation may provide an option // to capture that header here even though it is not recommended. Otherwise, // instrumentation should filter that out of what is passed. func RequestHeader(h http.Header) []attribute.KeyValue { return hc.RequestHeader(h) } // ResponseHeader returns the contents of h as attributes. // // Instrumentation should require an explicit configuration of which headers to // capture and then prune what they pass here. Including all headers can be a // security risk - explicit configuration helps avoid leaking sensitive // information. // // The User-Agent header is already captured in the http.user_agent attribute // from ClientRequest and ServerRequest. Instrumentation may provide an option // to capture that header here even though it is not recommended. Otherwise, // instrumentation should filter that out of what is passed. func ResponseHeader(h http.Header) []attribute.KeyValue { return hc.ResponseHeader(h) } opentelemetry-go-1.21.0/semconv/v1.16.0/netconv/000077500000000000000000000000001452547353200211435ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.16.0/netconv/net.go000066400000000000000000000053211452547353200222610ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package netconv provides OpenTelemetry network semantic conventions for // tracing telemetry.
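//
// A minimal, hedged usage sketch (illustrative only, not part of this package's
// API; span is assumed to be a recording trace span and the address
// "127.0.0.1:4317" is purely hypothetical):
//
//	// Dial a TCP connection and record client-side network attributes for it,
//	// along with the transport protocol attribute.
//	conn, err := net.Dial("tcp", "127.0.0.1:4317")
//	if err == nil {
//		span.SetAttributes(netconv.Client("127.0.0.1:4317", conn)...)
//		span.SetAttributes(netconv.Transport("tcp"))
//	}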
package netconv // import "go.opentelemetry.io/otel/semconv/v1.16.0/netconv" import ( "net" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/semconv/internal/v2" semconv "go.opentelemetry.io/otel/semconv/v1.16.0" ) var nc = &internal.NetConv{ NetHostNameKey: semconv.NetHostNameKey, NetHostPortKey: semconv.NetHostPortKey, NetPeerNameKey: semconv.NetPeerNameKey, NetPeerPortKey: semconv.NetPeerPortKey, NetSockFamilyKey: semconv.NetSockFamilyKey, NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, NetSockPeerPortKey: semconv.NetSockPeerPortKey, NetSockHostAddrKey: semconv.NetSockHostAddrKey, NetSockHostPortKey: semconv.NetSockHostPortKey, NetTransportOther: semconv.NetTransportOther, NetTransportTCP: semconv.NetTransportTCP, NetTransportUDP: semconv.NetTransportUDP, NetTransportInProc: semconv.NetTransportInProc, } // Transport returns a trace attribute describing the transport protocol of the // passed network. See the net.Dial for information about acceptable network // values. func Transport(network string) attribute.KeyValue { return nc.Transport(network) } // Client returns trace attributes for a client network connection to address. // See net.Dial for information about acceptable address values, address should // be the same as the one used to create conn. If conn is nil, only network // peer attributes will be returned that describe address. Otherwise, the // socket level information about conn will also be included. func Client(address string, conn net.Conn) []attribute.KeyValue { return nc.Client(address, conn) } // Server returns trace attributes for a network listener listening at address. // See net.Listen for information about acceptable address values, address // should be the same as the one used to create ln. If ln is nil, only network // host attributes will be returned that describe address. Otherwise, the // socket level information about ln will also be included. func Server(address string, ln net.Listener) []attribute.KeyValue { return nc.Server(address, ln) } opentelemetry-go-1.21.0/semconv/v1.16.0/resource.go000066400000000000000000001176361452547353200216630ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.16.0" import "go.opentelemetry.io/otel/attribute" // The web browser in which the application represented by the resource is running. The `browser.*` attributes MUST be used only for resources that represent applications running in a web browser (regardless of whether running on a mobile or desktop device). const ( // Array of brand name and version separated by a space // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (`navigator.userAgentData.brands`). 
BrowserBrandsKey = attribute.Key("browser.brands") // The platform on which the browser is running // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Windows', 'macOS', 'Android' // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (`navigator.userAgentData.platform`). If unavailable, the legacy // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD // be left unset in order for the values to be consistent. // The list of possible values is defined in the [W3C User-Agent Client Hints // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). // Note that some (but not all) of these values can overlap with values in the // [`os.type` and `os.name` attributes](./os.md). However, for consistency, the // values in the `browser.platform` attribute should capture the exact value that // the user agent provides. BrowserPlatformKey = attribute.Key("browser.platform") // A boolean that is true if the browser is running on a mobile device // // Type: boolean // RequirementLevel: Optional // Stability: stable // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (`navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be // left unset. BrowserMobileKey = attribute.Key("browser.mobile") // Full user-agent string provided by the browser // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 // (KHTML, ' // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' // Note: The user-agent value SHOULD be provided only from browsers that do not // have a mechanism to retrieve brands and platform individually from the User- // Agent Client Hints API. To retrieve the value, the legacy `navigator.userAgent` // API can be used. BrowserUserAgentKey = attribute.Key("browser.user_agent") // Preferred language of the user using the browser // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'en', 'en-US', 'fr', 'fr-FR' // Note: This value is intended to be taken from the Navigator API // `navigator.language`. BrowserLanguageKey = attribute.Key("browser.language") ) // A cloud environment (e.g. GCP, Azure, AWS) const ( // Name of the cloud provider. // // Type: Enum // RequirementLevel: Optional // Stability: stable CloudProviderKey = attribute.Key("cloud.provider") // The cloud account ID the resource is assigned to. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '111111111111', 'opentelemetry' CloudAccountIDKey = attribute.Key("cloud.account.id") // The geographical region the resource is running. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-central1', 'us-east-1' // Note: Refer to your provider's docs to see the available regions, for example // [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc- // detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global- // infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en- // us/global-infrastructure/geographies/), [Google Cloud // regions](https://cloud.google.com/about/locations), or [Tencent Cloud // regions](https://intl.cloud.tencent.com/document/product/213/6091). 
CloudRegionKey = attribute.Key("cloud.region") // Cloud regions often have multiple, isolated locations known as zones to // increase availability. Availability zone represents the zone where the resource // is running. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-east-1c' // Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud. CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") // The cloud platform in use. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: The prefix of the service SHOULD match the one specified in // `cloud.provider`. CloudPlatformKey = attribute.Key("cloud.platform") ) var ( // Alibaba Cloud CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") // Amazon Web Services CloudProviderAWS = CloudProviderKey.String("aws") // Microsoft Azure CloudProviderAzure = CloudProviderKey.String("azure") // Google Cloud Platform CloudProviderGCP = CloudProviderKey.String("gcp") // IBM Cloud CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") // Tencent Cloud CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") ) var ( // Alibaba Cloud Elastic Compute Service CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") // Alibaba Cloud Function Compute CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") // Red Hat OpenShift on Alibaba Cloud CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") // AWS Elastic Compute Cloud CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") // AWS Elastic Container Service CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") // AWS Elastic Kubernetes Service CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") // AWS Lambda CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") // AWS Elastic Beanstalk CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") // AWS App Runner CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") // Red Hat OpenShift on AWS (ROSA) CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") // Azure Virtual Machines CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") // Azure Container Instances CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") // Azure Kubernetes Service CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") // Azure Functions CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") // Azure App Service CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") // Azure Red Hat OpenShift CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") // Google Cloud Compute Engine (GCE) CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") // Google Cloud Run CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") // Google Cloud Kubernetes Engine (GKE) CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") // Google Cloud Functions (GCF) CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") // Google Cloud App Engine (GAE) CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") // Red Hat OpenShift on Google Cloud CloudPlatformGoogleCloudOpenshift = CloudPlatformKey.String("google_cloud_openshift") // Red Hat OpenShift on IBM Cloud CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") // 
Tencent Cloud Cloud Virtual Machine (CVM) CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") // Tencent Cloud Elastic Kubernetes Service (EKS) CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") // Tencent Cloud Serverless Cloud Function (SCF) CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") ) // Resources used by AWS Elastic Container Service (ECS). const ( // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws. // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:ecs:us- // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo // perguide/clusters.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l // aunch_types.html) for an ECS task. // // Type: Enum // RequirementLevel: Optional // Stability: stable AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates // t/developerguide/task_definitions.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:ecs:us- // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") // The task definition family this task definition is a member of. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-family' AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") // The revision for this task definition. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '8', '26' AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") ) var ( // ec2 AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") // fargate AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") ) // Resources used by AWS Elastic Kubernetes Service (EKS). const ( // The ARN of an EKS cluster. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") ) // Resources specific to Amazon Web Services. const ( // The name(s) of the AWS log group(s) an application is writing to. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '/aws/lambda/my-function', 'opentelemetry-service' // Note: Multiple log groups must be supported for cases like multi-container // applications, where a single application has sidecar containers, and each write // to their own log group. AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") // The Amazon Resource Name(s) (ARN) of the AWS log group(s). // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' // Note: See the [log group ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- // access-control-overview-cwl.html#CWL_ARN_Format). 
AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") // The name(s) of the AWS log stream(s) an application is writing to. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") // The ARN(s) of the AWS log stream(s). // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' // Note: See the [log stream ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain // several log streams, so these ARNs necessarily identify both a log group and a // log stream. AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") ) // A container instance. const ( // Container name used by container runtime. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-autoconf' ContainerNameKey = attribute.Key("container.name") // Container ID. Usually a UUID, as for example used to [identify Docker // containers](https://docs.docker.com/engine/reference/run/#container- // identification). The UUID might be abbreviated. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'a3bf90e006b2' ContainerIDKey = attribute.Key("container.id") // The container runtime managing this container. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'docker', 'containerd', 'rkt' ContainerRuntimeKey = attribute.Key("container.runtime") // Name of the image the container was built on. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'gcr.io/opentelemetry/operator' ContainerImageNameKey = attribute.Key("container.image.name") // Container image tag. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0.1' ContainerImageTagKey = attribute.Key("container.image.tag") ) // The software deployment. const ( // Name of the [deployment // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka // deployment tier). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'staging', 'production' DeploymentEnvironmentKey = attribute.Key("deployment.environment") ) // The device on which the process represented by this resource is running. const ( // A unique identifier representing the device // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' // Note: The device identifier MUST only be defined using the values outlined // below. This value is not an advertising identifier and MUST NOT be used as // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the // Firebase Installation ID or a globally unique UUID which is persisted across // sessions in your application. More information can be found // [here](https://developer.android.com/training/articles/user-data-ids) on best // practices and exact implementation details. Caution should be taken when // storing personal data or anything which can identify a user. 
GDPR and data // protection laws may apply, ensure you do your own due diligence. DeviceIDKey = attribute.Key("device.id") // The model identifier for the device // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iPhone3,4', 'SM-G920F' // Note: It's recommended this value represents a machine readable version of the // model identifier rather than the market or consumer-friendly name of the // device. DeviceModelIdentifierKey = attribute.Key("device.model.identifier") // The marketing name for the device model // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' // Note: It's recommended this value represents a human readable version of the // device model rather than a machine readable alternative. DeviceModelNameKey = attribute.Key("device.model.name") // The name of the device manufacturer // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Apple', 'Samsung' // Note: The Android OS provides this field via // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). // iOS apps SHOULD hardcode the value `Apple`. DeviceManufacturerKey = attribute.Key("device.manufacturer") ) // A serverless instance. const ( // The name of the single function that this runtime instance executes. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'my-function', 'myazurefunctionapp/some-function-name' // Note: This is the name of the function as configured/deployed on the FaaS // platform and is usually different from the name of the callback // function (which may be stored in the // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span- // general.md#source-code-attributes) // span attributes). // For some cloud providers, the above definition is ambiguous. The following // definition of function name MUST be used for this attribute // (and consequently the span name) for the listed cloud providers/products: // * **Azure:** The full name `/`, i.e., function app name // followed by a forward slash followed by the function name (this form // can also be seen in the resource JSON for the function). // This means that a span attribute MUST be used, as an Azure function // app can host multiple functions that would usually share // a TracerProvider (see also the `faas.id` attribute). FaaSNameKey = attribute.Key("faas.name") // The unique ID of the single function that this runtime instance executes. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' // Note: On some cloud providers, it may not be possible to determine the full ID // at startup, // so consider setting `faas.id` as a span attribute instead. // The exact value to use for `faas.id` depends on the cloud provider: // * **AWS Lambda:** The function // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and- // namespaces.html). // Take care not to use the "invoked ARN" directly but replace any // [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration- // aliases.html) // with the resolved function version, as the same runtime instance may be // invokable with // multiple different aliases. 
// * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full- // resource-names) // * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en- // us/rest/api/resources/resources/get-by-id) of the invoked function, // *not* the function app, having the form // `/subscriptions//resourceGroups//providers/Microsoft.We // b/sites//functions/`. // This means that a span attribute MUST be used, as an Azure function app can // host multiple functions that would usually share // a TracerProvider. FaaSIDKey = attribute.Key("faas.id") // The immutable version of the function being executed. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '26', 'pinkfroid-00002' // Note: Depending on the cloud provider and platform, use: // * **AWS Lambda:** The [function // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration- // versions.html) // (an integer represented as a decimal string). // * **Google Cloud Run:** The // [revision](https://cloud.google.com/run/docs/managing/revisions) // (i.e., the function name plus the revision suffix). // * **Google Cloud Functions:** The value of the // [`K_REVISION` environment // variable](https://cloud.google.com/functions/docs/env- // var#runtime_environment_variables_set_automatically). // * **Azure Functions:** Not applicable. Do not set this attribute. FaaSVersionKey = attribute.Key("faas.version") // The execution environment ID as a string, that will be potentially reused for // other invocations to the same function/function version. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' // Note: * **AWS Lambda:** Use the (full) log stream name. FaaSInstanceKey = attribute.Key("faas.instance") // The amount of memory available to the serverless function in MiB. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 128 // Note: It's recommended to set this attribute since e.g. too little memory can // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this // information. FaaSMaxMemoryKey = attribute.Key("faas.max_memory") ) // A host is defined as a general computing instance. const ( // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud // provider. For non-containerized Linux systems, the `machine-id` located in // `/etc/machine-id` or `/var/lib/dbus/machine-id` may be used. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'fdbf79e8af94cb7f9e8df36789187052' HostIDKey = attribute.Key("host.id") // Name of the host. On Unix systems, it may contain what the hostname command // returns, or the fully qualified hostname, or another name specified by the // user. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-test' HostNameKey = attribute.Key("host.name") // Type of host. For Cloud, this must be the machine type. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'n1-standard-1' HostTypeKey = attribute.Key("host.type") // The CPU architecture the host system is running on. // // Type: Enum // RequirementLevel: Optional // Stability: stable HostArchKey = attribute.Key("host.arch") // Name of the VM image or OS install the host was instantiated from. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' HostImageNameKey = attribute.Key("host.image.name") // VM image ID. For Cloud, this value is from the provider. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'ami-07b06b442921831e5' HostImageIDKey = attribute.Key("host.image.id") // The version string of the VM image as defined in [Version // Attributes](README.md#version-attributes). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0.1' HostImageVersionKey = attribute.Key("host.image.version") ) var ( // AMD64 HostArchAMD64 = HostArchKey.String("amd64") // ARM32 HostArchARM32 = HostArchKey.String("arm32") // ARM64 HostArchARM64 = HostArchKey.String("arm64") // Itanium HostArchIA64 = HostArchKey.String("ia64") // 32-bit PowerPC HostArchPPC32 = HostArchKey.String("ppc32") // 64-bit PowerPC HostArchPPC64 = HostArchKey.String("ppc64") // IBM z/Architecture HostArchS390x = HostArchKey.String("s390x") // 32-bit x86 HostArchX86 = HostArchKey.String("x86") ) // A Kubernetes Cluster. const ( // The name of the cluster. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-cluster' K8SClusterNameKey = attribute.Key("k8s.cluster.name") ) // A Kubernetes Node object. const ( // The name of the Node. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'node-1' K8SNodeNameKey = attribute.Key("k8s.node.name") // The UID of the Node. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' K8SNodeUIDKey = attribute.Key("k8s.node.uid") ) // A Kubernetes Namespace. const ( // The name of the namespace that the pod is running in. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'default' K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") ) // A Kubernetes Pod object. const ( // The UID of the Pod. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SPodUIDKey = attribute.Key("k8s.pod.uid") // The name of the Pod. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-pod-autoconf' K8SPodNameKey = attribute.Key("k8s.pod.name") ) // A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). const ( // The name of the Container from Pod specification, must be unique within a Pod. // Container runtime usually uses different globally unique name // (`container.name`). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'redis' K8SContainerNameKey = attribute.Key("k8s.container.name") // Number of times the container was restarted. This attribute can be used to // identify a particular container (running or stopped) within a container spec. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 0, 2 K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") ) // A Kubernetes ReplicaSet object. const ( // The UID of the ReplicaSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") // The name of the ReplicaSet. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") ) // A Kubernetes Deployment object. const ( // The UID of the Deployment. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") // The name of the Deployment. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") ) // A Kubernetes StatefulSet object. const ( // The UID of the StatefulSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") // The name of the StatefulSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") ) // A Kubernetes DaemonSet object. const ( // The UID of the DaemonSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") // The name of the DaemonSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") ) // A Kubernetes Job object. const ( // The UID of the Job. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SJobUIDKey = attribute.Key("k8s.job.uid") // The name of the Job. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SJobNameKey = attribute.Key("k8s.job.name") ) // A Kubernetes CronJob object. const ( // The UID of the CronJob. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") // The name of the CronJob. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") ) // The operating system (OS) on which the process represented by this resource is running. const ( // The operating system type. // // Type: Enum // RequirementLevel: Required // Stability: stable OSTypeKey = attribute.Key("os.type") // Human readable (not intended to be parsed) OS version information, like e.g. // reported by `ver` or `lsb_release -a` commands. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' OSDescriptionKey = attribute.Key("os.description") // Human readable operating system name. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iOS', 'Android', 'Ubuntu' OSNameKey = attribute.Key("os.name") // The version string of the operating system as defined in [Version // Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '14.2.1', '18.04.1' OSVersionKey = attribute.Key("os.version") ) var ( // Microsoft Windows OSTypeWindows = OSTypeKey.String("windows") // Linux OSTypeLinux = OSTypeKey.String("linux") // Apple Darwin OSTypeDarwin = OSTypeKey.String("darwin") // FreeBSD OSTypeFreeBSD = OSTypeKey.String("freebsd") // NetBSD OSTypeNetBSD = OSTypeKey.String("netbsd") // OpenBSD OSTypeOpenBSD = OSTypeKey.String("openbsd") // DragonFly BSD OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") // HP-UX (Hewlett Packard Unix) OSTypeHPUX = OSTypeKey.String("hpux") // AIX (Advanced Interactive eXecutive) OSTypeAIX = OSTypeKey.String("aix") // SunOS, Oracle Solaris OSTypeSolaris = OSTypeKey.String("solaris") // IBM z/OS OSTypeZOS = OSTypeKey.String("z_os") ) // An operating system process. const ( // Process identifier (PID). // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 1234 ProcessPIDKey = attribute.Key("process.pid") // Parent Process identifier (PID). // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 111 ProcessParentPIDKey = attribute.Key("process.parent_pid") // The name of the process executable. On Linux based systems, can be set to the // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of // `GetProcessImageFileNameW`. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes below.) // Stability: stable // Examples: 'otelcol' ProcessExecutableNameKey = attribute.Key("process.executable.name") // The full path to the process executable. On Linux based systems, can be set to // the target of `proc/[pid]/exe`. On Windows, can be set to the result of // `GetProcessImageFileNameW`. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes below.) // Stability: stable // Examples: '/usr/bin/cmd/otelcol' ProcessExecutablePathKey = attribute.Key("process.executable.path") // The command used to launch the process (i.e. the command name). On Linux based // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, // can be set to the first parameter extracted from `GetCommandLineW`. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes below.) // Stability: stable // Examples: 'cmd/otelcol' ProcessCommandKey = attribute.Key("process.command") // The full command used to launch the process as a single string representing the // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not // set this if you have to assemble it just for monitoring; use // `process.command_args` instead. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes below.) // Stability: stable // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' ProcessCommandLineKey = attribute.Key("process.command_line") // All the command arguments (including the command/executable itself) as received // by the process. On Linux-based systems (and some other Unixoid systems // supporting procfs), can be set according to the list of null-delimited strings // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be // the full argv vector passed to `main`. // // Type: string[] // RequirementLevel: ConditionallyRequired (See alternative attributes below.) 
// Stability: stable // Examples: 'cmd/otecol', '--config=config.yaml' ProcessCommandArgsKey = attribute.Key("process.command_args") // The username of the user that owns the process. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'root' ProcessOwnerKey = attribute.Key("process.owner") ) // The single (language) runtime instance which is monitored. const ( // The name of the runtime of this process. For compiled native binaries, this // SHOULD be the name of the compiler. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'OpenJDK Runtime Environment' ProcessRuntimeNameKey = attribute.Key("process.runtime.name") // The version of the runtime of this process, as returned by the runtime without // modification. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '14.0.2' ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") // An additional description about the runtime of the process, for example a // specific vendor customization of the runtime environment. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") ) // A service instance. const ( // Logical name of the service. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'shoppingcart' // Note: MUST be the same for all instances of horizontally scaled services. If // the value was not specified, SDKs MUST fallback to `unknown_service:` // concatenated with [`process.executable.name`](process.md#process), e.g. // `unknown_service:bash`. If `process.executable.name` is not available, the // value MUST be set to `unknown_service`. ServiceNameKey = attribute.Key("service.name") // A namespace for `service.name`. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Shop' // Note: A string value having a meaning that helps to distinguish a group of // services, for example the team name that owns a group of services. // `service.name` is expected to be unique within the same namespace. If // `service.namespace` is not specified in the Resource then `service.name` is // expected to be unique for all services that have no explicit namespace defined // (so the empty/unspecified namespace is simply one more valid namespace). Zero- // length namespace string is assumed equal to unspecified namespace. ServiceNamespaceKey = attribute.Key("service.namespace") // The string ID of the service instance. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '627cc493-f310-47de-96bd-71410b7dec09' // Note: MUST be unique for each instance of the same // `service.namespace,service.name` pair (in other words // `service.namespace,service.name,service.instance.id` triplet MUST be globally // unique). The ID helps to distinguish instances of the same service that exist // at the same time (e.g. instances of a horizontally scaled service). It is // preferable for the ID to be persistent and stay the same for the lifetime of // the service instance, however it is acceptable that the ID is ephemeral and // changes during important lifetime events for the service (e.g. service // restarts). 
If the service has no inherent unique ID that can be used as the // value of this attribute it is recommended to generate a random Version 1 or // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use // Version 5, see RFC 4122 for more recommendations). ServiceInstanceIDKey = attribute.Key("service.instance.id") // The version string of the service API or implementation. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2.0.0' ServiceVersionKey = attribute.Key("service.version") ) // The telemetry SDK used to capture data recorded by the instrumentation libraries. const ( // The name of the telemetry SDK as defined above. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") // The language of the telemetry SDK. // // Type: Enum // RequirementLevel: Optional // Stability: stable TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") // The version string of the telemetry SDK. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.2.3' TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") // The version string of the auto instrumentation agent, if used. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.2.3' TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") ) var ( // cpp TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") // dotnet TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") // erlang TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") // go TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") // java TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") // nodejs TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") // php TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") // python TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") // ruby TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") // webjs TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") // swift TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") ) // Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. const ( // The name of the web engine. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'WildFly' WebEngineNameKey = attribute.Key("webengine.name") // The version of the web engine. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '21.0.0' WebEngineVersionKey = attribute.Key("webengine.version") // Additional description of the web engine (e.g. detailed version and edition // information). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' WebEngineDescriptionKey = attribute.Key("webengine.description") ) // Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's concepts. const ( // The name of the instrumentation scope - (`InstrumentationScope.Name` in OTLP). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'io.opentelemetry.contrib.mongodb' OtelScopeNameKey = attribute.Key("otel.scope.name") // The version of the instrumentation scope - (`InstrumentationScope.Version` in // OTLP). 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.0.0' OtelScopeVersionKey = attribute.Key("otel.scope.version") ) // Span attributes used by non-OTLP exporters to represent OpenTelemetry Scope's concepts. const ( // Deprecated, use the `otel.scope.name` attribute. // // Type: string // RequirementLevel: Optional // Stability: deprecated // Examples: 'io.opentelemetry.contrib.mongodb' OtelLibraryNameKey = attribute.Key("otel.library.name") // Deprecated, use the `otel.scope.version` attribute. // // Type: string // RequirementLevel: Optional // Stability: deprecated // Examples: '1.0.0' OtelLibraryVersionKey = attribute.Key("otel.library.version") ) opentelemetry-go-1.21.0/semconv/v1.16.0/schema.go000066400000000000000000000017141452547353200212610ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.16.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ const SchemaURL = "https://opentelemetry.io/schemas/1.16.0" opentelemetry-go-1.21.0/semconv/v1.16.0/trace.go000066400000000000000000002016361452547353200211240ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.16.0" import "go.opentelemetry.io/otel/attribute" // This document defines the shared attributes used to report a single exception associated with a span or log. const ( // The type of the exception (its fully-qualified class name, if applicable). The // dynamic type of the exception should be preferred over the static type in // languages that support it. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'java.net.ConnectException', 'OSError' ExceptionTypeKey = attribute.Key("exception.type") // The exception message. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" ExceptionMessageKey = attribute.Key("exception.message") // A stacktrace as a string in the natural representation for the language // runtime. 
The representation is to be determined and documented by each language // SIG. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test // exception\\n at ' // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' ExceptionStacktraceKey = attribute.Key("exception.stacktrace") ) // This document defines attributes for Events represented using Log Records. const ( // The name identifies the event. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'click', 'exception' EventNameKey = attribute.Key("event.name") // The domain identifies the business context for the events. // // Type: Enum // RequirementLevel: Required // Stability: stable // Note: Events across different domains may have same `event.name`, yet be // unrelated events. EventDomainKey = attribute.Key("event.domain") ) var ( // Events from browser apps EventDomainBrowser = EventDomainKey.String("browser") // Events from mobile apps EventDomainDevice = EventDomainKey.String("device") // Events from Kubernetes EventDomainK8S = EventDomainKey.String("k8s") ) // Span attributes used by AWS Lambda (in addition to general `faas` attributes). const ( // The full invoked ARN as provided on the `Context` passed to the function // (`Lambda-Runtime-Invoked-Function-ARN` header on the `/runtime/invocation/next` // applicable). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' // Note: This may be different from `faas.id` if an alias is involved. AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") ) // This document defines attributes for CloudEvents. CloudEvents is a specification on how to define event data in a standard way. These attributes can be attached to spans when performing operations with CloudEvents, regardless of the protocol being used. const ( // The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec // .md#id) uniquely identifies the event. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") // The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.m // d#source-1) identifies the context in which an event happened. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my- // service' CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") // The [version of the CloudEvents specification](https://github.com/cloudevents/s // pec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.0' CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") // The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/sp // ec.md#type) contains a value describing the type of event related to the // originating occurrence. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2' CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") // The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec. // md#subject) of the event in the context of the event producer (identified by // source). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'mynewfile.jpg' CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") ) // This document defines semantic conventions for the OpenTracing Shim const ( // Parent-child Reference type // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: The causal relationship between a child Span and a parent Span. OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") ) var ( // The parent Span depends on the child Span in some capacity OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") // The parent Span does not depend in any way on the result of the child Span OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") ) // This document defines the attributes used to perform database client calls. const ( // An identifier for the database management system (DBMS) product being used. See // below for a list of well-known identifiers. // // Type: Enum // RequirementLevel: Required // Stability: stable DBSystemKey = attribute.Key("db.system") // The connection string used to connect to the database. It is recommended to // remove embedded credentials. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' DBConnectionStringKey = attribute.Key("db.connection_string") // Username for accessing the database. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'readonly_user', 'reporting_user' DBUserKey = attribute.Key("db.user") // The fully-qualified class name of the [Java Database Connectivity // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver // used to connect. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'org.postgresql.Driver', // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") // This attribute is used to report the name of the database being accessed. For // commands that switch the database, this should be set to the target database // (even if the command fails). // // Type: string // RequirementLevel: ConditionallyRequired (If applicable.) // Stability: stable // Examples: 'customers', 'main' // Note: In some SQL databases, the database name to be used is called "schema // name". In case there are multiple layers that could be considered for database // name (e.g. Oracle instance name and schema name), the database name to be used // is the more specific layer (e.g. Oracle schema name). DBNameKey = attribute.Key("db.name") // The database statement being executed. // // Type: string // RequirementLevel: ConditionallyRequired (If applicable and not explicitly // disabled via instrumentation configuration.) // Stability: stable // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' // Note: The value may be sanitized to exclude sensitive information. DBStatementKey = attribute.Key("db.statement") // The name of the operation being executed, e.g. 
the [MongoDB command // name](https://docs.mongodb.com/manual/reference/command/#database-operations) // such as `findAndModify`, or the SQL keyword. // // Type: string // RequirementLevel: ConditionallyRequired (If `db.statement` is not applicable.) // Stability: stable // Examples: 'findAndModify', 'HMSET', 'SELECT' // Note: When setting this to an SQL keyword, it is not recommended to attempt any // client-side parsing of `db.statement` just to get this property, but it should // be set if the operation name is provided by the library being instrumented. If // the SQL statement has an ambiguous operation, or performs more than one // operation, this value may be omitted. DBOperationKey = attribute.Key("db.operation") ) var ( // Some other SQL database. Fallback only. See notes DBSystemOtherSQL = DBSystemKey.String("other_sql") // Microsoft SQL Server DBSystemMSSQL = DBSystemKey.String("mssql") // MySQL DBSystemMySQL = DBSystemKey.String("mysql") // Oracle Database DBSystemOracle = DBSystemKey.String("oracle") // IBM DB2 DBSystemDB2 = DBSystemKey.String("db2") // PostgreSQL DBSystemPostgreSQL = DBSystemKey.String("postgresql") // Amazon Redshift DBSystemRedshift = DBSystemKey.String("redshift") // Apache Hive DBSystemHive = DBSystemKey.String("hive") // Cloudscape DBSystemCloudscape = DBSystemKey.String("cloudscape") // HyperSQL DataBase DBSystemHSQLDB = DBSystemKey.String("hsqldb") // Progress Database DBSystemProgress = DBSystemKey.String("progress") // SAP MaxDB DBSystemMaxDB = DBSystemKey.String("maxdb") // SAP HANA DBSystemHanaDB = DBSystemKey.String("hanadb") // Ingres DBSystemIngres = DBSystemKey.String("ingres") // FirstSQL DBSystemFirstSQL = DBSystemKey.String("firstsql") // EnterpriseDB DBSystemEDB = DBSystemKey.String("edb") // InterSystems Caché DBSystemCache = DBSystemKey.String("cache") // Adabas (Adaptable Database System) DBSystemAdabas = DBSystemKey.String("adabas") // Firebird DBSystemFirebird = DBSystemKey.String("firebird") // Apache Derby DBSystemDerby = DBSystemKey.String("derby") // FileMaker DBSystemFilemaker = DBSystemKey.String("filemaker") // Informix DBSystemInformix = DBSystemKey.String("informix") // InstantDB DBSystemInstantDB = DBSystemKey.String("instantdb") // InterBase DBSystemInterbase = DBSystemKey.String("interbase") // MariaDB DBSystemMariaDB = DBSystemKey.String("mariadb") // Netezza DBSystemNetezza = DBSystemKey.String("netezza") // Pervasive PSQL DBSystemPervasive = DBSystemKey.String("pervasive") // PointBase DBSystemPointbase = DBSystemKey.String("pointbase") // SQLite DBSystemSqlite = DBSystemKey.String("sqlite") // Sybase DBSystemSybase = DBSystemKey.String("sybase") // Teradata DBSystemTeradata = DBSystemKey.String("teradata") // Vertica DBSystemVertica = DBSystemKey.String("vertica") // H2 DBSystemH2 = DBSystemKey.String("h2") // ColdFusion IMQ DBSystemColdfusion = DBSystemKey.String("coldfusion") // Apache Cassandra DBSystemCassandra = DBSystemKey.String("cassandra") // Apache HBase DBSystemHBase = DBSystemKey.String("hbase") // MongoDB DBSystemMongoDB = DBSystemKey.String("mongodb") // Redis DBSystemRedis = DBSystemKey.String("redis") // Couchbase DBSystemCouchbase = DBSystemKey.String("couchbase") // CouchDB DBSystemCouchDB = DBSystemKey.String("couchdb") // Microsoft Azure Cosmos DB DBSystemCosmosDB = DBSystemKey.String("cosmosdb") // Amazon DynamoDB DBSystemDynamoDB = DBSystemKey.String("dynamodb") // Neo4j DBSystemNeo4j = DBSystemKey.String("neo4j") // Apache Geode DBSystemGeode = DBSystemKey.String("geode") // Elasticsearch 
DBSystemElasticsearch = DBSystemKey.String("elasticsearch") // Memcached DBSystemMemcached = DBSystemKey.String("memcached") // CockroachDB DBSystemCockroachdb = DBSystemKey.String("cockroachdb") // OpenSearch DBSystemOpensearch = DBSystemKey.String("opensearch") ) // Connection-level attributes for Microsoft SQL Server const ( // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en- // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) // connecting to. This name is used to determine the port of a named instance. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MSSQLSERVER' // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer // required (but still recommended if non-standard). DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") ) // Call-level attributes for Cassandra const ( // The fetch size used for paging, i.e. how many rows will be returned at once. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 5000 DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") // The consistency level of the query. Based on consistency values from // [CQL](https://docs.datastax.com/en/cassandra- // oss/3.0/cassandra/dml/dmlConfigConsistency.html). // // Type: Enum // RequirementLevel: Optional // Stability: stable DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") // The name of the primary table that the operation is acting upon, including the // keyspace name (if applicable). // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'mytable' // Note: This mirrors the db.sql.table attribute but references cassandra rather // than sql. It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting upon an // anonymous table, or more than one table, this value MUST NOT be set. DBCassandraTableKey = attribute.Key("db.cassandra.table") // Whether or not the query is idempotent. // // Type: boolean // RequirementLevel: Optional // Stability: stable DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") // The number of times a query was speculatively executed. Not set or `0` if the // query was not executed speculatively. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 0, 2 DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") // The ID of the coordinating node for a query. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") // The data center of the coordinating node for a query. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-west-2' DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") ) var ( // all DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") // each_quorum DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") // quorum DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") // local_quorum DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") // one DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") // two DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") // three DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") // local_one DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") // any DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") // serial DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") // local_serial DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") ) // Call-level attributes for Redis const ( // The index of the database being accessed as used in the [`SELECT` // command](https://redis.io/commands/select), provided as an integer. To be used // instead of the generic `db.name` attribute. // // Type: int // RequirementLevel: ConditionallyRequired (If other than the default database // (`0`).) // Stability: stable // Examples: 0, 1, 15 DBRedisDBIndexKey = attribute.Key("db.redis.database_index") ) // Call-level attributes for MongoDB const ( // The collection being accessed within the database stated in `db.name`. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'customers', 'products' DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") ) // Call-level attributes for SQL databases const ( // The name of the primary table that the operation is acting upon, including the // database name (if applicable). // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'public.users', 'customers' // Note: It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting upon an // anonymous table, or more than one table, this value MUST NOT be set. DBSQLTableKey = attribute.Key("db.sql.table") ) // Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's concepts. const ( // Name of the code, either "OK" or "ERROR". MUST NOT be set if the status code is // UNSET. // // Type: Enum // RequirementLevel: Optional // Stability: stable OtelStatusCodeKey = attribute.Key("otel.status_code") // Description of the Status if it has a value, otherwise not set. 
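// (Hypothetical sketch for a non-OTLP exporter that maps span status to
// attributes; the values are the documented examples in this section.) An ERROR
// status with a description could be represented as:
//
//	attrs := []attribute.KeyValue{
//		OtelStatusCodeError,
//		OtelStatusDescriptionKey.String("resource not found"),
//	}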
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'resource not found' OtelStatusDescriptionKey = attribute.Key("otel.status_description") ) var ( // The operation has been validated by an Application developer or Operator to have completed successfully OtelStatusCodeOk = OtelStatusCodeKey.String("OK") // The operation contains an error OtelStatusCodeError = OtelStatusCodeKey.String("ERROR") ) // This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. const ( // Type of the trigger which caused this function execution. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: For the server/consumer span on the incoming side, // `faas.trigger` MUST be set. // Clients invoking FaaS instances usually cannot set `faas.trigger`, // since they would typically need to look in the payload to determine // the event type. If clients set it, it should be the same as the // trigger that corresponding incoming would have (i.e., this has // nothing to do with the underlying transport used to make the API // call to invoke the lambda, which is often HTTP). FaaSTriggerKey = attribute.Key("faas.trigger") // The execution ID of the current function execution. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' FaaSExecutionKey = attribute.Key("faas.execution") ) var ( // A response to some data source operation such as a database or filesystem read/write FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") // To provide an answer to an inbound HTTP request FaaSTriggerHTTP = FaaSTriggerKey.String("http") // A function is set to be executed when messages are sent to a messaging system FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") // A function is scheduled to be executed regularly FaaSTriggerTimer = FaaSTriggerKey.String("timer") // If none of the others apply FaaSTriggerOther = FaaSTriggerKey.String("other") ) // Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. const ( // The name of the source on which the triggering operation was performed. For // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos // DB to the database name. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myBucketName', 'myDBName' FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") // Describes the type of the operation that was performed on the data. // // Type: Enum // RequirementLevel: Required // Stability: stable FaaSDocumentOperationKey = attribute.Key("faas.document.operation") // A string containing the time when the data was accessed in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed // in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSDocumentTimeKey = attribute.Key("faas.document.time") // The document name/table subjected to the operation. For example, in Cloud // Storage or S3 is the name of the file, and in Cosmos DB the table name. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'myFile.txt', 'myTableName' FaaSDocumentNameKey = attribute.Key("faas.document.name") ) var ( // When a new object is created FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") // When an object is modified FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") // When an object is deleted FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") ) // Semantic Convention for FaaS scheduled to be executed regularly. const ( // A string containing the function invocation time in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed // in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSTimeKey = attribute.Key("faas.time") // A string containing the schedule period as [Cron Expression](https://docs.oracl // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0/5 * * * ? *' FaaSCronKey = attribute.Key("faas.cron") ) // Contains additional attributes for incoming FaaS spans. const ( // A boolean that is true if the serverless function is executed for the first // time (aka cold-start). // // Type: boolean // RequirementLevel: Optional // Stability: stable FaaSColdstartKey = attribute.Key("faas.coldstart") ) // Contains additional attributes for outgoing FaaS spans. const ( // The name of the invoked function. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'my-function' // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked // function. FaaSInvokedNameKey = attribute.Key("faas.invoked_name") // The cloud provider of the invoked function. // // Type: Enum // RequirementLevel: Required // Stability: stable // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked // function. FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") // The cloud region of the invoked function. // // Type: string // RequirementLevel: ConditionallyRequired (For some cloud providers, like AWS or // GCP, the region in which a function is hosted is essential to uniquely identify // the function and also part of its endpoint. Since it's part of the endpoint // being called, the region is always known to clients. In these cases, // `faas.invoked_region` MUST be set accordingly. If the region is unknown to the // client or not required for identifying the invoked function, setting // `faas.invoked_region` is optional.) // Stability: stable // Examples: 'eu-central-1' // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked // function. FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") ) var ( // Alibaba Cloud FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") // Amazon Web Services FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") // Microsoft Azure FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") // Google Cloud Platform FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") // Tencent Cloud FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") ) // These attributes may be used for any network related operation. const ( // Transport protocol used. See note below. 
// // Type: Enum // RequirementLevel: Optional // Stability: stable NetTransportKey = attribute.Key("net.transport") // Application layer protocol used. The value SHOULD be normalized to lowercase. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'amqp', 'http', 'mqtt' NetAppProtocolNameKey = attribute.Key("net.app.protocol.name") // Version of the application layer protocol used. See note below. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '3.1.1' // Note: `net.app.protocol.version` refers to the version of the protocol used and // might be different from the protocol client's version. If the HTTP client used // has a version of `0.27.2`, but sends HTTP version `1.1`, this attribute should // be set to `1.1`. NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version") // Remote socket peer name. // // Type: string // RequirementLevel: Recommended (If available and different from `net.peer.name` // and if `net.sock.peer.addr` is set.) // Stability: stable // Examples: 'proxy.example.com' NetSockPeerNameKey = attribute.Key("net.sock.peer.name") // Remote socket peer address: IPv4 or IPv6 for internet protocols, path for local // communication, [etc](https://man7.org/linux/man- // pages/man7/address_families.7.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '127.0.0.1', '/tmp/mysql.sock' NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") // Remote socket peer port. // // Type: int // RequirementLevel: Recommended (If defined for the address family and if // different than `net.peer.port` and if `net.sock.peer.addr` is set.) // Stability: stable // Examples: 16456 NetSockPeerPortKey = attribute.Key("net.sock.peer.port") // Protocol [address family](https://man7.org/linux/man- // pages/man7/address_families.7.html) which is used for communication. // // Type: Enum // RequirementLevel: ConditionallyRequired (If different than `inet` and if any of // `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers of telemetry // SHOULD accept both IPv4 and IPv6 formats for the address in // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support // instrumentations that follow previous versions of this document.) // Stability: stable // Examples: 'inet6', 'bluetooth' NetSockFamilyKey = attribute.Key("net.sock.family") // Logical remote hostname, see note below. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'example.com' // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an extra // DNS lookup. NetPeerNameKey = attribute.Key("net.peer.name") // Logical remote port number // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 80, 8080, 443 NetPeerPortKey = attribute.Key("net.peer.port") // Logical local hostname or similar, see note below. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'localhost' NetHostNameKey = attribute.Key("net.host.name") // Logical local port number, preferably the one that the peer used to connect // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 8080 NetHostPortKey = attribute.Key("net.host.port") // Local socket address. Useful in case of a multi-IP host. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '192.168.0.1' NetSockHostAddrKey = attribute.Key("net.sock.host.addr") // Local socket port number. 
// // Type: int // RequirementLevel: Recommended (If defined for the address family and if // different than `net.host.port` and if `net.sock.host.addr` is set.) // Stability: stable // Examples: 35555 NetSockHostPortKey = attribute.Key("net.sock.host.port") // The internet connection type currently being used by the host. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'wifi' NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") // This describes more details regarding the connection.type. It may be the type // of cell technology connection, but it could be used for describing details // about a wifi connection. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'LTE' NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") // The name of the mobile carrier. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'sprint' NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") // The mobile carrier country code. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '310' NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") // The mobile carrier network code. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '001' NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") // The ISO 3166-1 alpha-2 2-character country code associated with the mobile // carrier network. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'DE' NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") ) var ( // ip_tcp NetTransportTCP = NetTransportKey.String("ip_tcp") // ip_udp NetTransportUDP = NetTransportKey.String("ip_udp") // Named or anonymous pipe. See note below NetTransportPipe = NetTransportKey.String("pipe") // In-process communication NetTransportInProc = NetTransportKey.String("inproc") // Something else (non IP-based) NetTransportOther = NetTransportKey.String("other") ) var ( // IPv4 address NetSockFamilyInet = NetSockFamilyKey.String("inet") // IPv6 address NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") // Unix domain socket path NetSockFamilyUnix = NetSockFamilyKey.String("unix") ) var ( // wifi NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") // wired NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") // cell NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") // unavailable NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") // unknown NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") ) var ( // GPRS NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") // EDGE NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") // UMTS NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") // CDMA NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") // EVDO Rel. 0 NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") // EVDO Rev. 
A NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") // CDMA2000 1XRTT NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") // HSDPA NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") // HSUPA NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") // HSPA NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") // IDEN NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") // EVDO Rev. B NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") // LTE NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") // EHRPD NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") // HSPAP NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") // GSM NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") // TD-SCDMA NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") // IWLAN NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") // 5G NR (New Radio) NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") // 5G NRNSA (New Radio Non-Standalone) NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") // LTE CA NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") ) // Operations that access some remote service. const ( // The [`service.name`](../../resource/semantic_conventions/README.md#service) of // the remote service. SHOULD be equal to the actual `service.name` resource // attribute of the remote service if any. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'AuthTokenCache' PeerServiceKey = attribute.Key("peer.service") ) // These attributes may be used for any operation with an authenticated and/or authorized enduser. const ( // Username or client_id extracted from the access token or // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the // inbound request from outside the system. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'username' EnduserIDKey = attribute.Key("enduser.id") // Actual/assumed role the client is making the request under extracted from token // or application security context. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'admin' EnduserRoleKey = attribute.Key("enduser.role") // Scopes or granted authorities the client currently possesses extracted from // token or application security context. The value would come from the scope // associated with an [OAuth 2.0 Access // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value // in a [SAML 2.0 Assertion](http://docs.oasis- // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'read:message, write:files' EnduserScopeKey = attribute.Key("enduser.scope") ) // These attributes may be used for any operation to store information about a thread that started a span. const ( // Current "managed" thread ID (as opposed to OS thread ID). // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 42 ThreadIDKey = attribute.Key("thread.id") // Current thread name. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'main' ThreadNameKey = attribute.Key("thread.name") ) // These attributes allow to report this unit of code and therefore to provide more context about the span. const ( // The method or function name, or equivalent (usually rightmost part of the code // unit's name). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'serveRequest' CodeFunctionKey = attribute.Key("code.function") // The "namespace" within which `code.function` is defined. Usually the qualified // class or module name, such that `code.namespace` + some separator + // `code.function` form a unique identifier for the code unit. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'com.example.MyHTTPService' CodeNamespaceKey = attribute.Key("code.namespace") // The source code file name that identifies the code unit as uniquely as possible // (preferably an absolute file path). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '/usr/local/MyApplication/content_root/app/index.php' CodeFilepathKey = attribute.Key("code.filepath") // The line number in `code.filepath` best representing the operation. It SHOULD // point within the code unit named in `code.function`. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 42 CodeLineNumberKey = attribute.Key("code.lineno") ) // This document defines semantic conventions for HTTP client and server Spans. const ( // HTTP request method. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'GET', 'POST', 'HEAD' HTTPMethodKey = attribute.Key("http.method") // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). // // Type: int // RequirementLevel: ConditionallyRequired (If and only if one was received/sent.) // Stability: stable // Examples: 200 HTTPStatusCodeKey = attribute.Key("http.status_code") // Kind of HTTP protocol used. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP` // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed. HTTPFlavorKey = attribute.Key("http.flavor") // Value of the [HTTP User-Agent](https://www.rfc- // editor.org/rfc/rfc9110.html#field.user-agent) header sent by the client. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' HTTPUserAgentKey = attribute.Key("http.user_agent") // The size of the request payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content- // length) header. For requests using transport encoding, this should be the // compressed size. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 3495 HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") // The size of the response payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content- // length) header. For requests using transport encoding, this should be the // compressed size. 
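// (Hypothetical client-instrumentation sketch; span stands for any
// go.opentelemetry.io/otel/trace Span, and the values are the documented
// examples for these keys.) The HTTP keys above combine into span attributes
// such as:
//
//	span.SetAttributes(
//		HTTPMethodKey.String("GET"),
//		HTTPStatusCodeKey.Int(200),
//		HTTPResponseContentLengthKey.Int(3495),
//	)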
// // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 3495 HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") ) var ( // HTTP/1.0 HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") // HTTP/1.1 HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") // HTTP/2 HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") // HTTP/3 HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") // SPDY protocol HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") // QUIC protocol HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") ) // Semantic Convention for HTTP Client const ( // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. // Usually the fragment is not transmitted over HTTP, but if it is known, it // should be included nevertheless. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' // Note: `http.url` MUST NOT contain credentials passed via URL in form of // `https://username:password@www.example.com/`. In such case the attribute's // value should be `https://www.example.com/`. HTTPURLKey = attribute.Key("http.url") // The ordinal number of request resending attempt (for any reason, including // redirects). // // Type: int // RequirementLevel: Recommended (if and only if request was retried.) // Stability: stable // Examples: 3 // Note: The resend count SHOULD be updated each time an HTTP request gets resent // by the client, regardless of what was the cause of the resending (e.g. // redirection, authorization failure, 503 Server Unavailable, network issues, or // any other). HTTPResendCountKey = attribute.Key("http.resend_count") ) // Semantic Convention for HTTP Server const ( // The URI scheme identifying the used protocol. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'http', 'https' HTTPSchemeKey = attribute.Key("http.scheme") // The full request target as passed in a HTTP request line or equivalent. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: '/path/12314/?q=ddds' HTTPTargetKey = attribute.Key("http.target") // The matched route (path template in the format used by the respective server // framework). See note below // // Type: string // RequirementLevel: ConditionallyRequired (If and only if it's available) // Stability: stable // Examples: '/users/:userID?', '{controller}/{action}/{id?}' // Note: 'http.route' MUST NOT be populated when this is not supported by the HTTP // server framework as the route attribute should have low-cardinality and the URI // path can NOT substitute it. HTTPRouteKey = attribute.Key("http.route") // The IP address of the original client behind all proxies, if known (e.g. from // [X-Forwarded-For](https://developer.mozilla.org/en- // US/docs/Web/HTTP/Headers/X-Forwarded-For)). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '83.164.160.102' // Note: This is not necessarily the same as `net.sock.peer.addr`, which would // identify the network-level peer, which may be a proxy. // This attribute should be set when a source of information different // from the one used for `net.sock.peer.addr`, is available even if that other // source just confirms the same value as `net.sock.peer.addr`. // Rationale: For `net.sock.peer.addr`, one typically does not know if it // comes from a proxy, reverse proxy, or the actual client. 
Setting // `http.client_ip` when it's the same as `net.sock.peer.addr` means that // one is at least somewhat confident that the address is not that of // the closest proxy. HTTPClientIPKey = attribute.Key("http.client_ip") ) // Attributes that exist for multiple DynamoDB request types. const ( // The keys in the `RequestItems` object field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'Users', 'Cats' AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") // The JSON-serialized value of each item in the `ConsumedCapacity` response // field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": // "string", "WriteCapacityUnits": number }' AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") // The JSON-serialized value of the `ItemCollectionMetrics` response field. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. // // Type: double // RequirementLevel: Optional // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. // // Type: double // RequirementLevel: Optional // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") // The value of the `ConsistentRead` request parameter. // // Type: boolean // RequirementLevel: Optional // Stability: stable AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") // The value of the `ProjectionExpression` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, // ProductReviews' AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") // The value of the `Limit` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") // The value of the `AttributesToGet` request parameter. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'lives', 'id' AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") // The value of the `IndexName` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'name_to_group' AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") // The value of the `Select` request parameter. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'ALL_ATTRIBUTES', 'COUNT' AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") ) // DynamoDB.CreateTable const ( // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request // field // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": // number, "WriteCapacityUnits": number } }' AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request // field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], // "ProjectionType": "string" } }' AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") ) // DynamoDB.ListTables const ( // The value of the `ExclusiveStartTableName` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Users', 'CatsTable' AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") // The number of items in the `TableNames` response parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 20 AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") ) // DynamoDB.Query const ( // The value of the `ScanIndexForward` request parameter. // // Type: boolean // RequirementLevel: Optional // Stability: stable AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") ) // DynamoDB.Scan const ( // The value of the `Segment` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") // The value of the `TotalSegments` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 100 AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") // The value of the `Count` response parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") // The value of the `ScannedCount` response parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 50 AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") ) // DynamoDB.UpdateTable const ( // The JSON-serialized value of each item in the `AttributeDefinitions` request // field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") // The JSON-serialized value of each item in the `GlobalSecondaryIndexUpdates` // request field. 
// // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { // "AttributeName": "string", "KeyType": "string" } ], "Projection": { // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": // number } }' AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") ) // This document defines semantic conventions to apply when instrumenting the GraphQL implementation. They map GraphQL operations to attributes on a Span. const ( // The name of the operation being executed. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'findBookByID' GraphqlOperationNameKey = attribute.Key("graphql.operation.name") // The type of the operation being executed. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'query', 'mutation', 'subscription' GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") // The GraphQL document being executed. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'query findBookByID { bookByID(id: ?) { name } }' // Note: The value may be sanitized to exclude sensitive information. GraphqlDocumentKey = attribute.Key("graphql.document") ) var ( // GraphQL query GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") // GraphQL mutation GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") // GraphQL subscription GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") ) // This document defines the attributes used in messaging systems. const ( // A string identifying the messaging system. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' MessagingSystemKey = attribute.Key("messaging.system") // The message destination name. This might be equal to the span name but is // required nevertheless. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'MyQueue', 'MyTopic' MessagingDestinationKey = attribute.Key("messaging.destination") // The kind of message destination // // Type: Enum // RequirementLevel: ConditionallyRequired (If the message destination is either a // `queue` or `topic`.) // Stability: stable MessagingDestinationKindKey = attribute.Key("messaging.destination_kind") // A boolean that is true if the message destination is temporary. // // Type: boolean // RequirementLevel: ConditionallyRequired (If value is `true`. When missing, the // value is assumed to be `false`.) // Stability: stable MessagingTempDestinationKey = attribute.Key("messaging.temp_destination") // The name of the transport protocol. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'AMQP', 'MQTT' MessagingProtocolKey = attribute.Key("messaging.protocol") // The version of the transport protocol. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0.9.1' MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version") // Connection string. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'tibjmsnaming://localhost:7222', // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' MessagingURLKey = attribute.Key("messaging.url") // A value used by the messaging system as an identifier for the message, // represented as a string. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '452a7c7c7c7048c2f887f61572b18fc2' MessagingMessageIDKey = attribute.Key("messaging.message_id") // The [conversation ID](#conversations) identifying the conversation to which the // message belongs, represented as a string. Sometimes called "Correlation ID". // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MyConversationID' MessagingConversationIDKey = attribute.Key("messaging.conversation_id") // The (uncompressed) size of the message payload in bytes. Also use this // attribute if it is unknown whether the compressed or uncompressed payload size // is reported. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2738 MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes") // The compressed size of the message payload in bytes. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2048 MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes") ) var ( // A message sent to a queue MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") // A message sent to a topic MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") ) // Semantic convention for a consumer of messages received from a messaging system const ( // A string identifying the kind of message consumption as defined in the // [Operation names](#operation-names) section above. If the operation is "send", // this attribute MUST NOT be set, since the operation can be inferred from the // span kind in that case. // // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingOperationKey = attribute.Key("messaging.operation") // The identifier for the consumer receiving a message. For Kafka, set it to // `{messaging.kafka.consumer_group} - {messaging.kafka.client_id}`, if both are // present, or only `messaging.kafka.consumer_group`. For brokers, such as // RabbitMQ and Artemis, set it to the `client_id` of the client consuming the // message. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'mygroup - client-6' MessagingConsumerIDKey = attribute.Key("messaging.consumer_id") ) var ( // receive MessagingOperationReceive = MessagingOperationKey.String("receive") // process MessagingOperationProcess = MessagingOperationKey.String("process") ) // Attributes for RabbitMQ const ( // RabbitMQ message routing key. // // Type: string // RequirementLevel: ConditionallyRequired (If not empty.) // Stability: stable // Examples: 'myKey' MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key") ) // Attributes for Apache Kafka const ( // Message keys in Kafka are used for grouping alike messages to ensure they're // processed on the same partition. They differ from `messaging.message_id` in // that they're not unique. If the key is `null`, the attribute MUST NOT be set. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'myKey' // Note: If the key type is not string, its string representation has to be // supplied for the attribute.
If the key has no unambiguous, canonical string // form, don't include its value. MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key") // Name of the Kafka Consumer Group that is handling the message. Only applies to // consumers, not producers. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'my-group' MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group") // Client ID for the Consumer or Producer that is handling the message. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'client-5' MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") // Partition the message is sent to. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2 MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition") // The offset of a record in the corresponding Kafka partition. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 42 MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") // A boolean that is true if the message is a tombstone. // // Type: boolean // RequirementLevel: ConditionallyRequired (If value is `true`. When missing, the // value is assumed to be `false`.) // Stability: stable MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone") ) // Attributes for Apache RocketMQ const ( // Namespace of RocketMQ resources, resources in different namespaces are // individual. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myNamespace' MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") // Name of the RocketMQ producer/consumer group that is handling the message. The // client type is identified by the SpanKind. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myConsumerGroup' MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") // The unique identifier for each client. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myhost@8742@s8083jm' MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") // The timestamp in milliseconds that the delay message is expected to be // delivered to consumer. // // Type: int // RequirementLevel: ConditionallyRequired (If the message type is delay and delay // time level is not specified.) // Stability: stable // Examples: 1665987217045 MessagingRocketmqDeliveryTimestampKey = attribute.Key("messaging.rocketmq.delivery_timestamp") // The delay time level for delay message, which determines the message delay // time. // // Type: int // RequirementLevel: ConditionallyRequired (If the message type is delay and // delivery timestamp is not specified.) // Stability: stable // Examples: 3 MessagingRocketmqDelayTimeLevelKey = attribute.Key("messaging.rocketmq.delay_time_level") // It is essential for FIFO message. Messages that belong to the same message // group are always processed one by one within the same consumer group. // // Type: string // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) // Stability: stable // Examples: 'myMessageGroup' MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message_group") // Type of message. 
// // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message_type") // The secondary classifier of message besides topic. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'tagA' MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message_tag") // Key(s) of message, another way to mark message besides message id. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'keyA', 'keyB' MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message_keys") // Model of message consumption. This only applies to consumer spans. // // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") ) var ( // Normal message MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") // FIFO message MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") // Delay message MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") // Transaction message MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") ) var ( // Clustering consumption model MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") // Broadcasting consumption model MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") ) // This document defines semantic conventions for remote procedure calls. const ( // A string identifying the remoting system. See below for a list of well-known // identifiers. // // Type: Enum // RequirementLevel: Required // Stability: stable RPCSystemKey = attribute.Key("rpc.system") // The full (logical) name of the service being called, including its package // name, if applicable. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'myservice.EchoService' // Note: This is the logical name of the service from the RPC interface // perspective, which can be different from the name of any implementing class. // The `code.namespace` attribute may be used to store the latter (despite the // attribute name, it may include a class name; e.g., class with method actually // executing the call on the server side, RPC client stub class on the client // side). RPCServiceKey = attribute.Key("rpc.service") // The name of the (logical) method being called, must be equal to the $method // part in the span name. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'exampleMethod' // Note: This is the logical name of the method from the RPC interface // perspective, which can be different from the name of any implementing // method/function. The `code.function` attribute may be used to store the latter // (e.g., method actually executing the call on the server side, RPC client stub // method on the client side). RPCMethodKey = attribute.Key("rpc.method") ) var ( // gRPC RPCSystemGRPC = RPCSystemKey.String("grpc") // Java RMI RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") // .NET WCF RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") // Apache Dubbo RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") ) // Tech-specific attributes for gRPC. const ( // The [numeric status // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC // request. 
// // Type: Enum // RequirementLevel: Required // Stability: stable RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ) var ( // OK RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) // CANCELLED RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) // UNKNOWN RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) // INVALID_ARGUMENT RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) // DEADLINE_EXCEEDED RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) // NOT_FOUND RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) // ALREADY_EXISTS RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) // PERMISSION_DENIED RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) // RESOURCE_EXHAUSTED RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) // FAILED_PRECONDITION RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) // ABORTED RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) // OUT_OF_RANGE RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) // UNIMPLEMENTED RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) // INTERNAL RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) // UNAVAILABLE RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) // DATA_LOSS RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) // UNAUTHENTICATED RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) ) // Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). const ( // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC // 1.0 does not specify this, the value can be omitted. // // Type: string // RequirementLevel: ConditionallyRequired (If other than the default version // (`1.0`)) // Stability: stable // Examples: '2.0', '1.0' RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") // `id` property of request or response. Since protocol allows id to be int, // string, `null` or missing (for notifications), value is expected to be cast to // string for simplicity. Use empty string in case of `null` value. Omit entirely // if this is a notification. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '10', 'request-7', '' RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") // `error.code` property of response if it is an error response. // // Type: int // RequirementLevel: ConditionallyRequired (If response is not successful.) // Stability: stable // Examples: -32700, 100 RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") // `error.message` property of response if it is an error response. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Parse error', 'User already exists' RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") ) opentelemetry-go-1.21.0/semconv/v1.17.0/000077500000000000000000000000001452547353200174705ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.17.0/doc.go000066400000000000000000000016641452547353200205730ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package semconv implements OpenTelemetry semantic conventions. // // OpenTelemetry semantic conventions are agreed standardized naming // patterns for OpenTelemetry things. This package represents the conventions // as of the v1.17.0 version of the OpenTelemetry specification. package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" opentelemetry-go-1.21.0/semconv/v1.17.0/event.go000066400000000000000000000173141452547353200211460ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" import "go.opentelemetry.io/otel/attribute" // This semantic convention defines the attributes used to represent a feature // flag evaluation as an event. const ( // FeatureFlagKeyKey is the attribute Key conforming to the // "feature_flag.key" semantic conventions. It represents the unique // identifier of the feature flag. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'logo-color' FeatureFlagKeyKey = attribute.Key("feature_flag.key") // FeatureFlagProviderNameKey is the attribute Key conforming to the // "feature_flag.provider_name" semantic conventions. It represents the // name of the service provider that performs the flag evaluation. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'Flag Manager' FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") // FeatureFlagVariantKey is the attribute Key conforming to the // "feature_flag.variant" semantic conventions. It represents the sHOULD be // a semantic identifier for a value. If one is unavailable, a stringified // version of the value can be used. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'red', 'true', 'on' // Note: A semantic identifier, commonly referred to as a variant, provides // a means // for referring to a value without including the value itself. This can // provide additional context for understanding the meaning behind a value. // For example, the variant `red` maybe be used for the value `#c05543`. // // A stringified version of the value can be used in situations where a // semantic identifier is unavailable. String representation of the value // should be determined by the implementer. FeatureFlagVariantKey = attribute.Key("feature_flag.variant") ) // FeatureFlagKey returns an attribute KeyValue conforming to the // "feature_flag.key" semantic conventions. 
It represents the unique identifier // of the feature flag. func FeatureFlagKey(val string) attribute.KeyValue { return FeatureFlagKeyKey.String(val) } // FeatureFlagProviderName returns an attribute KeyValue conforming to the // "feature_flag.provider_name" semantic conventions. It represents the name of // the service provider that performs the flag evaluation. func FeatureFlagProviderName(val string) attribute.KeyValue { return FeatureFlagProviderNameKey.String(val) } // FeatureFlagVariant returns an attribute KeyValue conforming to the // "feature_flag.variant" semantic conventions. It represents the sHOULD be a // semantic identifier for a value. If one is unavailable, a stringified // version of the value can be used. func FeatureFlagVariant(val string) attribute.KeyValue { return FeatureFlagVariantKey.String(val) } // RPC received/sent message. const ( // MessageTypeKey is the attribute Key conforming to the "message.type" // semantic conventions. It represents the whether this is a received or // sent message. // // Type: Enum // RequirementLevel: Optional // Stability: stable MessageTypeKey = attribute.Key("message.type") // MessageIDKey is the attribute Key conforming to the "message.id" // semantic conventions. It represents the mUST be calculated as two // different counters starting from `1` one for sent messages and one for // received message. // // Type: int // RequirementLevel: Optional // Stability: stable // Note: This way we guarantee that the values will be consistent between // different implementations. MessageIDKey = attribute.Key("message.id") // MessageCompressedSizeKey is the attribute Key conforming to the // "message.compressed_size" semantic conventions. It represents the // compressed size of the message in bytes. // // Type: int // RequirementLevel: Optional // Stability: stable MessageCompressedSizeKey = attribute.Key("message.compressed_size") // MessageUncompressedSizeKey is the attribute Key conforming to the // "message.uncompressed_size" semantic conventions. It represents the // uncompressed size of the message in bytes. // // Type: int // RequirementLevel: Optional // Stability: stable MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") ) var ( // sent MessageTypeSent = MessageTypeKey.String("SENT") // received MessageTypeReceived = MessageTypeKey.String("RECEIVED") ) // MessageID returns an attribute KeyValue conforming to the "message.id" // semantic conventions. It represents the mUST be calculated as two different // counters starting from `1` one for sent messages and one for received // message. func MessageID(val int) attribute.KeyValue { return MessageIDKey.Int(val) } // MessageCompressedSize returns an attribute KeyValue conforming to the // "message.compressed_size" semantic conventions. It represents the compressed // size of the message in bytes. func MessageCompressedSize(val int) attribute.KeyValue { return MessageCompressedSizeKey.Int(val) } // MessageUncompressedSize returns an attribute KeyValue conforming to the // "message.uncompressed_size" semantic conventions. It represents the // uncompressed size of the message in bytes. func MessageUncompressedSize(val int) attribute.KeyValue { return MessageUncompressedSizeKey.Int(val) } // The attributes used to report a single exception associated with a span. const ( // ExceptionEscapedKey is the attribute Key conforming to the // "exception.escaped" semantic conventions. 
It represents the sHOULD be // set to true if the exception event is recorded at a point where it is // known that the exception is escaping the scope of the span. // // Type: boolean // RequirementLevel: Optional // Stability: stable // Note: An exception is considered to have escaped (or left) the scope of // a span, // if that span is ended while the exception is still logically "in // flight". // This may be actually "in flight" in some languages (e.g. if the // exception // is passed to a Context manager's `__exit__` method in Python) but will // usually be caught at the point of recording the exception in most // languages. // // It is usually not possible to determine at the point where an exception // is thrown // whether it will escape the scope of a span. // However, it is trivial to know that an exception // will escape, if one checks for an active exception just before ending // the span, // as done in the [example above](#recording-an-exception). // // It follows that an exception may still escape the scope of the span // even if the `exception.escaped` attribute was not set or set to false, // since the event might have been recorded at a time where it was not // clear whether the exception will escape. ExceptionEscapedKey = attribute.Key("exception.escaped") ) // ExceptionEscaped returns an attribute KeyValue conforming to the // "exception.escaped" semantic conventions. It represents the sHOULD be set to // true if the exception event is recorded at a point where it is known that // the exception is escaping the scope of the span. func ExceptionEscaped(val bool) attribute.KeyValue { return ExceptionEscapedKey.Bool(val) } opentelemetry-go-1.21.0/semconv/v1.17.0/exception.go000066400000000000000000000014301452547353200220130ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" const ( // ExceptionEventName is the name of the Span event representing an exception. ExceptionEventName = "exception" ) opentelemetry-go-1.21.0/semconv/v1.17.0/http.go000066400000000000000000000014401452547353200207750ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" // HTTP scheme attributes. 
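//
// These scheme values are normally attached for you by the companion httpconv
// package in this module, but they can also be set directly on a span. A
// minimal, hedged sketch (the span value is an assumed trace.Span obtained
// elsewhere, not part of this file):
//
//	span.SetAttributes(HTTPSchemeHTTPS, HTTPStatusCodeKey.Int(200))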
var ( HTTPSchemeHTTP = HTTPSchemeKey.String("http") HTTPSchemeHTTPS = HTTPSchemeKey.String("https") ) opentelemetry-go-1.21.0/semconv/v1.17.0/httpconv/000077500000000000000000000000001452547353200213355ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.17.0/httpconv/http.go000066400000000000000000000146571452547353200226600ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package httpconv provides OpenTelemetry HTTP semantic conventions for // tracing telemetry. package httpconv // import "go.opentelemetry.io/otel/semconv/v1.17.0/httpconv" import ( "net/http" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/semconv/internal/v2" semconv "go.opentelemetry.io/otel/semconv/v1.17.0" ) var ( nc = &internal.NetConv{ NetHostNameKey: semconv.NetHostNameKey, NetHostPortKey: semconv.NetHostPortKey, NetPeerNameKey: semconv.NetPeerNameKey, NetPeerPortKey: semconv.NetPeerPortKey, NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, NetSockPeerPortKey: semconv.NetSockPeerPortKey, NetTransportOther: semconv.NetTransportOther, NetTransportTCP: semconv.NetTransportTCP, NetTransportUDP: semconv.NetTransportUDP, NetTransportInProc: semconv.NetTransportInProc, } hc = &internal.HTTPConv{ NetConv: nc, EnduserIDKey: semconv.EnduserIDKey, HTTPClientIPKey: semconv.HTTPClientIPKey, HTTPFlavorKey: semconv.HTTPFlavorKey, HTTPMethodKey: semconv.HTTPMethodKey, HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey, HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey, HTTPRouteKey: semconv.HTTPRouteKey, HTTPSchemeHTTP: semconv.HTTPSchemeHTTP, HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS, HTTPStatusCodeKey: semconv.HTTPStatusCodeKey, HTTPTargetKey: semconv.HTTPTargetKey, HTTPURLKey: semconv.HTTPURLKey, HTTPUserAgentKey: semconv.HTTPUserAgentKey, } ) // ClientResponse returns trace attributes for an HTTP response received by a // client from a server. It will return the following attributes if the related // values are defined in resp: "http.status.code", // "http.response_content_length". // // This does not add all OpenTelemetry required attributes for an HTTP event, // it assumes ClientRequest was used to create the span with a complete set of // attributes. If a complete set of attributes can be generated using the // request contained in resp. For example: // // append(ClientResponse(resp), ClientRequest(resp.Request)...) func ClientResponse(resp *http.Response) []attribute.KeyValue { return hc.ClientResponse(resp) } // ClientRequest returns trace attributes for an HTTP request made by a client. // The following attributes are always returned: "http.url", "http.flavor", // "http.method", "net.peer.name". The following attributes are returned if the // related values are defined in req: "net.peer.port", "http.user_agent", // "http.request_content_length", "enduser.id". 
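//
// A hedged usage sketch for client-side instrumentation; the tracer, client,
// and request values are illustrative assumptions, not part of this package:
//
//	ctx, span := tracer.Start(ctx, "HTTP GET",
//		trace.WithSpanKind(trace.SpanKindClient),
//		trace.WithAttributes(httpconv.ClientRequest(req)...))
//	defer span.End()
//	resp, err := client.Do(req.WithContext(ctx))
//	if err == nil {
//		span.SetAttributes(httpconv.ClientResponse(resp)...)
//		span.SetStatus(httpconv.ClientStatus(resp.StatusCode))
//	}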
func ClientRequest(req *http.Request) []attribute.KeyValue { return hc.ClientRequest(req) } // ClientStatus returns a span status code and message for an HTTP status code // value received by a client. func ClientStatus(code int) (codes.Code, string) { return hc.ClientStatus(code) } // ServerRequest returns trace attributes for an HTTP request received by a // server. // // The server must be the primary server name if it is known. For example this // would be the ServerName directive // (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache // server, and the server_name directive // (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an // nginx server. More generically, the primary server name would be the host // header value that matches the default virtual host of an HTTP server. It // should include the host identifier and if a port is used to route to the // server that port identifier should be included as an appropriate port // suffix. // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. // // The following attributes are always returned: "http.method", "http.scheme", // "http.flavor", "http.target", "net.host.name". The following attributes are // returned if the related values are defined in req: "net.host.port", // "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", // "http.client_ip". func ServerRequest(server string, req *http.Request) []attribute.KeyValue { return hc.ServerRequest(server, req) } // ServerStatus returns a span status code and message for an HTTP status code // value returned by a server. Status codes in the 400-499 range are not // returned as errors. func ServerStatus(code int) (codes.Code, string) { return hc.ServerStatus(code) } // RequestHeader returns the contents of h as attributes. // // Instrumentation should require an explicit configuration of which headers to // capture and then prune what they pass here. Including all headers can be a // security risk - explicit configuration helps avoid leaking sensitive // information. // // The User-Agent header is already captured in the http.user_agent attribute // from ClientRequest and ServerRequest. Instrumentation may provide an option // to capture that header here even though it is not recommended. Otherwise, // instrumentation should filter that out of what is passed. func RequestHeader(h http.Header) []attribute.KeyValue { return hc.RequestHeader(h) } // ResponseHeader returns the contents of h as attributes. // // Instrumentation should require an explicit configuration of which headers to // capture and then prune what they pass here. Including all headers can be a // security risk - explicit configuration helps avoid leaking sensitive // information. // // The User-Agent header is already captured in the http.user_agent attribute // from ClientRequest and ServerRequest. Instrumentation may provide an option // to capture that header here even though it is not recommended. Otherwise, // instrumentation should filter that out of what is passed.
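//
// A hedged sketch of server-side use with an explicit header allowlist; the
// tracer, handler variables, and server name are illustrative assumptions,
// not part of this package:
//
//	ctx, span := tracer.Start(r.Context(), r.URL.Path,
//		trace.WithSpanKind(trace.SpanKindServer),
//		trace.WithAttributes(httpconv.ServerRequest("my-service.example", r)...))
//	defer span.End()
//	allowed := http.Header{"Accept": r.Header.Values("Accept")}
//	span.SetAttributes(httpconv.RequestHeader(allowed)...)
//	span.SetStatus(httpconv.ServerStatus(http.StatusOK))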
func ResponseHeader(h http.Header) []attribute.KeyValue { return hc.ResponseHeader(h) } opentelemetry-go-1.21.0/semconv/v1.17.0/netconv/000077500000000000000000000000001452547353200211445ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.17.0/netconv/net.go000066400000000000000000000053211452547353200222620ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package netconv provides OpenTelemetry network semantic conventions for // tracing telemetry. package netconv // import "go.opentelemetry.io/otel/semconv/v1.17.0/netconv" import ( "net" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/semconv/internal/v2" semconv "go.opentelemetry.io/otel/semconv/v1.17.0" ) var nc = &internal.NetConv{ NetHostNameKey: semconv.NetHostNameKey, NetHostPortKey: semconv.NetHostPortKey, NetPeerNameKey: semconv.NetPeerNameKey, NetPeerPortKey: semconv.NetPeerPortKey, NetSockFamilyKey: semconv.NetSockFamilyKey, NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, NetSockPeerPortKey: semconv.NetSockPeerPortKey, NetSockHostAddrKey: semconv.NetSockHostAddrKey, NetSockHostPortKey: semconv.NetSockHostPortKey, NetTransportOther: semconv.NetTransportOther, NetTransportTCP: semconv.NetTransportTCP, NetTransportUDP: semconv.NetTransportUDP, NetTransportInProc: semconv.NetTransportInProc, } // Transport returns a trace attribute describing the transport protocol of the // passed network. See the net.Dial for information about acceptable network // values. func Transport(network string) attribute.KeyValue { return nc.Transport(network) } // Client returns trace attributes for a client network connection to address. // See net.Dial for information about acceptable address values, address should // be the same as the one used to create conn. If conn is nil, only network // peer attributes will be returned that describe address. Otherwise, the // socket level information about conn will also be included. func Client(address string, conn net.Conn) []attribute.KeyValue { return nc.Client(address, conn) } // Server returns trace attributes for a network listener listening at address. // See net.Listen for information about acceptable address values, address // should be the same as the one used to create ln. If ln is nil, only network // host attributes will be returned that describe address. Otherwise, the // socket level information about ln will also be included. func Server(address string, ln net.Listener) []attribute.KeyValue { return nc.Server(address, ln) } opentelemetry-go-1.21.0/semconv/v1.17.0/resource.go000066400000000000000000002307421452547353200216560ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" import "go.opentelemetry.io/otel/attribute" // The web browser in which the application represented by the resource is // running. The `browser.*` attributes MUST be used only for resources that // represent applications running in a web browser (regardless of whether // running on a mobile or desktop device). const ( // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" // semantic conventions. It represents the array of brand name and version // separated by a space // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (`navigator.userAgentData.brands`). BrowserBrandsKey = attribute.Key("browser.brands") // BrowserPlatformKey is the attribute Key conforming to the // "browser.platform" semantic conventions. It represents the platform on // which the browser is running // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Windows', 'macOS', 'Android' // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (`navigator.userAgentData.platform`). If unavailable, the legacy // `navigator.platform` API SHOULD NOT be used instead and this attribute // SHOULD be left unset in order for the values to be consistent. // The list of possible values is defined in the [W3C User-Agent Client // Hints // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). // Note that some (but not all) of these values can overlap with values in // the [`os.type` and `os.name` attributes](./os.md). However, for // consistency, the values in the `browser.platform` attribute should // capture the exact value that the user agent provides. BrowserPlatformKey = attribute.Key("browser.platform") // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" // semantic conventions. It represents a boolean that is true if the // browser is running on a mobile device // // Type: boolean // RequirementLevel: Optional // Stability: stable // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (`navigator.userAgentData.mobile`). If unavailable, this attribute // SHOULD be left unset. BrowserMobileKey = attribute.Key("browser.mobile") // BrowserUserAgentKey is the attribute Key conforming to the // "browser.user_agent" semantic conventions. 
It represents the full // user-agent string provided by the browser // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) // AppleWebKit/537.36 (KHTML, ' // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' // Note: The user-agent value SHOULD be provided only from browsers that do // not have a mechanism to retrieve brands and platform individually from // the User-Agent Client Hints API. To retrieve the value, the legacy // `navigator.userAgent` API can be used. BrowserUserAgentKey = attribute.Key("browser.user_agent") // BrowserLanguageKey is the attribute Key conforming to the // "browser.language" semantic conventions. It represents the preferred // language of the user using the browser // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'en', 'en-US', 'fr', 'fr-FR' // Note: This value is intended to be taken from the Navigator API // `navigator.language`. BrowserLanguageKey = attribute.Key("browser.language") ) // BrowserBrands returns an attribute KeyValue conforming to the // "browser.brands" semantic conventions. It represents the array of brand name // and version separated by a space func BrowserBrands(val ...string) attribute.KeyValue { return BrowserBrandsKey.StringSlice(val) } // BrowserPlatform returns an attribute KeyValue conforming to the // "browser.platform" semantic conventions. It represents the platform on which // the browser is running func BrowserPlatform(val string) attribute.KeyValue { return BrowserPlatformKey.String(val) } // BrowserMobile returns an attribute KeyValue conforming to the // "browser.mobile" semantic conventions. It represents a boolean that is true // if the browser is running on a mobile device func BrowserMobile(val bool) attribute.KeyValue { return BrowserMobileKey.Bool(val) } // BrowserUserAgent returns an attribute KeyValue conforming to the // "browser.user_agent" semantic conventions. It represents the full user-agent // string provided by the browser func BrowserUserAgent(val string) attribute.KeyValue { return BrowserUserAgentKey.String(val) } // BrowserLanguage returns an attribute KeyValue conforming to the // "browser.language" semantic conventions. It represents the preferred // language of the user using the browser func BrowserLanguage(val string) attribute.KeyValue { return BrowserLanguageKey.String(val) } // A cloud environment (e.g. GCP, Azure, AWS) const ( // CloudProviderKey is the attribute Key conforming to the "cloud.provider" // semantic conventions. It represents the name of the cloud provider. // // Type: Enum // RequirementLevel: Optional // Stability: stable CloudProviderKey = attribute.Key("cloud.provider") // CloudAccountIDKey is the attribute Key conforming to the // "cloud.account.id" semantic conventions. It represents the cloud account // ID the resource is assigned to. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '111111111111', 'opentelemetry' CloudAccountIDKey = attribute.Key("cloud.account.id") // CloudRegionKey is the attribute Key conforming to the "cloud.region" // semantic conventions. It represents the geographical region the resource // is running. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-central1', 'us-east-1' // Note: Refer to your provider's docs to see the available regions, for // example [Alibaba Cloud // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), // [Azure // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), // [Google Cloud regions](https://cloud.google.com/about/locations), or // [Tencent Cloud // regions](https://intl.cloud.tencent.com/document/product/213/6091). CloudRegionKey = attribute.Key("cloud.region") // CloudAvailabilityZoneKey is the attribute Key conforming to the // "cloud.availability_zone" semantic conventions. It represents the cloud // regions often have multiple, isolated locations known as zones to // increase availability. Availability zone represents the zone where the // resource is running. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-east-1c' // Note: Availability zones are called "zones" on Alibaba Cloud and Google // Cloud. CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" // semantic conventions. It represents the cloud platform in use. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: The prefix of the service SHOULD match the one specified in // `cloud.provider`. CloudPlatformKey = attribute.Key("cloud.platform") ) var ( // Alibaba Cloud CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") // Amazon Web Services CloudProviderAWS = CloudProviderKey.String("aws") // Microsoft Azure CloudProviderAzure = CloudProviderKey.String("azure") // Google Cloud Platform CloudProviderGCP = CloudProviderKey.String("gcp") // IBM Cloud CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") // Tencent Cloud CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") ) var ( // Alibaba Cloud Elastic Compute Service CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") // Alibaba Cloud Function Compute CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") // Red Hat OpenShift on Alibaba Cloud CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") // AWS Elastic Compute Cloud CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") // AWS Elastic Container Service CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") // AWS Elastic Kubernetes Service CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") // AWS Lambda CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") // AWS Elastic Beanstalk CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") // AWS App Runner CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") // Red Hat OpenShift on AWS (ROSA) CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") // Azure Virtual Machines CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") // Azure Container Instances CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") // Azure Kubernetes Service CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") // Azure Functions CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") // Azure App Service CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") // Azure Red Hat 
OpenShift CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") // Google Cloud Compute Engine (GCE) CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") // Google Cloud Run CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") // Google Cloud Kubernetes Engine (GKE) CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") // Google Cloud Functions (GCF) CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") // Google Cloud App Engine (GAE) CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") // Red Hat OpenShift on Google Cloud CloudPlatformGoogleCloudOpenshift = CloudPlatformKey.String("google_cloud_openshift") // Red Hat OpenShift on IBM Cloud CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") // Tencent Cloud Cloud Virtual Machine (CVM) CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") // Tencent Cloud Elastic Kubernetes Service (EKS) CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") // Tencent Cloud Serverless Cloud Function (SCF) CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") ) // CloudAccountID returns an attribute KeyValue conforming to the // "cloud.account.id" semantic conventions. It represents the cloud account ID // the resource is assigned to. func CloudAccountID(val string) attribute.KeyValue { return CloudAccountIDKey.String(val) } // CloudRegion returns an attribute KeyValue conforming to the // "cloud.region" semantic conventions. It represents the geographical region // the resource is running. func CloudRegion(val string) attribute.KeyValue { return CloudRegionKey.String(val) } // CloudAvailabilityZone returns an attribute KeyValue conforming to the // "cloud.availability_zone" semantic conventions. It represents the cloud // regions often have multiple, isolated locations known as zones to increase // availability. Availability zone represents the zone where the resource is // running. func CloudAvailabilityZone(val string) attribute.KeyValue { return CloudAvailabilityZoneKey.String(val) } // Resources used by AWS Elastic Container Service (ECS). const ( // AWSECSContainerARNKey is the attribute Key conforming to the // "aws.ecs.container.arn" semantic conventions. It represents the Amazon // Resource Name (ARN) of an [ECS container // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") // AWSECSClusterARNKey is the attribute Key conforming to the // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an // [ECS // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") // AWSECSLaunchtypeKey is the attribute Key conforming to the // "aws.ecs.launchtype" semantic conventions. It represents the [launch // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) // for an ECS task. 
// // Type: Enum // RequirementLevel: Optional // Stability: stable AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") // AWSECSTaskARNKey is the attribute Key conforming to the // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an // [ECS task // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") // AWSECSTaskFamilyKey is the attribute Key conforming to the // "aws.ecs.task.family" semantic conventions. It represents the task // definition family this task definition is a member of. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-family' AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") // AWSECSTaskRevisionKey is the attribute Key conforming to the // "aws.ecs.task.revision" semantic conventions. It represents the revision // for this task definition. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '8', '26' AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") ) var ( // ec2 AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") // fargate AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") ) // AWSECSContainerARN returns an attribute KeyValue conforming to the // "aws.ecs.container.arn" semantic conventions. It represents the Amazon // Resource Name (ARN) of an [ECS container // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). func AWSECSContainerARN(val string) attribute.KeyValue { return AWSECSContainerARNKey.String(val) } // AWSECSClusterARN returns an attribute KeyValue conforming to the // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). func AWSECSClusterARN(val string) attribute.KeyValue { return AWSECSClusterARNKey.String(val) } // AWSECSTaskARN returns an attribute KeyValue conforming to the // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS // task // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). func AWSECSTaskARN(val string) attribute.KeyValue { return AWSECSTaskARNKey.String(val) } // AWSECSTaskFamily returns an attribute KeyValue conforming to the // "aws.ecs.task.family" semantic conventions. It represents the task // definition family this task definition is a member of. func AWSECSTaskFamily(val string) attribute.KeyValue { return AWSECSTaskFamilyKey.String(val) } // AWSECSTaskRevision returns an attribute KeyValue conforming to the // "aws.ecs.task.revision" semantic conventions. It represents the revision for // this task definition. func AWSECSTaskRevision(val string) attribute.KeyValue { return AWSECSTaskRevisionKey.String(val) } // Resources used by AWS Elastic Kubernetes Service (EKS). const ( // AWSEKSClusterARNKey is the attribute Key conforming to the // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an // EKS cluster. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") ) // AWSEKSClusterARN returns an attribute KeyValue conforming to the // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS // cluster. func AWSEKSClusterARN(val string) attribute.KeyValue { return AWSEKSClusterARNKey.String(val) } // Resources specific to Amazon Web Services. const ( // AWSLogGroupNamesKey is the attribute Key conforming to the // "aws.log.group.names" semantic conventions. It represents the name(s) of // the AWS log group(s) an application is writing to. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '/aws/lambda/my-function', 'opentelemetry-service' // Note: Multiple log groups must be supported for cases like // multi-container applications, where a single application has sidecar // containers, and each write to their own log group. AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") // AWSLogGroupARNsKey is the attribute Key conforming to the // "aws.log.group.arns" semantic conventions. It represents the Amazon // Resource Name(s) (ARN) of the AWS log group(s). // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' // Note: See the [log group ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") // AWSLogStreamNamesKey is the attribute Key conforming to the // "aws.log.stream.names" semantic conventions. It represents the name(s) // of the AWS log stream(s) an application is writing to. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") // AWSLogStreamARNsKey is the attribute Key conforming to the // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of // the AWS log stream(s). // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' // Note: See the [log stream ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). // One log group can contain several log streams, so these ARNs necessarily // identify both a log group and a log stream. AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") ) // AWSLogGroupNames returns an attribute KeyValue conforming to the // "aws.log.group.names" semantic conventions. It represents the name(s) of the // AWS log group(s) an application is writing to. func AWSLogGroupNames(val ...string) attribute.KeyValue { return AWSLogGroupNamesKey.StringSlice(val) } // AWSLogGroupARNs returns an attribute KeyValue conforming to the // "aws.log.group.arns" semantic conventions. It represents the Amazon Resource // Name(s) (ARN) of the AWS log group(s). func AWSLogGroupARNs(val ...string) attribute.KeyValue { return AWSLogGroupARNsKey.StringSlice(val) } // AWSLogStreamNames returns an attribute KeyValue conforming to the // "aws.log.stream.names" semantic conventions. It represents the name(s) of // the AWS log stream(s) an application is writing to. 
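//
// A hedged sketch of how these cloud and AWS helpers are commonly combined
// into an SDK resource; the resource.New call, context, and example values
// are illustrative assumptions, not part of this package:
//
//	res, err := resource.New(ctx,
//		resource.WithAttributes(
//			semconv.CloudProviderAWS,
//			semconv.CloudRegion("us-west-1"),
//			semconv.AWSLogGroupNames("/aws/lambda/my-function"),
//		),
//	)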
func AWSLogStreamNames(val ...string) attribute.KeyValue { return AWSLogStreamNamesKey.StringSlice(val) } // AWSLogStreamARNs returns an attribute KeyValue conforming to the // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the // AWS log stream(s). func AWSLogStreamARNs(val ...string) attribute.KeyValue { return AWSLogStreamARNsKey.StringSlice(val) } // A container instance. const ( // ContainerNameKey is the attribute Key conforming to the "container.name" // semantic conventions. It represents the container name used by container // runtime. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-autoconf' ContainerNameKey = attribute.Key("container.name") // ContainerIDKey is the attribute Key conforming to the "container.id" // semantic conventions. It represents the container ID. Usually a UUID, as // for example used to [identify Docker // containers](https://docs.docker.com/engine/reference/run/#container-identification). // The UUID might be abbreviated. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'a3bf90e006b2' ContainerIDKey = attribute.Key("container.id") // ContainerRuntimeKey is the attribute Key conforming to the // "container.runtime" semantic conventions. It represents the container // runtime managing this container. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'docker', 'containerd', 'rkt' ContainerRuntimeKey = attribute.Key("container.runtime") // ContainerImageNameKey is the attribute Key conforming to the // "container.image.name" semantic conventions. It represents the name of // the image the container was built on. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'gcr.io/opentelemetry/operator' ContainerImageNameKey = attribute.Key("container.image.name") // ContainerImageTagKey is the attribute Key conforming to the // "container.image.tag" semantic conventions. It represents the container // image tag. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0.1' ContainerImageTagKey = attribute.Key("container.image.tag") ) // ContainerName returns an attribute KeyValue conforming to the // "container.name" semantic conventions. It represents the container name used // by container runtime. func ContainerName(val string) attribute.KeyValue { return ContainerNameKey.String(val) } // ContainerID returns an attribute KeyValue conforming to the // "container.id" semantic conventions. It represents the container ID. Usually // a UUID, as for example used to [identify Docker // containers](https://docs.docker.com/engine/reference/run/#container-identification). // The UUID might be abbreviated. func ContainerID(val string) attribute.KeyValue { return ContainerIDKey.String(val) } // ContainerRuntime returns an attribute KeyValue conforming to the // "container.runtime" semantic conventions. It represents the container // runtime managing this container. func ContainerRuntime(val string) attribute.KeyValue { return ContainerRuntimeKey.String(val) } // ContainerImageName returns an attribute KeyValue conforming to the // "container.image.name" semantic conventions. It represents the name of the // image the container was built on. func ContainerImageName(val string) attribute.KeyValue { return ContainerImageNameKey.String(val) } // ContainerImageTag returns an attribute KeyValue conforming to the // "container.image.tag" semantic conventions. 
It represents the container // image tag. func ContainerImageTag(val string) attribute.KeyValue { return ContainerImageTagKey.String(val) } // The software deployment. const ( // DeploymentEnvironmentKey is the attribute Key conforming to the // "deployment.environment" semantic conventions. It represents the name of // the [deployment // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka // deployment tier). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'staging', 'production' DeploymentEnvironmentKey = attribute.Key("deployment.environment") ) // DeploymentEnvironment returns an attribute KeyValue conforming to the // "deployment.environment" semantic conventions. It represents the name of the // [deployment // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka // deployment tier). func DeploymentEnvironment(val string) attribute.KeyValue { return DeploymentEnvironmentKey.String(val) } // The device on which the process represented by this resource is running. const ( // DeviceIDKey is the attribute Key conforming to the "device.id" semantic // conventions. It represents a unique identifier representing the device // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' // Note: The device identifier MUST only be defined using the values // outlined below. This value is not an advertising identifier and MUST NOT // be used as such. On iOS (Swift or Objective-C), this value MUST be equal // to the [vendor // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). // On Android (Java or Kotlin), this value MUST be equal to the Firebase // Installation ID or a globally unique UUID which is persisted across // sessions in your application. More information can be found // [here](https://developer.android.com/training/articles/user-data-ids) on // best practices and exact implementation details. Caution should be taken // when storing personal data or anything which can identify a user. GDPR // and data protection laws may apply, ensure you do your own due // diligence. DeviceIDKey = attribute.Key("device.id") // DeviceModelIdentifierKey is the attribute Key conforming to the // "device.model.identifier" semantic conventions. It represents the model // identifier for the device // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iPhone3,4', 'SM-G920F' // Note: It's recommended this value represents a machine readable version // of the model identifier rather than the market or consumer-friendly name // of the device. DeviceModelIdentifierKey = attribute.Key("device.model.identifier") // DeviceModelNameKey is the attribute Key conforming to the // "device.model.name" semantic conventions. It represents the marketing // name for the device model // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' // Note: It's recommended this value represents a human readable version of // the device model rather than a machine readable alternative. DeviceModelNameKey = attribute.Key("device.model.name") // DeviceManufacturerKey is the attribute Key conforming to the // "device.manufacturer" semantic conventions. 
It represents the name of // the device manufacturer // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Apple', 'Samsung' // Note: The Android OS provides this field via // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). // iOS apps SHOULD hardcode the value `Apple`. DeviceManufacturerKey = attribute.Key("device.manufacturer") ) // DeviceID returns an attribute KeyValue conforming to the "device.id" // semantic conventions. It represents a unique identifier representing the // device func DeviceID(val string) attribute.KeyValue { return DeviceIDKey.String(val) } // DeviceModelIdentifier returns an attribute KeyValue conforming to the // "device.model.identifier" semantic conventions. It represents the model // identifier for the device func DeviceModelIdentifier(val string) attribute.KeyValue { return DeviceModelIdentifierKey.String(val) } // DeviceModelName returns an attribute KeyValue conforming to the // "device.model.name" semantic conventions. It represents the marketing name // for the device model func DeviceModelName(val string) attribute.KeyValue { return DeviceModelNameKey.String(val) } // DeviceManufacturer returns an attribute KeyValue conforming to the // "device.manufacturer" semantic conventions. It represents the name of the // device manufacturer func DeviceManufacturer(val string) attribute.KeyValue { return DeviceManufacturerKey.String(val) } // A serverless instance. const ( // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic // conventions. It represents the name of the single function that this // runtime instance executes. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'my-function', 'myazurefunctionapp/some-function-name' // Note: This is the name of the function as configured/deployed on the // FaaS // platform and is usually different from the name of the callback // function (which may be stored in the // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) // span attributes). // // For some cloud providers, the above definition is ambiguous. The // following // definition of function name MUST be used for this attribute // (and consequently the span name) for the listed cloud // providers/products: // // * **Azure:** The full name `/`, i.e., function app name // followed by a forward slash followed by the function name (this form // can also be seen in the resource JSON for the function). // This means that a span attribute MUST be used, as an Azure function // app can host multiple functions that would usually share // a TracerProvider (see also the `faas.id` attribute). FaaSNameKey = attribute.Key("faas.name") // FaaSIDKey is the attribute Key conforming to the "faas.id" semantic // conventions. It represents the unique ID of the single function that // this runtime instance executes. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' // Note: On some cloud providers, it may not be possible to determine the // full ID at startup, // so consider setting `faas.id` as a span attribute instead. // // The exact value to use for `faas.id` depends on the cloud provider: // // * **AWS Lambda:** The function // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). 
// Take care not to use the "invoked ARN" directly but replace any // [alias // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) // with the resolved function version, as the same runtime instance may // be invokable with // multiple different aliases. // * **GCP:** The [URI of the // resource](https://cloud.google.com/iam/docs/full-resource-names) // * **Azure:** The [Fully Qualified Resource // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) // of the invoked function, // *not* the function app, having the form // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. // This means that a span attribute MUST be used, as an Azure function // app can host multiple functions that would usually share // a TracerProvider. FaaSIDKey = attribute.Key("faas.id") // FaaSVersionKey is the attribute Key conforming to the "faas.version" // semantic conventions. It represents the immutable version of the // function being executed. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '26', 'pinkfroid-00002' // Note: Depending on the cloud provider and platform, use: // // * **AWS Lambda:** The [function // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) // (an integer represented as a decimal string). // * **Google Cloud Run:** The // [revision](https://cloud.google.com/run/docs/managing/revisions) // (i.e., the function name plus the revision suffix). // * **Google Cloud Functions:** The value of the // [`K_REVISION` environment // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). // * **Azure Functions:** Not applicable. Do not set this attribute. FaaSVersionKey = attribute.Key("faas.version") // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" // semantic conventions. It represents the execution environment ID as a // string, that will be potentially reused for other invocations to the // same function/function version. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' // Note: * **AWS Lambda:** Use the (full) log stream name. FaaSInstanceKey = attribute.Key("faas.instance") // FaaSMaxMemoryKey is the attribute Key conforming to the // "faas.max_memory" semantic conventions. It represents the amount of // memory available to the serverless function in MiB. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 128 // Note: It's recommended to set this attribute since e.g. too little // memory can easily stop a Java AWS Lambda function from working // correctly. On AWS Lambda, the environment variable // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information. FaaSMaxMemoryKey = attribute.Key("faas.max_memory") ) // FaaSName returns an attribute KeyValue conforming to the "faas.name" // semantic conventions. It represents the name of the single function that // this runtime instance executes. func FaaSName(val string) attribute.KeyValue { return FaaSNameKey.String(val) } // FaaSID returns an attribute KeyValue conforming to the "faas.id" semantic // conventions. It represents the unique ID of the single function that this // runtime instance executes. func FaaSID(val string) attribute.KeyValue { return FaaSIDKey.String(val) } // FaaSVersion returns an attribute KeyValue conforming to the // "faas.version" semantic conventions. 
It represents the immutable version of // the function being executed. func FaaSVersion(val string) attribute.KeyValue { return FaaSVersionKey.String(val) } // FaaSInstance returns an attribute KeyValue conforming to the // "faas.instance" semantic conventions. It represents the execution // environment ID as a string, that will be potentially reused for other // invocations to the same function/function version. func FaaSInstance(val string) attribute.KeyValue { return FaaSInstanceKey.String(val) } // FaaSMaxMemory returns an attribute KeyValue conforming to the // "faas.max_memory" semantic conventions. It represents the amount of memory // available to the serverless function in MiB. func FaaSMaxMemory(val int) attribute.KeyValue { return FaaSMaxMemoryKey.Int(val) } // A host is defined as a general computing instance. const ( // HostIDKey is the attribute Key conforming to the "host.id" semantic // conventions. It represents the unique host ID. For Cloud, this must be // the instance_id assigned by the cloud provider. For non-containerized // Linux systems, the `machine-id` located in `/etc/machine-id` or // `/var/lib/dbus/machine-id` may be used. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'fdbf79e8af94cb7f9e8df36789187052' HostIDKey = attribute.Key("host.id") // HostNameKey is the attribute Key conforming to the "host.name" semantic // conventions. It represents the name of the host. On Unix systems, it may // contain what the hostname command returns, or the fully qualified // hostname, or another name specified by the user. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-test' HostNameKey = attribute.Key("host.name") // HostTypeKey is the attribute Key conforming to the "host.type" semantic // conventions. It represents the type of host. For Cloud, this must be the // machine type. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'n1-standard-1' HostTypeKey = attribute.Key("host.type") // HostArchKey is the attribute Key conforming to the "host.arch" semantic // conventions. It represents the CPU architecture the host system is // running on. // // Type: Enum // RequirementLevel: Optional // Stability: stable HostArchKey = attribute.Key("host.arch") // HostImageNameKey is the attribute Key conforming to the // "host.image.name" semantic conventions. It represents the name of the VM // image or OS install the host was instantiated from. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' HostImageNameKey = attribute.Key("host.image.name") // HostImageIDKey is the attribute Key conforming to the "host.image.id" // semantic conventions. It represents the vM image ID. For Cloud, this // value is from the provider. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'ami-07b06b442921831e5' HostImageIDKey = attribute.Key("host.image.id") // HostImageVersionKey is the attribute Key conforming to the // "host.image.version" semantic conventions. It represents the version // string of the VM image as defined in [Version // Attributes](README.md#version-attributes). 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0.1' HostImageVersionKey = attribute.Key("host.image.version") ) var ( // AMD64 HostArchAMD64 = HostArchKey.String("amd64") // ARM32 HostArchARM32 = HostArchKey.String("arm32") // ARM64 HostArchARM64 = HostArchKey.String("arm64") // Itanium HostArchIA64 = HostArchKey.String("ia64") // 32-bit PowerPC HostArchPPC32 = HostArchKey.String("ppc32") // 64-bit PowerPC HostArchPPC64 = HostArchKey.String("ppc64") // IBM z/Architecture HostArchS390x = HostArchKey.String("s390x") // 32-bit x86 HostArchX86 = HostArchKey.String("x86") ) // HostID returns an attribute KeyValue conforming to the "host.id" semantic // conventions. It represents the unique host ID. For Cloud, this must be the // instance_id assigned by the cloud provider. For non-containerized Linux // systems, the `machine-id` located in `/etc/machine-id` or // `/var/lib/dbus/machine-id` may be used. func HostID(val string) attribute.KeyValue { return HostIDKey.String(val) } // HostName returns an attribute KeyValue conforming to the "host.name" // semantic conventions. It represents the name of the host. On Unix systems, // it may contain what the hostname command returns, or the fully qualified // hostname, or another name specified by the user. func HostName(val string) attribute.KeyValue { return HostNameKey.String(val) } // HostType returns an attribute KeyValue conforming to the "host.type" // semantic conventions. It represents the type of host. For Cloud, this must // be the machine type. func HostType(val string) attribute.KeyValue { return HostTypeKey.String(val) } // HostImageName returns an attribute KeyValue conforming to the // "host.image.name" semantic conventions. It represents the name of the VM // image or OS install the host was instantiated from. func HostImageName(val string) attribute.KeyValue { return HostImageNameKey.String(val) } // HostImageID returns an attribute KeyValue conforming to the // "host.image.id" semantic conventions. It represents the vM image ID. For // Cloud, this value is from the provider. func HostImageID(val string) attribute.KeyValue { return HostImageIDKey.String(val) } // HostImageVersion returns an attribute KeyValue conforming to the // "host.image.version" semantic conventions. It represents the version string // of the VM image as defined in [Version // Attributes](README.md#version-attributes). func HostImageVersion(val string) attribute.KeyValue { return HostImageVersionKey.String(val) } // A Kubernetes Cluster. const ( // K8SClusterNameKey is the attribute Key conforming to the // "k8s.cluster.name" semantic conventions. It represents the name of the // cluster. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-cluster' K8SClusterNameKey = attribute.Key("k8s.cluster.name") ) // K8SClusterName returns an attribute KeyValue conforming to the // "k8s.cluster.name" semantic conventions. It represents the name of the // cluster. func K8SClusterName(val string) attribute.KeyValue { return K8SClusterNameKey.String(val) } // A Kubernetes Node object. const ( // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" // semantic conventions. It represents the name of the Node. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'node-1' K8SNodeNameKey = attribute.Key("k8s.node.name") // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" // semantic conventions. It represents the UID of the Node. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' K8SNodeUIDKey = attribute.Key("k8s.node.uid") ) // K8SNodeName returns an attribute KeyValue conforming to the // "k8s.node.name" semantic conventions. It represents the name of the Node. func K8SNodeName(val string) attribute.KeyValue { return K8SNodeNameKey.String(val) } // K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" // semantic conventions. It represents the UID of the Node. func K8SNodeUID(val string) attribute.KeyValue { return K8SNodeUIDKey.String(val) } // A Kubernetes Namespace. const ( // K8SNamespaceNameKey is the attribute Key conforming to the // "k8s.namespace.name" semantic conventions. It represents the name of the // namespace that the pod is running in. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'default' K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") ) // K8SNamespaceName returns an attribute KeyValue conforming to the // "k8s.namespace.name" semantic conventions. It represents the name of the // namespace that the pod is running in. func K8SNamespaceName(val string) attribute.KeyValue { return K8SNamespaceNameKey.String(val) } // A Kubernetes Pod object. const ( // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" // semantic conventions. It represents the UID of the Pod. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SPodUIDKey = attribute.Key("k8s.pod.uid") // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" // semantic conventions. It represents the name of the Pod. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-pod-autoconf' K8SPodNameKey = attribute.Key("k8s.pod.name") ) // K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" // semantic conventions. It represents the UID of the Pod. func K8SPodUID(val string) attribute.KeyValue { return K8SPodUIDKey.String(val) } // K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" // semantic conventions. It represents the name of the Pod. func K8SPodName(val string) attribute.KeyValue { return K8SPodNameKey.String(val) } // A container in a // [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). const ( // K8SContainerNameKey is the attribute Key conforming to the // "k8s.container.name" semantic conventions. It represents the name of the // Container from Pod specification, must be unique within a Pod. Container // runtime usually uses different globally unique name (`container.name`). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'redis' K8SContainerNameKey = attribute.Key("k8s.container.name") // K8SContainerRestartCountKey is the attribute Key conforming to the // "k8s.container.restart_count" semantic conventions. It represents the // number of times the container was restarted. This attribute can be used // to identify a particular container (running or stopped) within a // container spec. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 0, 2 K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") ) // K8SContainerName returns an attribute KeyValue conforming to the // "k8s.container.name" semantic conventions. It represents the name of the // Container from Pod specification, must be unique within a Pod. 
Container // runtime usually uses different globally unique name (`container.name`). func K8SContainerName(val string) attribute.KeyValue { return K8SContainerNameKey.String(val) } // K8SContainerRestartCount returns an attribute KeyValue conforming to the // "k8s.container.restart_count" semantic conventions. It represents the number // of times the container was restarted. This attribute can be used to identify // a particular container (running or stopped) within a container spec. func K8SContainerRestartCount(val int) attribute.KeyValue { return K8SContainerRestartCountKey.Int(val) } // A Kubernetes ReplicaSet object. const ( // K8SReplicaSetUIDKey is the attribute Key conforming to the // "k8s.replicaset.uid" semantic conventions. It represents the UID of the // ReplicaSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") // K8SReplicaSetNameKey is the attribute Key conforming to the // "k8s.replicaset.name" semantic conventions. It represents the name of // the ReplicaSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") ) // K8SReplicaSetUID returns an attribute KeyValue conforming to the // "k8s.replicaset.uid" semantic conventions. It represents the UID of the // ReplicaSet. func K8SReplicaSetUID(val string) attribute.KeyValue { return K8SReplicaSetUIDKey.String(val) } // K8SReplicaSetName returns an attribute KeyValue conforming to the // "k8s.replicaset.name" semantic conventions. It represents the name of the // ReplicaSet. func K8SReplicaSetName(val string) attribute.KeyValue { return K8SReplicaSetNameKey.String(val) } // A Kubernetes Deployment object. const ( // K8SDeploymentUIDKey is the attribute Key conforming to the // "k8s.deployment.uid" semantic conventions. It represents the UID of the // Deployment. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") // K8SDeploymentNameKey is the attribute Key conforming to the // "k8s.deployment.name" semantic conventions. It represents the name of // the Deployment. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") ) // K8SDeploymentUID returns an attribute KeyValue conforming to the // "k8s.deployment.uid" semantic conventions. It represents the UID of the // Deployment. func K8SDeploymentUID(val string) attribute.KeyValue { return K8SDeploymentUIDKey.String(val) } // K8SDeploymentName returns an attribute KeyValue conforming to the // "k8s.deployment.name" semantic conventions. It represents the name of the // Deployment. func K8SDeploymentName(val string) attribute.KeyValue { return K8SDeploymentNameKey.String(val) } // A Kubernetes StatefulSet object. const ( // K8SStatefulSetUIDKey is the attribute Key conforming to the // "k8s.statefulset.uid" semantic conventions. It represents the UID of the // StatefulSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") // K8SStatefulSetNameKey is the attribute Key conforming to the // "k8s.statefulset.name" semantic conventions. It represents the name of // the StatefulSet. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") ) // K8SStatefulSetUID returns an attribute KeyValue conforming to the // "k8s.statefulset.uid" semantic conventions. It represents the UID of the // StatefulSet. func K8SStatefulSetUID(val string) attribute.KeyValue { return K8SStatefulSetUIDKey.String(val) } // K8SStatefulSetName returns an attribute KeyValue conforming to the // "k8s.statefulset.name" semantic conventions. It represents the name of the // StatefulSet. func K8SStatefulSetName(val string) attribute.KeyValue { return K8SStatefulSetNameKey.String(val) } // A Kubernetes DaemonSet object. const ( // K8SDaemonSetUIDKey is the attribute Key conforming to the // "k8s.daemonset.uid" semantic conventions. It represents the UID of the // DaemonSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") // K8SDaemonSetNameKey is the attribute Key conforming to the // "k8s.daemonset.name" semantic conventions. It represents the name of the // DaemonSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") ) // K8SDaemonSetUID returns an attribute KeyValue conforming to the // "k8s.daemonset.uid" semantic conventions. It represents the UID of the // DaemonSet. func K8SDaemonSetUID(val string) attribute.KeyValue { return K8SDaemonSetUIDKey.String(val) } // K8SDaemonSetName returns an attribute KeyValue conforming to the // "k8s.daemonset.name" semantic conventions. It represents the name of the // DaemonSet. func K8SDaemonSetName(val string) attribute.KeyValue { return K8SDaemonSetNameKey.String(val) } // A Kubernetes Job object. const ( // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" // semantic conventions. It represents the UID of the Job. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SJobUIDKey = attribute.Key("k8s.job.uid") // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" // semantic conventions. It represents the name of the Job. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SJobNameKey = attribute.Key("k8s.job.name") ) // K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" // semantic conventions. It represents the UID of the Job. func K8SJobUID(val string) attribute.KeyValue { return K8SJobUIDKey.String(val) } // K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" // semantic conventions. It represents the name of the Job. func K8SJobName(val string) attribute.KeyValue { return K8SJobNameKey.String(val) } // A Kubernetes CronJob object. const ( // K8SCronJobUIDKey is the attribute Key conforming to the // "k8s.cronjob.uid" semantic conventions. It represents the UID of the // CronJob. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") // K8SCronJobNameKey is the attribute Key conforming to the // "k8s.cronjob.name" semantic conventions. It represents the name of the // CronJob. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") ) // K8SCronJobUID returns an attribute KeyValue conforming to the // "k8s.cronjob.uid" semantic conventions. It represents the UID of the // CronJob. func K8SCronJobUID(val string) attribute.KeyValue { return K8SCronJobUIDKey.String(val) } // K8SCronJobName returns an attribute KeyValue conforming to the // "k8s.cronjob.name" semantic conventions. It represents the name of the // CronJob. func K8SCronJobName(val string) attribute.KeyValue { return K8SCronJobNameKey.String(val) } // The operating system (OS) on which the process represented by this resource // is running. const ( // OSTypeKey is the attribute Key conforming to the "os.type" semantic // conventions. It represents the operating system type. // // Type: Enum // RequirementLevel: Required // Stability: stable OSTypeKey = attribute.Key("os.type") // OSDescriptionKey is the attribute Key conforming to the "os.description" // semantic conventions. It represents the human readable (not intended to // be parsed) OS version information, like e.g. reported by `ver` or // `lsb_release -a` commands. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 // LTS' OSDescriptionKey = attribute.Key("os.description") // OSNameKey is the attribute Key conforming to the "os.name" semantic // conventions. It represents the human readable operating system name. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iOS', 'Android', 'Ubuntu' OSNameKey = attribute.Key("os.name") // OSVersionKey is the attribute Key conforming to the "os.version" // semantic conventions. It represents the version string of the operating // system as defined in [Version // Attributes](../../resource/semantic_conventions/README.md#version-attributes). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '14.2.1', '18.04.1' OSVersionKey = attribute.Key("os.version") ) var ( // Microsoft Windows OSTypeWindows = OSTypeKey.String("windows") // Linux OSTypeLinux = OSTypeKey.String("linux") // Apple Darwin OSTypeDarwin = OSTypeKey.String("darwin") // FreeBSD OSTypeFreeBSD = OSTypeKey.String("freebsd") // NetBSD OSTypeNetBSD = OSTypeKey.String("netbsd") // OpenBSD OSTypeOpenBSD = OSTypeKey.String("openbsd") // DragonFly BSD OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") // HP-UX (Hewlett Packard Unix) OSTypeHPUX = OSTypeKey.String("hpux") // AIX (Advanced Interactive eXecutive) OSTypeAIX = OSTypeKey.String("aix") // SunOS, Oracle Solaris OSTypeSolaris = OSTypeKey.String("solaris") // IBM z/OS OSTypeZOS = OSTypeKey.String("z_os") ) // OSDescription returns an attribute KeyValue conforming to the // "os.description" semantic conventions. It represents the human readable (not // intended to be parsed) OS version information, like e.g. reported by `ver` // or `lsb_release -a` commands. func OSDescription(val string) attribute.KeyValue { return OSDescriptionKey.String(val) } // OSName returns an attribute KeyValue conforming to the "os.name" semantic // conventions. It represents the human readable operating system name. func OSName(val string) attribute.KeyValue { return OSNameKey.String(val) } // OSVersion returns an attribute KeyValue conforming to the "os.version" // semantic conventions. 
It represents the version string of the operating // system as defined in [Version // Attributes](../../resource/semantic_conventions/README.md#version-attributes). func OSVersion(val string) attribute.KeyValue { return OSVersionKey.String(val) } // An operating system process. const ( // ProcessPIDKey is the attribute Key conforming to the "process.pid" // semantic conventions. It represents the process identifier (PID). // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 1234 ProcessPIDKey = attribute.Key("process.pid") // ProcessParentPIDKey is the attribute Key conforming to the // "process.parent_pid" semantic conventions. It represents the parent // Process identifier (PID). // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 111 ProcessParentPIDKey = attribute.Key("process.parent_pid") // ProcessExecutableNameKey is the attribute Key conforming to the // "process.executable.name" semantic conventions. It represents the name // of the process executable. On Linux based systems, can be set to the // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name // of `GetProcessImageFileNameW`. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes // below.) // Stability: stable // Examples: 'otelcol' ProcessExecutableNameKey = attribute.Key("process.executable.name") // ProcessExecutablePathKey is the attribute Key conforming to the // "process.executable.path" semantic conventions. It represents the full // path to the process executable. On Linux based systems, can be set to // the target of `proc/[pid]/exe`. On Windows, can be set to the result of // `GetProcessImageFileNameW`. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes // below.) // Stability: stable // Examples: '/usr/bin/cmd/otelcol' ProcessExecutablePathKey = attribute.Key("process.executable.path") // ProcessCommandKey is the attribute Key conforming to the // "process.command" semantic conventions. It represents the command used // to launch the process (i.e. the command name). On Linux based systems, // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can // be set to the first parameter extracted from `GetCommandLineW`. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes // below.) // Stability: stable // Examples: 'cmd/otelcol' ProcessCommandKey = attribute.Key("process.command") // ProcessCommandLineKey is the attribute Key conforming to the // "process.command_line" semantic conventions. It represents the full // command used to launch the process as a single string representing the // full command. On Windows, can be set to the result of `GetCommandLineW`. // Do not set this if you have to assemble it just for monitoring; use // `process.command_args` instead. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes // below.) // Stability: stable // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' ProcessCommandLineKey = attribute.Key("process.command_line") // ProcessCommandArgsKey is the attribute Key conforming to the // "process.command_args" semantic conventions. It represents the all the // command arguments (including the command/executable itself) as received // by the process. On Linux-based systems (and some other Unixoid systems // supporting procfs), can be set according to the list of null-delimited // strings extracted from `proc/[pid]/cmdline`. 
For libc-based executables, // this would be the full argv vector passed to `main`. // // Type: string[] // RequirementLevel: ConditionallyRequired (See alternative attributes // below.) // Stability: stable // Examples: 'cmd/otecol', '--config=config.yaml' ProcessCommandArgsKey = attribute.Key("process.command_args") // ProcessOwnerKey is the attribute Key conforming to the "process.owner" // semantic conventions. It represents the username of the user that owns // the process. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'root' ProcessOwnerKey = attribute.Key("process.owner") ) // ProcessPID returns an attribute KeyValue conforming to the "process.pid" // semantic conventions. It represents the process identifier (PID). func ProcessPID(val int) attribute.KeyValue { return ProcessPIDKey.Int(val) } // ProcessParentPID returns an attribute KeyValue conforming to the // "process.parent_pid" semantic conventions. It represents the parent Process // identifier (PID). func ProcessParentPID(val int) attribute.KeyValue { return ProcessParentPIDKey.Int(val) } // ProcessExecutableName returns an attribute KeyValue conforming to the // "process.executable.name" semantic conventions. It represents the name of // the process executable. On Linux based systems, can be set to the `Name` in // `proc/[pid]/status`. On Windows, can be set to the base name of // `GetProcessImageFileNameW`. func ProcessExecutableName(val string) attribute.KeyValue { return ProcessExecutableNameKey.String(val) } // ProcessExecutablePath returns an attribute KeyValue conforming to the // "process.executable.path" semantic conventions. It represents the full path // to the process executable. On Linux based systems, can be set to the target // of `proc/[pid]/exe`. On Windows, can be set to the result of // `GetProcessImageFileNameW`. func ProcessExecutablePath(val string) attribute.KeyValue { return ProcessExecutablePathKey.String(val) } // ProcessCommand returns an attribute KeyValue conforming to the // "process.command" semantic conventions. It represents the command used to // launch the process (i.e. the command name). On Linux based systems, can be // set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to // the first parameter extracted from `GetCommandLineW`. func ProcessCommand(val string) attribute.KeyValue { return ProcessCommandKey.String(val) } // ProcessCommandLine returns an attribute KeyValue conforming to the // "process.command_line" semantic conventions. It represents the full command // used to launch the process as a single string representing the full command. // On Windows, can be set to the result of `GetCommandLineW`. Do not set this // if you have to assemble it just for monitoring; use `process.command_args` // instead. func ProcessCommandLine(val string) attribute.KeyValue { return ProcessCommandLineKey.String(val) } // ProcessCommandArgs returns an attribute KeyValue conforming to the // "process.command_args" semantic conventions. It represents the all the // command arguments (including the command/executable itself) as received by // the process. On Linux-based systems (and some other Unixoid systems // supporting procfs), can be set according to the list of null-delimited // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, // this would be the full argv vector passed to `main`. 
func ProcessCommandArgs(val ...string) attribute.KeyValue { return ProcessCommandArgsKey.StringSlice(val) } // ProcessOwner returns an attribute KeyValue conforming to the // "process.owner" semantic conventions. It represents the username of the user // that owns the process. func ProcessOwner(val string) attribute.KeyValue { return ProcessOwnerKey.String(val) } // The single (language) runtime instance which is monitored. const ( // ProcessRuntimeNameKey is the attribute Key conforming to the // "process.runtime.name" semantic conventions. It represents the name of // the runtime of this process. For compiled native binaries, this SHOULD // be the name of the compiler. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'OpenJDK Runtime Environment' ProcessRuntimeNameKey = attribute.Key("process.runtime.name") // ProcessRuntimeVersionKey is the attribute Key conforming to the // "process.runtime.version" semantic conventions. It represents the // version of the runtime of this process, as returned by the runtime // without modification. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '14.0.2' ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") // ProcessRuntimeDescriptionKey is the attribute Key conforming to the // "process.runtime.description" semantic conventions. It represents an // additional description about the runtime of the process, for example a // specific vendor customization of the runtime environment. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") ) // ProcessRuntimeName returns an attribute KeyValue conforming to the // "process.runtime.name" semantic conventions. It represents the name of the // runtime of this process. For compiled native binaries, this SHOULD be the // name of the compiler. func ProcessRuntimeName(val string) attribute.KeyValue { return ProcessRuntimeNameKey.String(val) } // ProcessRuntimeVersion returns an attribute KeyValue conforming to the // "process.runtime.version" semantic conventions. It represents the version of // the runtime of this process, as returned by the runtime without // modification. func ProcessRuntimeVersion(val string) attribute.KeyValue { return ProcessRuntimeVersionKey.String(val) } // ProcessRuntimeDescription returns an attribute KeyValue conforming to the // "process.runtime.description" semantic conventions. It represents an // additional description about the runtime of the process, for example a // specific vendor customization of the runtime environment. func ProcessRuntimeDescription(val string) attribute.KeyValue { return ProcessRuntimeDescriptionKey.String(val) } // A service instance. const ( // ServiceNameKey is the attribute Key conforming to the "service.name" // semantic conventions. It represents the logical name of the service. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'shoppingcart' // Note: MUST be the same for all instances of horizontally scaled // services. If the value was not specified, SDKs MUST fallback to // `unknown_service:` concatenated with // [`process.executable.name`](process.md#process), e.g. // `unknown_service:bash`. If `process.executable.name` is not available, // the value MUST be set to `unknown_service`. 
ServiceNameKey = attribute.Key("service.name") // ServiceNamespaceKey is the attribute Key conforming to the // "service.namespace" semantic conventions. It represents a namespace for // `service.name`. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Shop' // Note: A string value having a meaning that helps to distinguish a group // of services, for example the team name that owns a group of services. // `service.name` is expected to be unique within the same namespace. If // `service.namespace` is not specified in the Resource then `service.name` // is expected to be unique for all services that have no explicit // namespace defined (so the empty/unspecified namespace is simply one more // valid namespace). Zero-length namespace string is assumed equal to // unspecified namespace. ServiceNamespaceKey = attribute.Key("service.namespace") // ServiceInstanceIDKey is the attribute Key conforming to the // "service.instance.id" semantic conventions. It represents the string ID // of the service instance. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '627cc493-f310-47de-96bd-71410b7dec09' // Note: MUST be unique for each instance of the same // `service.namespace,service.name` pair (in other words // `service.namespace,service.name,service.instance.id` triplet MUST be // globally unique). The ID helps to distinguish instances of the same // service that exist at the same time (e.g. instances of a horizontally // scaled service). It is preferable for the ID to be persistent and stay // the same for the lifetime of the service instance, however it is // acceptable that the ID is ephemeral and changes during important // lifetime events for the service (e.g. service restarts). If the service // has no inherent unique ID that can be used as the value of this // attribute it is recommended to generate a random Version 1 or Version 4 // RFC 4122 UUID (services aiming for reproducible UUIDs may also use // Version 5, see RFC 4122 for more recommendations). ServiceInstanceIDKey = attribute.Key("service.instance.id") // ServiceVersionKey is the attribute Key conforming to the // "service.version" semantic conventions. It represents the version string // of the service API or implementation. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2.0.0' ServiceVersionKey = attribute.Key("service.version") ) // ServiceName returns an attribute KeyValue conforming to the // "service.name" semantic conventions. It represents the logical name of the // service. func ServiceName(val string) attribute.KeyValue { return ServiceNameKey.String(val) } // ServiceNamespace returns an attribute KeyValue conforming to the // "service.namespace" semantic conventions. It represents a namespace for // `service.name`. func ServiceNamespace(val string) attribute.KeyValue { return ServiceNamespaceKey.String(val) } // ServiceInstanceID returns an attribute KeyValue conforming to the // "service.instance.id" semantic conventions. It represents the string ID of // the service instance. func ServiceInstanceID(val string) attribute.KeyValue { return ServiceInstanceIDKey.String(val) } // ServiceVersion returns an attribute KeyValue conforming to the // "service.version" semantic conventions. It represents the version string of // the service API or implementation. 
func ServiceVersion(val string) attribute.KeyValue { return ServiceVersionKey.String(val) } // The telemetry SDK used to capture data recorded by the instrumentation // libraries. const ( // TelemetrySDKNameKey is the attribute Key conforming to the // "telemetry.sdk.name" semantic conventions. It represents the name of the // telemetry SDK as defined above. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") // TelemetrySDKLanguageKey is the attribute Key conforming to the // "telemetry.sdk.language" semantic conventions. It represents the // language of the telemetry SDK. // // Type: Enum // RequirementLevel: Optional // Stability: stable TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") // TelemetrySDKVersionKey is the attribute Key conforming to the // "telemetry.sdk.version" semantic conventions. It represents the version // string of the telemetry SDK. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.2.3' TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") // TelemetryAutoVersionKey is the attribute Key conforming to the // "telemetry.auto.version" semantic conventions. It represents the version // string of the auto instrumentation agent, if used. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.2.3' TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") ) var ( // cpp TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") // dotnet TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") // erlang TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") // go TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") // java TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") // nodejs TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") // php TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") // python TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") // ruby TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") // webjs TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") // swift TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") ) // TelemetrySDKName returns an attribute KeyValue conforming to the // "telemetry.sdk.name" semantic conventions. It represents the name of the // telemetry SDK as defined above. func TelemetrySDKName(val string) attribute.KeyValue { return TelemetrySDKNameKey.String(val) } // TelemetrySDKVersion returns an attribute KeyValue conforming to the // "telemetry.sdk.version" semantic conventions. It represents the version // string of the telemetry SDK. func TelemetrySDKVersion(val string) attribute.KeyValue { return TelemetrySDKVersionKey.String(val) } // TelemetryAutoVersion returns an attribute KeyValue conforming to the // "telemetry.auto.version" semantic conventions. It represents the version // string of the auto instrumentation agent, if used. func TelemetryAutoVersion(val string) attribute.KeyValue { return TelemetryAutoVersionKey.String(val) } // Resource describing the packaged software running the application code. Web // engines are typically executed using process.runtime. const ( // WebEngineNameKey is the attribute Key conforming to the "webengine.name" // semantic conventions. It represents the name of the web engine. 
// // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'WildFly' WebEngineNameKey = attribute.Key("webengine.name") // WebEngineVersionKey is the attribute Key conforming to the // "webengine.version" semantic conventions. It represents the version of // the web engine. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '21.0.0' WebEngineVersionKey = attribute.Key("webengine.version") // WebEngineDescriptionKey is the attribute Key conforming to the // "webengine.description" semantic conventions. It represents the // additional description of the web engine (e.g. detailed version and // edition information). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - // 2.2.2.Final' WebEngineDescriptionKey = attribute.Key("webengine.description") ) // WebEngineName returns an attribute KeyValue conforming to the // "webengine.name" semantic conventions. It represents the name of the web // engine. func WebEngineName(val string) attribute.KeyValue { return WebEngineNameKey.String(val) } // WebEngineVersion returns an attribute KeyValue conforming to the // "webengine.version" semantic conventions. It represents the version of the // web engine. func WebEngineVersion(val string) attribute.KeyValue { return WebEngineVersionKey.String(val) } // WebEngineDescription returns an attribute KeyValue conforming to the // "webengine.description" semantic conventions. It represents the additional // description of the web engine (e.g. detailed version and edition // information). func WebEngineDescription(val string) attribute.KeyValue { return WebEngineDescriptionKey.String(val) } // Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's // concepts. const ( // OtelScopeNameKey is the attribute Key conforming to the // "otel.scope.name" semantic conventions. It represents the name of the // instrumentation scope - (`InstrumentationScope.Name` in OTLP). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'io.opentelemetry.contrib.mongodb' OtelScopeNameKey = attribute.Key("otel.scope.name") // OtelScopeVersionKey is the attribute Key conforming to the // "otel.scope.version" semantic conventions. It represents the version of // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.0.0' OtelScopeVersionKey = attribute.Key("otel.scope.version") ) // OtelScopeName returns an attribute KeyValue conforming to the // "otel.scope.name" semantic conventions. It represents the name of the // instrumentation scope - (`InstrumentationScope.Name` in OTLP). func OtelScopeName(val string) attribute.KeyValue { return OtelScopeNameKey.String(val) } // OtelScopeVersion returns an attribute KeyValue conforming to the // "otel.scope.version" semantic conventions. It represents the version of the // instrumentation scope - (`InstrumentationScope.Version` in OTLP). func OtelScopeVersion(val string) attribute.KeyValue { return OtelScopeVersionKey.String(val) } // Span attributes used by non-OTLP exporters to represent OpenTelemetry // Scope's concepts. const ( // OtelLibraryNameKey is the attribute Key conforming to the // "otel.library.name" semantic conventions. It represents the deprecated, // use the `otel.scope.name` attribute. 
// // Type: string // RequirementLevel: Optional // Stability: deprecated // Examples: 'io.opentelemetry.contrib.mongodb' OtelLibraryNameKey = attribute.Key("otel.library.name") // OtelLibraryVersionKey is the attribute Key conforming to the // "otel.library.version" semantic conventions. It represents the // deprecated, use the `otel.scope.version` attribute. // // Type: string // RequirementLevel: Optional // Stability: deprecated // Examples: '1.0.0' OtelLibraryVersionKey = attribute.Key("otel.library.version") ) // OtelLibraryName returns an attribute KeyValue conforming to the // "otel.library.name" semantic conventions. It represents the deprecated, use // the `otel.scope.name` attribute. func OtelLibraryName(val string) attribute.KeyValue { return OtelLibraryNameKey.String(val) } // OtelLibraryVersion returns an attribute KeyValue conforming to the // "otel.library.version" semantic conventions. It represents the deprecated, // use the `otel.scope.version` attribute. func OtelLibraryVersion(val string) attribute.KeyValue { return OtelLibraryVersionKey.String(val) } opentelemetry-go-1.21.0/semconv/v1.17.0/schema.go000066400000000000000000000017141452547353200212620ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ const SchemaURL = "https://opentelemetry.io/schemas/1.17.0" opentelemetry-go-1.21.0/semconv/v1.17.0/trace.go000066400000000000000000004137041452547353200211260ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" import "go.opentelemetry.io/otel/attribute" // The shared attributes used to report a single exception associated with a // span or log. const ( // ExceptionTypeKey is the attribute Key conforming to the "exception.type" // semantic conventions. It represents the type of the exception (its // fully-qualified class name, if applicable). The dynamic type of the // exception should be preferred over the static type in languages that // support it. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'java.net.ConnectException', 'OSError' ExceptionTypeKey = attribute.Key("exception.type") // ExceptionMessageKey is the attribute Key conforming to the // "exception.message" semantic conventions. It represents the exception // message. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Division by zero', "Can't convert 'int' object to str // implicitly" ExceptionMessageKey = attribute.Key("exception.message") // ExceptionStacktraceKey is the attribute Key conforming to the // "exception.stacktrace" semantic conventions. It represents a stacktrace // as a string in the natural representation for the language runtime. The // representation is to be determined and documented by each language SIG. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test // exception\\n at ' // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' ExceptionStacktraceKey = attribute.Key("exception.stacktrace") ) // ExceptionType returns an attribute KeyValue conforming to the // "exception.type" semantic conventions. It represents the type of the // exception (its fully-qualified class name, if applicable). The dynamic type // of the exception should be preferred over the static type in languages that // support it. func ExceptionType(val string) attribute.KeyValue { return ExceptionTypeKey.String(val) } // ExceptionMessage returns an attribute KeyValue conforming to the // "exception.message" semantic conventions. It represents the exception // message. func ExceptionMessage(val string) attribute.KeyValue { return ExceptionMessageKey.String(val) } // ExceptionStacktrace returns an attribute KeyValue conforming to the // "exception.stacktrace" semantic conventions. It represents a stacktrace as a // string in the natural representation for the language runtime. The // representation is to be determined and documented by each language SIG. func ExceptionStacktrace(val string) attribute.KeyValue { return ExceptionStacktraceKey.String(val) } // Attributes for Events represented using Log Records. const ( // EventNameKey is the attribute Key conforming to the "event.name" // semantic conventions. It represents the name identifies the event. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'click', 'exception' EventNameKey = attribute.Key("event.name") // EventDomainKey is the attribute Key conforming to the "event.domain" // semantic conventions. It represents the domain identifies the business // context for the events. // // Type: Enum // RequirementLevel: Required // Stability: stable // Note: Events across different domains may have same `event.name`, yet be // unrelated events. EventDomainKey = attribute.Key("event.domain") ) var ( // Events from browser apps EventDomainBrowser = EventDomainKey.String("browser") // Events from mobile apps EventDomainDevice = EventDomainKey.String("device") // Events from Kubernetes EventDomainK8S = EventDomainKey.String("k8s") ) // EventName returns an attribute KeyValue conforming to the "event.name" // semantic conventions. It represents the name identifies the event. 
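// Illustrative usage sketch (not generated from the specification): a
// recorded error is conventionally represented as a span event named
// "exception" carrying the attributes above. Assumes span is a started
// trace.Span; values are the documented examples.
//
//	span.AddEvent("exception", trace.WithAttributes(
//		semconv.ExceptionType("OSError"),
//		semconv.ExceptionMessage("Division by zero"),
//	))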
func EventName(val string) attribute.KeyValue { return EventNameKey.String(val) } // Span attributes used by AWS Lambda (in addition to general `faas` // attributes). const ( // AWSLambdaInvokedARNKey is the attribute Key conforming to the // "aws.lambda.invoked_arn" semantic conventions. It represents the full // invoked ARN as provided on the `Context` passed to the function // (`Lambda-Runtime-Invoked-Function-ARN` header on the // `/runtime/invocation/next` applicable). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' // Note: This may be different from `faas.id` if an alias is involved. AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") ) // AWSLambdaInvokedARN returns an attribute KeyValue conforming to the // "aws.lambda.invoked_arn" semantic conventions. It represents the full // invoked ARN as provided on the `Context` passed to the function // (`Lambda-Runtime-Invoked-Function-ARN` header on the // `/runtime/invocation/next` applicable). func AWSLambdaInvokedARN(val string) attribute.KeyValue { return AWSLambdaInvokedARNKey.String(val) } // Attributes for CloudEvents. CloudEvents is a specification on how to define // event data in a standard way. These attributes can be attached to spans when // performing operations with CloudEvents, regardless of the protocol being // used. const ( // CloudeventsEventIDKey is the attribute Key conforming to the // "cloudevents.event_id" semantic conventions. It represents the // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) // uniquely identifies the event. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") // CloudeventsEventSourceKey is the attribute Key conforming to the // "cloudevents.event_source" semantic conventions. It represents the // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) // identifies the context in which an event happened. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'https://github.com/cloudevents', // '/cloudevents/spec/pull/123', 'my-service' CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") // CloudeventsEventSpecVersionKey is the attribute Key conforming to the // "cloudevents.event_spec_version" semantic conventions. It represents the // [version of the CloudEvents // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) // which the event uses. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.0' CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") // CloudeventsEventTypeKey is the attribute Key conforming to the // "cloudevents.event_type" semantic conventions. It represents the // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) // contains a value describing the type of event related to the originating // occurrence. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'com.github.pull_request.opened', // 'com.example.object.deleted.v2' CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") // CloudeventsEventSubjectKey is the attribute Key conforming to the // "cloudevents.event_subject" semantic conventions. 
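// Illustrative usage sketch (not generated from the specification): an
// incoming AWS Lambda invocation span can carry the invoked ARN provided by
// the runtime; the literal below is the documented example value.
//
//	span.SetAttributes(
//		semconv.AWSLambdaInvokedARN("arn:aws:lambda:us-east-1:123456:function:myfunction:myalias"),
//	)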
It represents the // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) // of the event in the context of the event producer (identified by // source). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'mynewfile.jpg' CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") ) // CloudeventsEventID returns an attribute KeyValue conforming to the // "cloudevents.event_id" semantic conventions. It represents the // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) // uniquely identifies the event. func CloudeventsEventID(val string) attribute.KeyValue { return CloudeventsEventIDKey.String(val) } // CloudeventsEventSource returns an attribute KeyValue conforming to the // "cloudevents.event_source" semantic conventions. It represents the // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) // identifies the context in which an event happened. func CloudeventsEventSource(val string) attribute.KeyValue { return CloudeventsEventSourceKey.String(val) } // CloudeventsEventSpecVersion returns an attribute KeyValue conforming to // the "cloudevents.event_spec_version" semantic conventions. It represents the // [version of the CloudEvents // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) // which the event uses. func CloudeventsEventSpecVersion(val string) attribute.KeyValue { return CloudeventsEventSpecVersionKey.String(val) } // CloudeventsEventType returns an attribute KeyValue conforming to the // "cloudevents.event_type" semantic conventions. It represents the // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) // contains a value describing the type of event related to the originating // occurrence. func CloudeventsEventType(val string) attribute.KeyValue { return CloudeventsEventTypeKey.String(val) } // CloudeventsEventSubject returns an attribute KeyValue conforming to the // "cloudevents.event_subject" semantic conventions. It represents the // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) // of the event in the context of the event producer (identified by source). func CloudeventsEventSubject(val string) attribute.KeyValue { return CloudeventsEventSubjectKey.String(val) } // Semantic conventions for the OpenTracing Shim const ( // OpentracingRefTypeKey is the attribute Key conforming to the // "opentracing.ref_type" semantic conventions. It represents the // parent-child Reference type // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: The causal relationship between a child Span and a parent Span. OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") ) var ( // The parent Span depends on the child Span in some capacity OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") // The parent Span does not depend in any way on the result of the child Span OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") ) // The attributes used to perform database client calls. const ( // DBSystemKey is the attribute Key conforming to the "db.system" semantic // conventions. It represents an identifier for the database management // system (DBMS) product being used. See below for a list of well-known // identifiers. 
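// A minimal sketch (not generated from the specification): a span created
// while processing a CloudEvent can carry the identifying attributes defined
// above; values are the documented examples and span is a started trace.Span.
//
//	span.SetAttributes(
//		semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
//		semconv.CloudeventsEventSource("my-service"),
//		semconv.CloudeventsEventType("com.example.object.deleted.v2"),
//	)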
// // Type: Enum // RequirementLevel: Required // Stability: stable DBSystemKey = attribute.Key("db.system") // DBConnectionStringKey is the attribute Key conforming to the // "db.connection_string" semantic conventions. It represents the // connection string used to connect to the database. It is recommended to // remove embedded credentials. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' DBConnectionStringKey = attribute.Key("db.connection_string") // DBUserKey is the attribute Key conforming to the "db.user" semantic // conventions. It represents the username for accessing the database. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'readonly_user', 'reporting_user' DBUserKey = attribute.Key("db.user") // DBJDBCDriverClassnameKey is the attribute Key conforming to the // "db.jdbc.driver_classname" semantic conventions. It represents the // fully-qualified class name of the [Java Database Connectivity // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) // driver used to connect. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'org.postgresql.Driver', // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") // DBNameKey is the attribute Key conforming to the "db.name" semantic // conventions. It represents the this attribute is used to report the name // of the database being accessed. For commands that switch the database, // this should be set to the target database (even if the command fails). // // Type: string // RequirementLevel: ConditionallyRequired (If applicable.) // Stability: stable // Examples: 'customers', 'main' // Note: In some SQL databases, the database name to be used is called // "schema name". In case there are multiple layers that could be // considered for database name (e.g. Oracle instance name and schema // name), the database name to be used is the more specific layer (e.g. // Oracle schema name). DBNameKey = attribute.Key("db.name") // DBStatementKey is the attribute Key conforming to the "db.statement" // semantic conventions. It represents the database statement being // executed. // // Type: string // RequirementLevel: ConditionallyRequired (If applicable and not // explicitly disabled via instrumentation configuration.) // Stability: stable // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' // Note: The value may be sanitized to exclude sensitive information. DBStatementKey = attribute.Key("db.statement") // DBOperationKey is the attribute Key conforming to the "db.operation" // semantic conventions. It represents the name of the operation being // executed, e.g. the [MongoDB command // name](https://docs.mongodb.com/manual/reference/command/#database-operations) // such as `findAndModify`, or the SQL keyword. // // Type: string // RequirementLevel: ConditionallyRequired (If `db.statement` is not // applicable.) // Stability: stable // Examples: 'findAndModify', 'HMSET', 'SELECT' // Note: When setting this to an SQL keyword, it is not recommended to // attempt any client-side parsing of `db.statement` just to get this // property, but it should be set if the operation name is provided by the // library being instrumented. If the SQL statement has an ambiguous // operation, or performs more than one operation, this value may be // omitted. 
DBOperationKey = attribute.Key("db.operation") ) var ( // Some other SQL database. Fallback only. See notes DBSystemOtherSQL = DBSystemKey.String("other_sql") // Microsoft SQL Server DBSystemMSSQL = DBSystemKey.String("mssql") // MySQL DBSystemMySQL = DBSystemKey.String("mysql") // Oracle Database DBSystemOracle = DBSystemKey.String("oracle") // IBM DB2 DBSystemDB2 = DBSystemKey.String("db2") // PostgreSQL DBSystemPostgreSQL = DBSystemKey.String("postgresql") // Amazon Redshift DBSystemRedshift = DBSystemKey.String("redshift") // Apache Hive DBSystemHive = DBSystemKey.String("hive") // Cloudscape DBSystemCloudscape = DBSystemKey.String("cloudscape") // HyperSQL DataBase DBSystemHSQLDB = DBSystemKey.String("hsqldb") // Progress Database DBSystemProgress = DBSystemKey.String("progress") // SAP MaxDB DBSystemMaxDB = DBSystemKey.String("maxdb") // SAP HANA DBSystemHanaDB = DBSystemKey.String("hanadb") // Ingres DBSystemIngres = DBSystemKey.String("ingres") // FirstSQL DBSystemFirstSQL = DBSystemKey.String("firstsql") // EnterpriseDB DBSystemEDB = DBSystemKey.String("edb") // InterSystems Caché DBSystemCache = DBSystemKey.String("cache") // Adabas (Adaptable Database System) DBSystemAdabas = DBSystemKey.String("adabas") // Firebird DBSystemFirebird = DBSystemKey.String("firebird") // Apache Derby DBSystemDerby = DBSystemKey.String("derby") // FileMaker DBSystemFilemaker = DBSystemKey.String("filemaker") // Informix DBSystemInformix = DBSystemKey.String("informix") // InstantDB DBSystemInstantDB = DBSystemKey.String("instantdb") // InterBase DBSystemInterbase = DBSystemKey.String("interbase") // MariaDB DBSystemMariaDB = DBSystemKey.String("mariadb") // Netezza DBSystemNetezza = DBSystemKey.String("netezza") // Pervasive PSQL DBSystemPervasive = DBSystemKey.String("pervasive") // PointBase DBSystemPointbase = DBSystemKey.String("pointbase") // SQLite DBSystemSqlite = DBSystemKey.String("sqlite") // Sybase DBSystemSybase = DBSystemKey.String("sybase") // Teradata DBSystemTeradata = DBSystemKey.String("teradata") // Vertica DBSystemVertica = DBSystemKey.String("vertica") // H2 DBSystemH2 = DBSystemKey.String("h2") // ColdFusion IMQ DBSystemColdfusion = DBSystemKey.String("coldfusion") // Apache Cassandra DBSystemCassandra = DBSystemKey.String("cassandra") // Apache HBase DBSystemHBase = DBSystemKey.String("hbase") // MongoDB DBSystemMongoDB = DBSystemKey.String("mongodb") // Redis DBSystemRedis = DBSystemKey.String("redis") // Couchbase DBSystemCouchbase = DBSystemKey.String("couchbase") // CouchDB DBSystemCouchDB = DBSystemKey.String("couchdb") // Microsoft Azure Cosmos DB DBSystemCosmosDB = DBSystemKey.String("cosmosdb") // Amazon DynamoDB DBSystemDynamoDB = DBSystemKey.String("dynamodb") // Neo4j DBSystemNeo4j = DBSystemKey.String("neo4j") // Apache Geode DBSystemGeode = DBSystemKey.String("geode") // Elasticsearch DBSystemElasticsearch = DBSystemKey.String("elasticsearch") // Memcached DBSystemMemcached = DBSystemKey.String("memcached") // CockroachDB DBSystemCockroachdb = DBSystemKey.String("cockroachdb") // OpenSearch DBSystemOpensearch = DBSystemKey.String("opensearch") // ClickHouse DBSystemClickhouse = DBSystemKey.String("clickhouse") ) // DBConnectionString returns an attribute KeyValue conforming to the // "db.connection_string" semantic conventions. It represents the connection // string used to connect to the database. It is recommended to remove embedded // credentials. 
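// Illustrative usage sketch (not generated from the specification): a typical
// database client span combines a db.system enum member with the call-level
// constructors that follow. Assumes span is a started trace.Span; values are
// the documented examples.
//
//	span.SetAttributes(
//		semconv.DBSystemPostgreSQL,
//		semconv.DBName("customers"),
//		semconv.DBOperation("SELECT"),
//		semconv.DBStatement("SELECT * FROM wuser_table"),
//	)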
func DBConnectionString(val string) attribute.KeyValue { return DBConnectionStringKey.String(val) } // DBUser returns an attribute KeyValue conforming to the "db.user" semantic // conventions. It represents the username for accessing the database. func DBUser(val string) attribute.KeyValue { return DBUserKey.String(val) } // DBJDBCDriverClassname returns an attribute KeyValue conforming to the // "db.jdbc.driver_classname" semantic conventions. It represents the // fully-qualified class name of the [Java Database Connectivity // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver // used to connect. func DBJDBCDriverClassname(val string) attribute.KeyValue { return DBJDBCDriverClassnameKey.String(val) } // DBName returns an attribute KeyValue conforming to the "db.name" semantic // conventions. It represents the this attribute is used to report the name of // the database being accessed. For commands that switch the database, this // should be set to the target database (even if the command fails). func DBName(val string) attribute.KeyValue { return DBNameKey.String(val) } // DBStatement returns an attribute KeyValue conforming to the // "db.statement" semantic conventions. It represents the database statement // being executed. func DBStatement(val string) attribute.KeyValue { return DBStatementKey.String(val) } // DBOperation returns an attribute KeyValue conforming to the // "db.operation" semantic conventions. It represents the name of the operation // being executed, e.g. the [MongoDB command // name](https://docs.mongodb.com/manual/reference/command/#database-operations) // such as `findAndModify`, or the SQL keyword. func DBOperation(val string) attribute.KeyValue { return DBOperationKey.String(val) } // Connection-level attributes for Microsoft SQL Server const ( // DBMSSQLInstanceNameKey is the attribute Key conforming to the // "db.mssql.instance_name" semantic conventions. It represents the // Microsoft SQL Server [instance // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) // connecting to. This name is used to determine the port of a named // instance. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MSSQLSERVER' // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no // longer required (but still recommended if non-standard). DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") ) // DBMSSQLInstanceName returns an attribute KeyValue conforming to the // "db.mssql.instance_name" semantic conventions. It represents the Microsoft // SQL Server [instance // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) // connecting to. This name is used to determine the port of a named instance. func DBMSSQLInstanceName(val string) attribute.KeyValue { return DBMSSQLInstanceNameKey.String(val) } // Call-level attributes for Cassandra const ( // DBCassandraPageSizeKey is the attribute Key conforming to the // "db.cassandra.page_size" semantic conventions. It represents the fetch // size used for paging, i.e. how many rows will be returned at once. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 5000 DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") // DBCassandraConsistencyLevelKey is the attribute Key conforming to the // "db.cassandra.consistency_level" semantic conventions. It represents the // consistency level of the query. 
Based on consistency values from // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). // // Type: Enum // RequirementLevel: Optional // Stability: stable DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") // DBCassandraTableKey is the attribute Key conforming to the // "db.cassandra.table" semantic conventions. It represents the name of the // primary table that the operation is acting upon, including the keyspace // name (if applicable). // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'mytable' // Note: This mirrors the db.sql.table attribute but references cassandra // rather than sql. It is not recommended to attempt any client-side // parsing of `db.statement` just to get this property, but it should be // set if it is provided by the library being instrumented. If the // operation is acting upon an anonymous table, or more than one table, // this value MUST NOT be set. DBCassandraTableKey = attribute.Key("db.cassandra.table") // DBCassandraIdempotenceKey is the attribute Key conforming to the // "db.cassandra.idempotence" semantic conventions. It represents // whether or not the query is idempotent. // // Type: boolean // RequirementLevel: Optional // Stability: stable DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming // to the "db.cassandra.speculative_execution_count" semantic conventions. // It represents the number of times a query was speculatively executed. // Not set or `0` if the query was not executed speculatively. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 0, 2 DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") // DBCassandraCoordinatorIDKey is the attribute Key conforming to the // "db.cassandra.coordinator.id" semantic conventions. It represents the ID // of the coordinating node for a query. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") // DBCassandraCoordinatorDCKey is the attribute Key conforming to the // "db.cassandra.coordinator.dc" semantic conventions. It represents the // data center of the coordinating node for a query.
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-west-2' DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") ) var ( // all DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") // each_quorum DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") // quorum DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") // local_quorum DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") // one DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") // two DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") // three DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") // local_one DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") // any DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") // serial DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") // local_serial DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") ) // DBCassandraPageSize returns an attribute KeyValue conforming to the // "db.cassandra.page_size" semantic conventions. It represents the fetch size // used for paging, i.e. how many rows will be returned at once. func DBCassandraPageSize(val int) attribute.KeyValue { return DBCassandraPageSizeKey.Int(val) } // DBCassandraTable returns an attribute KeyValue conforming to the // "db.cassandra.table" semantic conventions. It represents the name of the // primary table that the operation is acting upon, including the keyspace name // (if applicable). func DBCassandraTable(val string) attribute.KeyValue { return DBCassandraTableKey.String(val) } // DBCassandraIdempotence returns an attribute KeyValue conforming to the // "db.cassandra.idempotence" semantic conventions. It represents the whether // or not the query is idempotent. func DBCassandraIdempotence(val bool) attribute.KeyValue { return DBCassandraIdempotenceKey.Bool(val) } // DBCassandraSpeculativeExecutionCount returns an attribute KeyValue // conforming to the "db.cassandra.speculative_execution_count" semantic // conventions. It represents the number of times a query was speculatively // executed. Not set or `0` if the query was not executed speculatively. func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { return DBCassandraSpeculativeExecutionCountKey.Int(val) } // DBCassandraCoordinatorID returns an attribute KeyValue conforming to the // "db.cassandra.coordinator.id" semantic conventions. It represents the ID of // the coordinating node for a query. func DBCassandraCoordinatorID(val string) attribute.KeyValue { return DBCassandraCoordinatorIDKey.String(val) } // DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the // "db.cassandra.coordinator.dc" semantic conventions. It represents the data // center of the coordinating node for a query. func DBCassandraCoordinatorDC(val string) attribute.KeyValue { return DBCassandraCoordinatorDCKey.String(val) } // Call-level attributes for Redis const ( // DBRedisDBIndexKey is the attribute Key conforming to the // "db.redis.database_index" semantic conventions. It represents the index // of the database being accessed as used in the [`SELECT` // command](https://redis.io/commands/select), provided as an integer. 
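// Illustrative usage sketch (not generated from the specification):
// Cassandra-specific call attributes layered on top of the generic db.*
// attributes; values are the documented examples and span is a started
// trace.Span.
//
//	span.SetAttributes(
//		semconv.DBSystemCassandra,
//		semconv.DBCassandraTable("mytable"),
//		semconv.DBCassandraConsistencyLevelLocalQuorum,
//		semconv.DBCassandraPageSize(5000),
//	)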
To // be used instead of the generic `db.name` attribute. // // Type: int // RequirementLevel: ConditionallyRequired (If other than the default // database (`0`).) // Stability: stable // Examples: 0, 1, 15 DBRedisDBIndexKey = attribute.Key("db.redis.database_index") ) // DBRedisDBIndex returns an attribute KeyValue conforming to the // "db.redis.database_index" semantic conventions. It represents the index of // the database being accessed as used in the [`SELECT` // command](https://redis.io/commands/select), provided as an integer. To be // used instead of the generic `db.name` attribute. func DBRedisDBIndex(val int) attribute.KeyValue { return DBRedisDBIndexKey.Int(val) } // Call-level attributes for MongoDB const ( // DBMongoDBCollectionKey is the attribute Key conforming to the // "db.mongodb.collection" semantic conventions. It represents the // collection being accessed within the database stated in `db.name`. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'customers', 'products' DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") ) // DBMongoDBCollection returns an attribute KeyValue conforming to the // "db.mongodb.collection" semantic conventions. It represents the collection // being accessed within the database stated in `db.name`. func DBMongoDBCollection(val string) attribute.KeyValue { return DBMongoDBCollectionKey.String(val) } // Call-level attributes for SQL databases const ( // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" // semantic conventions. It represents the name of the primary table that // the operation is acting upon, including the database name (if // applicable). // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'public.users', 'customers' // Note: It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting // upon an anonymous table, or more than one table, this value MUST NOT be // set. DBSQLTableKey = attribute.Key("db.sql.table") ) // DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" // semantic conventions. It represents the name of the primary table that the // operation is acting upon, including the database name (if applicable). func DBSQLTable(val string) attribute.KeyValue { return DBSQLTableKey.String(val) } // Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's // concepts. const ( // OtelStatusCodeKey is the attribute Key conforming to the // "otel.status_code" semantic conventions. It represents the name of the // code, either "OK" or "ERROR". MUST NOT be set if the status code is // UNSET. // // Type: Enum // RequirementLevel: Optional // Stability: stable OtelStatusCodeKey = attribute.Key("otel.status_code") // OtelStatusDescriptionKey is the attribute Key conforming to the // "otel.status_description" semantic conventions. It represents the // description of the Status if it has a value, otherwise not set. 
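// Illustrative usage sketch (not generated from the specification): the Redis
// and MongoDB call-level attributes above, shown on two separate client
// spans; values are the documented examples.
//
//	// Redis span: database index other than the default `0`.
//	redisSpan.SetAttributes(semconv.DBSystemRedis, semconv.DBRedisDBIndex(15))
//
//	// MongoDB span: collection within the database named by db.name.
//	mongoSpan.SetAttributes(semconv.DBSystemMongoDB, semconv.DBMongoDBCollection("products"))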
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'resource not found' OtelStatusDescriptionKey = attribute.Key("otel.status_description") ) var ( // The operation has been validated by an Application developer or Operator to have completed successfully OtelStatusCodeOk = OtelStatusCodeKey.String("OK") // The operation contains an error OtelStatusCodeError = OtelStatusCodeKey.String("ERROR") ) // OtelStatusDescription returns an attribute KeyValue conforming to the // "otel.status_description" semantic conventions. It represents the // description of the Status if it has a value, otherwise not set. func OtelStatusDescription(val string) attribute.KeyValue { return OtelStatusDescriptionKey.String(val) } // This semantic convention describes an instance of a function that runs // without provisioning or managing of servers (also known as serverless // functions or Function as a Service (FaaS)) with spans. const ( // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" // semantic conventions. It represents the type of the trigger which caused // this function execution. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: For the server/consumer span on the incoming side, // `faas.trigger` MUST be set. // // Clients invoking FaaS instances usually cannot set `faas.trigger`, // since they would typically need to look in the payload to determine // the event type. If clients set it, it should be the same as the // trigger that corresponding incoming would have (i.e., this has // nothing to do with the underlying transport used to make the API // call to invoke the lambda, which is often HTTP). FaaSTriggerKey = attribute.Key("faas.trigger") // FaaSExecutionKey is the attribute Key conforming to the "faas.execution" // semantic conventions. It represents the execution ID of the current // function execution. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' FaaSExecutionKey = attribute.Key("faas.execution") ) var ( // A response to some data source operation such as a database or filesystem read/write FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") // To provide an answer to an inbound HTTP request FaaSTriggerHTTP = FaaSTriggerKey.String("http") // A function is set to be executed when messages are sent to a messaging system FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") // A function is scheduled to be executed regularly FaaSTriggerTimer = FaaSTriggerKey.String("timer") // If none of the others apply FaaSTriggerOther = FaaSTriggerKey.String("other") ) // FaaSExecution returns an attribute KeyValue conforming to the // "faas.execution" semantic conventions. It represents the execution ID of the // current function execution. func FaaSExecution(val string) attribute.KeyValue { return FaaSExecutionKey.String(val) } // Semantic Convention for FaaS triggered as a response to some data source // operation such as a database or filesystem read/write. const ( // FaaSDocumentCollectionKey is the attribute Key conforming to the // "faas.document.collection" semantic conventions. It represents the name // of the source on which the triggering operation was performed. For // example, in Cloud Storage or S3 corresponds to the bucket name, and in // Cosmos DB to the database name. 
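// A minimal sketch (not generated from the specification): an incoming FaaS
// invocation span usually sets the trigger enum and the execution ID; the ID
// is the documented example value and span is a started trace.Span.
//
//	span.SetAttributes(
//		semconv.FaaSTriggerHTTP,
//		semconv.FaaSExecution("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
//	)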
// // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myBucketName', 'myDBName' FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") // FaaSDocumentOperationKey is the attribute Key conforming to the // "faas.document.operation" semantic conventions. It represents the // describes the type of the operation that was performed on the data. // // Type: Enum // RequirementLevel: Required // Stability: stable FaaSDocumentOperationKey = attribute.Key("faas.document.operation") // FaaSDocumentTimeKey is the attribute Key conforming to the // "faas.document.time" semantic conventions. It represents a string // containing the time when the data was accessed in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSDocumentTimeKey = attribute.Key("faas.document.time") // FaaSDocumentNameKey is the attribute Key conforming to the // "faas.document.name" semantic conventions. It represents the document // name/table subjected to the operation. For example, in Cloud Storage or // S3 is the name of the file, and in Cosmos DB the table name. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'myFile.txt', 'myTableName' FaaSDocumentNameKey = attribute.Key("faas.document.name") ) var ( // When a new object is created FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") // When an object is modified FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") // When an object is deleted FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") ) // FaaSDocumentCollection returns an attribute KeyValue conforming to the // "faas.document.collection" semantic conventions. It represents the name of // the source on which the triggering operation was performed. For example, in // Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the // database name. func FaaSDocumentCollection(val string) attribute.KeyValue { return FaaSDocumentCollectionKey.String(val) } // FaaSDocumentTime returns an attribute KeyValue conforming to the // "faas.document.time" semantic conventions. It represents a string containing // the time when the data was accessed in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). func FaaSDocumentTime(val string) attribute.KeyValue { return FaaSDocumentTimeKey.String(val) } // FaaSDocumentName returns an attribute KeyValue conforming to the // "faas.document.name" semantic conventions. It represents the document // name/table subjected to the operation. For example, in Cloud Storage or S3 // is the name of the file, and in Cosmos DB the table name. func FaaSDocumentName(val string) attribute.KeyValue { return FaaSDocumentNameKey.String(val) } // Semantic Convention for FaaS scheduled to be executed regularly. const ( // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic // conventions. It represents a string containing the function invocation // time in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). 
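// Illustrative usage sketch (not generated from the specification): a
// datasource-triggered function can describe the triggering operation with
// the faas.document.* attributes; values are the documented examples.
//
//	span.SetAttributes(
//		semconv.FaaSTriggerDatasource,
//		semconv.FaaSDocumentCollection("myBucketName"),
//		semconv.FaaSDocumentOperationInsert,
//		semconv.FaaSDocumentName("myFile.txt"),
//	)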
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSTimeKey = attribute.Key("faas.time") // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic // conventions. It represents a string containing the schedule period as // [Cron // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0/5 * * * ? *' FaaSCronKey = attribute.Key("faas.cron") ) // FaaSTime returns an attribute KeyValue conforming to the "faas.time" // semantic conventions. It represents a string containing the function // invocation time in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). func FaaSTime(val string) attribute.KeyValue { return FaaSTimeKey.String(val) } // FaaSCron returns an attribute KeyValue conforming to the "faas.cron" // semantic conventions. It represents a string containing the schedule period // as [Cron // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). func FaaSCron(val string) attribute.KeyValue { return FaaSCronKey.String(val) } // Contains additional attributes for incoming FaaS spans. const ( // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" // semantic conventions. It represents a boolean that is true if the // serverless function is executed for the first time (aka cold-start). // // Type: boolean // RequirementLevel: Optional // Stability: stable FaaSColdstartKey = attribute.Key("faas.coldstart") ) // FaaSColdstart returns an attribute KeyValue conforming to the // "faas.coldstart" semantic conventions. It represents a boolean that is true // if the serverless function is executed for the first time (aka cold-start). func FaaSColdstart(val bool) attribute.KeyValue { return FaaSColdstartKey.Bool(val) } // Contains additional attributes for outgoing FaaS spans. const ( // FaaSInvokedNameKey is the attribute Key conforming to the // "faas.invoked_name" semantic conventions. It represents the name of the // invoked function. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'my-function' // Note: SHOULD be equal to the `faas.name` resource attribute of the // invoked function. FaaSInvokedNameKey = attribute.Key("faas.invoked_name") // FaaSInvokedProviderKey is the attribute Key conforming to the // "faas.invoked_provider" semantic conventions. It represents the cloud // provider of the invoked function. // // Type: Enum // RequirementLevel: Required // Stability: stable // Note: SHOULD be equal to the `cloud.provider` resource attribute of the // invoked function. FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") // FaaSInvokedRegionKey is the attribute Key conforming to the // "faas.invoked_region" semantic conventions. It represents the cloud // region of the invoked function. // // Type: string // RequirementLevel: ConditionallyRequired (For some cloud providers, like // AWS or GCP, the region in which a function is hosted is essential to // uniquely identify the function and also part of its endpoint. Since it's // part of the endpoint being called, the region is always known to // clients. In these cases, `faas.invoked_region` MUST be set accordingly. // If the region is unknown to the client or not required for identifying // the invoked function, setting `faas.invoked_region` is optional.) 
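// Illustrative usage sketch (not generated from the specification): an
// outgoing (client) FaaS invocation span names the invoked function, its
// provider and, where required, its region; values are the documented
// examples.
//
//	span.SetAttributes(
//		semconv.FaaSInvokedName("my-function"),
//		semconv.FaaSInvokedProviderAWS,
//		semconv.FaaSInvokedRegion("eu-central-1"),
//	)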
// Stability: stable // Examples: 'eu-central-1' // Note: SHOULD be equal to the `cloud.region` resource attribute of the // invoked function. FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") ) var ( // Alibaba Cloud FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") // Amazon Web Services FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") // Microsoft Azure FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") // Google Cloud Platform FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") // Tencent Cloud FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") ) // FaaSInvokedName returns an attribute KeyValue conforming to the // "faas.invoked_name" semantic conventions. It represents the name of the // invoked function. func FaaSInvokedName(val string) attribute.KeyValue { return FaaSInvokedNameKey.String(val) } // FaaSInvokedRegion returns an attribute KeyValue conforming to the // "faas.invoked_region" semantic conventions. It represents the cloud region // of the invoked function. func FaaSInvokedRegion(val string) attribute.KeyValue { return FaaSInvokedRegionKey.String(val) } // These attributes may be used for any network related operation. const ( // NetTransportKey is the attribute Key conforming to the "net.transport" // semantic conventions. It represents the transport protocol used. See // note below. // // Type: Enum // RequirementLevel: Optional // Stability: stable NetTransportKey = attribute.Key("net.transport") // NetAppProtocolNameKey is the attribute Key conforming to the // "net.app.protocol.name" semantic conventions. It represents the // application layer protocol used. The value SHOULD be normalized to // lowercase. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'amqp', 'http', 'mqtt' NetAppProtocolNameKey = attribute.Key("net.app.protocol.name") // NetAppProtocolVersionKey is the attribute Key conforming to the // "net.app.protocol.version" semantic conventions. It represents the // version of the application layer protocol used. See note below. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '3.1.1' // Note: `net.app.protocol.version` refers to the version of the protocol // used and might be different from the protocol client's version. If the // HTTP client used has a version of `0.27.2`, but sends HTTP version // `1.1`, this attribute should be set to `1.1`. NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version") // NetSockPeerNameKey is the attribute Key conforming to the // "net.sock.peer.name" semantic conventions. It represents the remote // socket peer name. // // Type: string // RequirementLevel: Recommended (If available and different from // `net.peer.name` and if `net.sock.peer.addr` is set.) // Stability: stable // Examples: 'proxy.example.com' NetSockPeerNameKey = attribute.Key("net.sock.peer.name") // NetSockPeerAddrKey is the attribute Key conforming to the // "net.sock.peer.addr" semantic conventions. It represents the remote // socket peer address: IPv4 or IPv6 for internet protocols, path for local // communication, // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '127.0.0.1', '/tmp/mysql.sock' NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") // NetSockPeerPortKey is the attribute Key conforming to the // "net.sock.peer.port" semantic conventions. 
It represents the remote // socket peer port. // // Type: int // RequirementLevel: Recommended (If defined for the address family and if // different than `net.peer.port` and if `net.sock.peer.addr` is set.) // Stability: stable // Examples: 16456 NetSockPeerPortKey = attribute.Key("net.sock.peer.port") // NetSockFamilyKey is the attribute Key conforming to the // "net.sock.family" semantic conventions. It represents the protocol // [address // family](https://man7.org/linux/man-pages/man7/address_families.7.html) // which is used for communication. // // Type: Enum // RequirementLevel: ConditionallyRequired (If different than `inet` and if // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support // instrumentations that follow previous versions of this document.) // Stability: stable // Examples: 'inet6', 'bluetooth' NetSockFamilyKey = attribute.Key("net.sock.family") // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" // semantic conventions. It represents the logical remote hostname, see // note below. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'example.com' // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an // extra DNS lookup. NetPeerNameKey = attribute.Key("net.peer.name") // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" // semantic conventions. It represents the logical remote port number // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 80, 8080, 443 NetPeerPortKey = attribute.Key("net.peer.port") // NetHostNameKey is the attribute Key conforming to the "net.host.name" // semantic conventions. It represents the logical local hostname or // similar, see note below. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'localhost' NetHostNameKey = attribute.Key("net.host.name") // NetHostPortKey is the attribute Key conforming to the "net.host.port" // semantic conventions. It represents the logical local port number, // preferably the one that the peer used to connect // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 8080 NetHostPortKey = attribute.Key("net.host.port") // NetSockHostAddrKey is the attribute Key conforming to the // "net.sock.host.addr" semantic conventions. It represents the local // socket address. Useful in case of a multi-IP host. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '192.168.0.1' NetSockHostAddrKey = attribute.Key("net.sock.host.addr") // NetSockHostPortKey is the attribute Key conforming to the // "net.sock.host.port" semantic conventions. It represents the local // socket port number. // // Type: int // RequirementLevel: Recommended (If defined for the address family and if // different than `net.host.port` and if `net.sock.host.addr` is set.) // Stability: stable // Examples: 35555 NetSockHostPortKey = attribute.Key("net.sock.host.port") // NetHostConnectionTypeKey is the attribute Key conforming to the // "net.host.connection.type" semantic conventions. It represents the // internet connection type currently being used by the host. 
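// Illustrative usage sketch (not generated from the specification): network
// attributes on a client span, combining the logical peer with the resolved
// socket address; values are the documented examples.
//
//	span.SetAttributes(
//		semconv.NetTransportTCP,
//		semconv.NetPeerName("example.com"),
//		semconv.NetPeerPort(443),
//		semconv.NetSockPeerAddr("127.0.0.1"),
//	)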
// // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'wifi' NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") // NetHostConnectionSubtypeKey is the attribute Key conforming to the // "net.host.connection.subtype" semantic conventions. It represents the // this describes more details regarding the connection.type. It may be the // type of cell technology connection, but it could be used for describing // details about a wifi connection. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'LTE' NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") // NetHostCarrierNameKey is the attribute Key conforming to the // "net.host.carrier.name" semantic conventions. It represents the name of // the mobile carrier. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'sprint' NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") // NetHostCarrierMccKey is the attribute Key conforming to the // "net.host.carrier.mcc" semantic conventions. It represents the mobile // carrier country code. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '310' NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") // NetHostCarrierMncKey is the attribute Key conforming to the // "net.host.carrier.mnc" semantic conventions. It represents the mobile // carrier network code. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '001' NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") // NetHostCarrierIccKey is the attribute Key conforming to the // "net.host.carrier.icc" semantic conventions. It represents the ISO // 3166-1 alpha-2 2-character country code associated with the mobile // carrier network. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'DE' NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") ) var ( // ip_tcp NetTransportTCP = NetTransportKey.String("ip_tcp") // ip_udp NetTransportUDP = NetTransportKey.String("ip_udp") // Named or anonymous pipe. See note below NetTransportPipe = NetTransportKey.String("pipe") // In-process communication NetTransportInProc = NetTransportKey.String("inproc") // Something else (non IP-based) NetTransportOther = NetTransportKey.String("other") ) var ( // IPv4 address NetSockFamilyInet = NetSockFamilyKey.String("inet") // IPv6 address NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") // Unix domain socket path NetSockFamilyUnix = NetSockFamilyKey.String("unix") ) var ( // wifi NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") // wired NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") // cell NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") // unavailable NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") // unknown NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") ) var ( // GPRS NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") // EDGE NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") // UMTS NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") // CDMA NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") // EVDO Rel. 0 NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") // EVDO Rev. 
A NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") // CDMA2000 1XRTT NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") // HSDPA NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") // HSUPA NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") // HSPA NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") // IDEN NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") // EVDO Rev. B NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") // LTE NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") // EHRPD NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") // HSPAP NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") // GSM NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") // TD-SCDMA NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") // IWLAN NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") // 5G NR (New Radio) NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") // 5G NRNSA (New Radio Non-Standalone) NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") // LTE CA NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") ) // NetAppProtocolName returns an attribute KeyValue conforming to the // "net.app.protocol.name" semantic conventions. It represents the application // layer protocol used. The value SHOULD be normalized to lowercase. func NetAppProtocolName(val string) attribute.KeyValue { return NetAppProtocolNameKey.String(val) } // NetAppProtocolVersion returns an attribute KeyValue conforming to the // "net.app.protocol.version" semantic conventions. It represents the version // of the application layer protocol used. See note below. func NetAppProtocolVersion(val string) attribute.KeyValue { return NetAppProtocolVersionKey.String(val) } // NetSockPeerName returns an attribute KeyValue conforming to the // "net.sock.peer.name" semantic conventions. It represents the remote socket // peer name. func NetSockPeerName(val string) attribute.KeyValue { return NetSockPeerNameKey.String(val) } // NetSockPeerAddr returns an attribute KeyValue conforming to the // "net.sock.peer.addr" semantic conventions. It represents the remote socket // peer address: IPv4 or IPv6 for internet protocols, path for local // communication, // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). func NetSockPeerAddr(val string) attribute.KeyValue { return NetSockPeerAddrKey.String(val) } // NetSockPeerPort returns an attribute KeyValue conforming to the // "net.sock.peer.port" semantic conventions. It represents the remote socket // peer port. func NetSockPeerPort(val int) attribute.KeyValue { return NetSockPeerPortKey.Int(val) } // NetPeerName returns an attribute KeyValue conforming to the // "net.peer.name" semantic conventions. It represents the logical remote // hostname, see note below. func NetPeerName(val string) attribute.KeyValue { return NetPeerNameKey.String(val) } // NetPeerPort returns an attribute KeyValue conforming to the // "net.peer.port" semantic conventions. It represents the logical remote port // number func NetPeerPort(val int) attribute.KeyValue { return NetPeerPortKey.Int(val) } // NetHostName returns an attribute KeyValue conforming to the // "net.host.name" semantic conventions. 
It represents the logical local // hostname or similar, see note below. func NetHostName(val string) attribute.KeyValue { return NetHostNameKey.String(val) } // NetHostPort returns an attribute KeyValue conforming to the // "net.host.port" semantic conventions. It represents the logical local port // number, preferably the one that the peer used to connect func NetHostPort(val int) attribute.KeyValue { return NetHostPortKey.Int(val) } // NetSockHostAddr returns an attribute KeyValue conforming to the // "net.sock.host.addr" semantic conventions. It represents the local socket // address. Useful in case of a multi-IP host. func NetSockHostAddr(val string) attribute.KeyValue { return NetSockHostAddrKey.String(val) } // NetSockHostPort returns an attribute KeyValue conforming to the // "net.sock.host.port" semantic conventions. It represents the local socket // port number. func NetSockHostPort(val int) attribute.KeyValue { return NetSockHostPortKey.Int(val) } // NetHostCarrierName returns an attribute KeyValue conforming to the // "net.host.carrier.name" semantic conventions. It represents the name of the // mobile carrier. func NetHostCarrierName(val string) attribute.KeyValue { return NetHostCarrierNameKey.String(val) } // NetHostCarrierMcc returns an attribute KeyValue conforming to the // "net.host.carrier.mcc" semantic conventions. It represents the mobile // carrier country code. func NetHostCarrierMcc(val string) attribute.KeyValue { return NetHostCarrierMccKey.String(val) } // NetHostCarrierMnc returns an attribute KeyValue conforming to the // "net.host.carrier.mnc" semantic conventions. It represents the mobile // carrier network code. func NetHostCarrierMnc(val string) attribute.KeyValue { return NetHostCarrierMncKey.String(val) } // NetHostCarrierIcc returns an attribute KeyValue conforming to the // "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 // alpha-2 2-character country code associated with the mobile carrier network. func NetHostCarrierIcc(val string) attribute.KeyValue { return NetHostCarrierIccKey.String(val) } // Operations that access some remote service. const ( // PeerServiceKey is the attribute Key conforming to the "peer.service" // semantic conventions. It represents the // [`service.name`](../../resource/semantic_conventions/README.md#service) // of the remote service. SHOULD be equal to the actual `service.name` // resource attribute of the remote service if any. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'AuthTokenCache' PeerServiceKey = attribute.Key("peer.service") ) // PeerService returns an attribute KeyValue conforming to the // "peer.service" semantic conventions. It represents the // [`service.name`](../../resource/semantic_conventions/README.md#service) of // the remote service. SHOULD be equal to the actual `service.name` resource // attribute of the remote service if any. func PeerService(val string) attribute.KeyValue { return PeerServiceKey.String(val) } // These attributes may be used for any operation with an authenticated and/or // authorized enduser. const ( // EnduserIDKey is the attribute Key conforming to the "enduser.id" // semantic conventions. It represents the username or client_id extracted // from the access token or // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header // in the inbound request from outside the system. 
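// Illustrative usage sketch (not generated from the specification):
// peer.service and enduser.* attributes on a span for an authenticated call
// to a remote service; values are the documented examples.
//
//	span.SetAttributes(
//		semconv.PeerService("AuthTokenCache"),
//		semconv.EnduserID("username"),
//		semconv.EnduserRole("admin"),
//	)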
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'username' EnduserIDKey = attribute.Key("enduser.id") // EnduserRoleKey is the attribute Key conforming to the "enduser.role" // semantic conventions. It represents the actual/assumed role the client // is making the request under extracted from token or application security // context. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'admin' EnduserRoleKey = attribute.Key("enduser.role") // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" // semantic conventions. It represents the scopes or granted authorities // the client currently possesses extracted from token or application // security context. The value would come from the scope associated with an // [OAuth 2.0 Access // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute // value in a [SAML 2.0 // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'read:message, write:files' EnduserScopeKey = attribute.Key("enduser.scope") ) // EnduserID returns an attribute KeyValue conforming to the "enduser.id" // semantic conventions. It represents the username or client_id extracted from // the access token or // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in // the inbound request from outside the system. func EnduserID(val string) attribute.KeyValue { return EnduserIDKey.String(val) } // EnduserRole returns an attribute KeyValue conforming to the // "enduser.role" semantic conventions. It represents the actual/assumed role // the client is making the request under extracted from token or application // security context. func EnduserRole(val string) attribute.KeyValue { return EnduserRoleKey.String(val) } // EnduserScope returns an attribute KeyValue conforming to the // "enduser.scope" semantic conventions. It represents the scopes or granted // authorities the client currently possesses extracted from token or // application security context. The value would come from the scope associated // with an [OAuth 2.0 Access // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute // value in a [SAML 2.0 // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). func EnduserScope(val string) attribute.KeyValue { return EnduserScopeKey.String(val) } // These attributes may be used for any operation to store information about a // thread that started a span. const ( // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic // conventions. It represents the current "managed" thread ID (as opposed // to OS thread ID). // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 42 ThreadIDKey = attribute.Key("thread.id") // ThreadNameKey is the attribute Key conforming to the "thread.name" // semantic conventions. It represents the current thread name. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'main' ThreadNameKey = attribute.Key("thread.name") ) // ThreadID returns an attribute KeyValue conforming to the "thread.id" // semantic conventions. It represents the current "managed" thread ID (as // opposed to OS thread ID). func ThreadID(val int) attribute.KeyValue { return ThreadIDKey.Int(val) } // ThreadName returns an attribute KeyValue conforming to the "thread.name" // semantic conventions. 
It represents the current thread name. func ThreadName(val string) attribute.KeyValue { return ThreadNameKey.String(val) } // These attributes allow to report this unit of code and therefore to provide // more context about the span. const ( // CodeFunctionKey is the attribute Key conforming to the "code.function" // semantic conventions. It represents the method or function name, or // equivalent (usually rightmost part of the code unit's name). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'serveRequest' CodeFunctionKey = attribute.Key("code.function") // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" // semantic conventions. It represents the "namespace" within which // `code.function` is defined. Usually the qualified class or module name, // such that `code.namespace` + some separator + `code.function` form a // unique identifier for the code unit. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'com.example.MyHTTPService' CodeNamespaceKey = attribute.Key("code.namespace") // CodeFilepathKey is the attribute Key conforming to the "code.filepath" // semantic conventions. It represents the source code file name that // identifies the code unit as uniquely as possible (preferably an absolute // file path). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '/usr/local/MyApplication/content_root/app/index.php' CodeFilepathKey = attribute.Key("code.filepath") // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" // semantic conventions. It represents the line number in `code.filepath` // best representing the operation. It SHOULD point within the code unit // named in `code.function`. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 42 CodeLineNumberKey = attribute.Key("code.lineno") // CodeColumnKey is the attribute Key conforming to the "code.column" // semantic conventions. It represents the column number in `code.filepath` // best representing the operation. It SHOULD point within the code unit // named in `code.function`. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 16 CodeColumnKey = attribute.Key("code.column") ) // CodeFunction returns an attribute KeyValue conforming to the // "code.function" semantic conventions. It represents the method or function // name, or equivalent (usually rightmost part of the code unit's name). func CodeFunction(val string) attribute.KeyValue { return CodeFunctionKey.String(val) } // CodeNamespace returns an attribute KeyValue conforming to the // "code.namespace" semantic conventions. It represents the "namespace" within // which `code.function` is defined. Usually the qualified class or module // name, such that `code.namespace` + some separator + `code.function` form a // unique identifier for the code unit. func CodeNamespace(val string) attribute.KeyValue { return CodeNamespaceKey.String(val) } // CodeFilepath returns an attribute KeyValue conforming to the // "code.filepath" semantic conventions. It represents the source code file // name that identifies the code unit as uniquely as possible (preferably an // absolute file path). func CodeFilepath(val string) attribute.KeyValue { return CodeFilepathKey.String(val) } // CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" // semantic conventions. It represents the line number in `code.filepath` best // representing the operation. 
It SHOULD point within the code unit named in // `code.function`. func CodeLineNumber(val int) attribute.KeyValue { return CodeLineNumberKey.Int(val) } // CodeColumn returns an attribute KeyValue conforming to the "code.column" // semantic conventions. It represents the column number in `code.filepath` // best representing the operation. It SHOULD point within the code unit named // in `code.function`. func CodeColumn(val int) attribute.KeyValue { return CodeColumnKey.Int(val) } // Semantic conventions for HTTP client and server Spans. const ( // HTTPMethodKey is the attribute Key conforming to the "http.method" // semantic conventions. It represents the hTTP request method. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'GET', 'POST', 'HEAD' HTTPMethodKey = attribute.Key("http.method") // HTTPStatusCodeKey is the attribute Key conforming to the // "http.status_code" semantic conventions. It represents the [HTTP // response status code](https://tools.ietf.org/html/rfc7231#section-6). // // Type: int // RequirementLevel: ConditionallyRequired (If and only if one was // received/sent.) // Stability: stable // Examples: 200 HTTPStatusCodeKey = attribute.Key("http.status_code") // HTTPFlavorKey is the attribute Key conforming to the "http.flavor" // semantic conventions. It represents the kind of HTTP protocol used. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: If `net.transport` is not specified, it can be assumed to be // `IP.TCP` except if `http.flavor` is `QUIC`, in which case `IP.UDP` is // assumed. HTTPFlavorKey = attribute.Key("http.flavor") // HTTPUserAgentKey is the attribute Key conforming to the // "http.user_agent" semantic conventions. It represents the value of the // [HTTP // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) // header sent by the client. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' HTTPUserAgentKey = attribute.Key("http.user_agent") // HTTPRequestContentLengthKey is the attribute Key conforming to the // "http.request_content_length" semantic conventions. It represents the // size of the request payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as // the // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) // header. For requests using transport encoding, this should be the // compressed size. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 3495 HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") // HTTPResponseContentLengthKey is the attribute Key conforming to the // "http.response_content_length" semantic conventions. It represents the // size of the response payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as // the // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) // header. For requests using transport encoding, this should be the // compressed size. 
// // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 3495 HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") ) var ( // HTTP/1.0 HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") // HTTP/1.1 HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") // HTTP/2 HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") // HTTP/3 HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") // SPDY protocol HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") // QUIC protocol HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") ) // HTTPMethod returns an attribute KeyValue conforming to the "http.method" // semantic conventions. It represents the hTTP request method. func HTTPMethod(val string) attribute.KeyValue { return HTTPMethodKey.String(val) } // HTTPStatusCode returns an attribute KeyValue conforming to the // "http.status_code" semantic conventions. It represents the [HTTP response // status code](https://tools.ietf.org/html/rfc7231#section-6). func HTTPStatusCode(val int) attribute.KeyValue { return HTTPStatusCodeKey.Int(val) } // HTTPUserAgent returns an attribute KeyValue conforming to the // "http.user_agent" semantic conventions. It represents the value of the [HTTP // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) // header sent by the client. func HTTPUserAgent(val string) attribute.KeyValue { return HTTPUserAgentKey.String(val) } // HTTPRequestContentLength returns an attribute KeyValue conforming to the // "http.request_content_length" semantic conventions. It represents the size // of the request payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) // header. For requests using transport encoding, this should be the compressed // size. func HTTPRequestContentLength(val int) attribute.KeyValue { return HTTPRequestContentLengthKey.Int(val) } // HTTPResponseContentLength returns an attribute KeyValue conforming to the // "http.response_content_length" semantic conventions. It represents the size // of the response payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) // header. For requests using transport encoding, this should be the compressed // size. func HTTPResponseContentLength(val int) attribute.KeyValue { return HTTPResponseContentLengthKey.Int(val) } // Semantic Convention for HTTP Client const ( // HTTPURLKey is the attribute Key conforming to the "http.url" semantic // conventions. It represents the full HTTP request URL in the form // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is // not transmitted over HTTP, but if it is known, it should be included // nevertheless. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' // Note: `http.url` MUST NOT contain credentials passed via URL in form of // `https://username:password@www.example.com/`. In such case the // attribute's value should be `https://www.example.com/`. HTTPURLKey = attribute.Key("http.url") // HTTPResendCountKey is the attribute Key conforming to the // "http.resend_count" semantic conventions. It represents the ordinal // number of request resending attempt (for any reason, including // redirects). 
// // Type: int // RequirementLevel: Recommended (if and only if request was retried.) // Stability: stable // Examples: 3 // Note: The resend count SHOULD be updated each time an HTTP request gets // resent by the client, regardless of what was the cause of the resending // (e.g. redirection, authorization failure, 503 Server Unavailable, // network issues, or any other). HTTPResendCountKey = attribute.Key("http.resend_count") ) // HTTPURL returns an attribute KeyValue conforming to the "http.url" // semantic conventions. It represents the full HTTP request URL in the form // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not // transmitted over HTTP, but if it is known, it should be included // nevertheless. func HTTPURL(val string) attribute.KeyValue { return HTTPURLKey.String(val) } // HTTPResendCount returns an attribute KeyValue conforming to the // "http.resend_count" semantic conventions. It represents the ordinal number // of request resending attempt (for any reason, including redirects). func HTTPResendCount(val int) attribute.KeyValue { return HTTPResendCountKey.Int(val) } // Semantic Convention for HTTP Server const ( // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" // semantic conventions. It represents the URI scheme identifying the used // protocol. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'http', 'https' HTTPSchemeKey = attribute.Key("http.scheme") // HTTPTargetKey is the attribute Key conforming to the "http.target" // semantic conventions. It represents the full request target as passed in // a HTTP request line or equivalent. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: '/path/12314/?q=ddds' HTTPTargetKey = attribute.Key("http.target") // HTTPRouteKey is the attribute Key conforming to the "http.route" // semantic conventions. It represents the matched route (path template in // the format used by the respective server framework). See note below // // Type: string // RequirementLevel: ConditionallyRequired (If and only if it's available) // Stability: stable // Examples: '/users/:userID?', '{controller}/{action}/{id?}' // Note: 'http.route' MUST NOT be populated when this is not supported by // the HTTP server framework as the route attribute should have // low-cardinality and the URI path can NOT substitute it. HTTPRouteKey = attribute.Key("http.route") // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip" // semantic conventions. It represents the IP address of the original // client behind all proxies, if known (e.g. from // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '83.164.160.102' // Note: This is not necessarily the same as `net.sock.peer.addr`, which // would // identify the network-level peer, which may be a proxy. // // This attribute should be set when a source of information different // from the one used for `net.sock.peer.addr`, is available even if that // other // source just confirms the same value as `net.sock.peer.addr`. // Rationale: For `net.sock.peer.addr`, one typically does not know if it // comes from a proxy, reverse proxy, or the actual client. Setting // `http.client_ip` when it's the same as `net.sock.peer.addr` means that // one is at least somewhat confident that the address is not that of // the closest proxy. 
HTTPClientIPKey = attribute.Key("http.client_ip") ) // HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" // semantic conventions. It represents the URI scheme identifying the used // protocol. func HTTPScheme(val string) attribute.KeyValue { return HTTPSchemeKey.String(val) } // HTTPTarget returns an attribute KeyValue conforming to the "http.target" // semantic conventions. It represents the full request target as passed in a // HTTP request line or equivalent. func HTTPTarget(val string) attribute.KeyValue { return HTTPTargetKey.String(val) } // HTTPRoute returns an attribute KeyValue conforming to the "http.route" // semantic conventions. It represents the matched route (path template in the // format used by the respective server framework). See note below func HTTPRoute(val string) attribute.KeyValue { return HTTPRouteKey.String(val) } // HTTPClientIP returns an attribute KeyValue conforming to the // "http.client_ip" semantic conventions. It represents the IP address of the // original client behind all proxies, if known (e.g. from // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). func HTTPClientIP(val string) attribute.KeyValue { return HTTPClientIPKey.String(val) } // Attributes that exist for multiple DynamoDB request types. const ( // AWSDynamoDBTableNamesKey is the attribute Key conforming to the // "aws.dynamodb.table_names" semantic conventions. It represents the keys // in the `RequestItems` object field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'Users', 'Cats' AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the // JSON-serialized value of each item in the `ConsumedCapacity` response // field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : // { "CapacityUnits": number, "ReadCapacityUnits": number, // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": // { "CapacityUnits": number, "ReadCapacityUnits": number, // "WriteCapacityUnits": number }, "TableName": "string", // "WriteCapacityUnits": number }' AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to // the "aws.dynamodb.item_collection_metrics" semantic conventions. It // represents the JSON-serialized value of the `ItemCollectionMetrics` // response field. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, // "SizeEstimateRangeGB": [ number ] } ] }' AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` // request parameter. 
// // Type: double // RequirementLevel: Optional // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. // It represents the value of the // `ProvisionedThroughput.WriteCapacityUnits` request parameter. // // Type: double // RequirementLevel: Optional // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the // "aws.dynamodb.consistent_read" semantic conventions. It represents the // value of the `ConsistentRead` request parameter. // // Type: boolean // RequirementLevel: Optional // Stability: stable AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") // AWSDynamoDBProjectionKey is the attribute Key conforming to the // "aws.dynamodb.projection" semantic conventions. It represents the value // of the `ProjectionExpression` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Title', 'Title, Price, Color', 'Title, Description, // RelatedItems, ProductReviews' AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") // AWSDynamoDBLimitKey is the attribute Key conforming to the // "aws.dynamodb.limit" semantic conventions. It represents the value of // the `Limit` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the // value of the `AttributesToGet` request parameter. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'lives', 'id' AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") // AWSDynamoDBIndexNameKey is the attribute Key conforming to the // "aws.dynamodb.index_name" semantic conventions. It represents the value // of the `IndexName` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'name_to_group' AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") // AWSDynamoDBSelectKey is the attribute Key conforming to the // "aws.dynamodb.select" semantic conventions. It represents the value of // the `Select` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'ALL_ATTRIBUTES', 'COUNT' AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") ) // AWSDynamoDBTableNames returns an attribute KeyValue conforming to the // "aws.dynamodb.table_names" semantic conventions. It represents the keys in // the `RequestItems` object field. func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { return AWSDynamoDBTableNamesKey.StringSlice(val) } // AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to // the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the // JSON-serialized value of each item in the `ConsumedCapacity` response field. 
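//
// Example (illustrative sketch only, not part of the generated conventions):
// one way to attach the common DynamoDB attributes to a client span, assuming
// this package is imported as semconv and span is a
// go.opentelemetry.io/otel/trace.Span; the literal values are hypothetical.
//
//	span.SetAttributes(
//		semconv.AWSDynamoDBTableNames("Users"),
//		semconv.AWSDynamoDBConsistentRead(true),
//		semconv.AWSDynamoDBProvisionedReadCapacity(5.0),
//		semconv.AWSDynamoDBConsumedCapacity(`{"TableName":"Users","CapacityUnits":1.0}`),
//	)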
func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { return AWSDynamoDBConsumedCapacityKey.StringSlice(val) } // AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming // to the "aws.dynamodb.item_collection_metrics" semantic conventions. It // represents the JSON-serialized value of the `ItemCollectionMetrics` response // field. func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { return AWSDynamoDBItemCollectionMetricsKey.String(val) } // AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue // conforming to the "aws.dynamodb.provisioned_read_capacity" semantic // conventions. It represents the value of the // `ProvisionedThroughput.ReadCapacityUnits` request parameter. func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) } // AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue // conforming to the "aws.dynamodb.provisioned_write_capacity" semantic // conventions. It represents the value of the // `ProvisionedThroughput.WriteCapacityUnits` request parameter. func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) } // AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the // "aws.dynamodb.consistent_read" semantic conventions. It represents the value // of the `ConsistentRead` request parameter. func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { return AWSDynamoDBConsistentReadKey.Bool(val) } // AWSDynamoDBProjection returns an attribute KeyValue conforming to the // "aws.dynamodb.projection" semantic conventions. It represents the value of // the `ProjectionExpression` request parameter. func AWSDynamoDBProjection(val string) attribute.KeyValue { return AWSDynamoDBProjectionKey.String(val) } // AWSDynamoDBLimit returns an attribute KeyValue conforming to the // "aws.dynamodb.limit" semantic conventions. It represents the value of the // `Limit` request parameter. func AWSDynamoDBLimit(val int) attribute.KeyValue { return AWSDynamoDBLimitKey.Int(val) } // AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to // the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the // value of the `AttributesToGet` request parameter. func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { return AWSDynamoDBAttributesToGetKey.StringSlice(val) } // AWSDynamoDBIndexName returns an attribute KeyValue conforming to the // "aws.dynamodb.index_name" semantic conventions. It represents the value of // the `IndexName` request parameter. func AWSDynamoDBIndexName(val string) attribute.KeyValue { return AWSDynamoDBIndexNameKey.String(val) } // AWSDynamoDBSelect returns an attribute KeyValue conforming to the // "aws.dynamodb.select" semantic conventions. It represents the value of the // `Select` request parameter. func AWSDynamoDBSelect(val string) attribute.KeyValue { return AWSDynamoDBSelectKey.String(val) } // DynamoDB.CreateTable const ( // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to // the "aws.dynamodb.global_secondary_indexes" semantic conventions. 
It // represents the JSON-serialized value of each item of the // `GlobalSecondaryIndexes` request field // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It // represents the JSON-serialized value of each item of the // `LocalSecondaryIndexes` request field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "IndexARN": "string", "IndexName": "string", // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { // "AttributeName": "string", "KeyType": "string" } ], "Projection": { // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") ) // AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue // conforming to the "aws.dynamodb.global_secondary_indexes" semantic // conventions. It represents the JSON-serialized value of each item of the // `GlobalSecondaryIndexes` request field func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) } // AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming // to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It // represents the JSON-serialized value of each item of the // `LocalSecondaryIndexes` request field. func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) } // DynamoDB.ListTables const ( // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents // the value of the `ExclusiveStartTableName` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Users', 'CatsTable' AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") // AWSDynamoDBTableCountKey is the attribute Key conforming to the // "aws.dynamodb.table_count" semantic conventions. It represents the the // number of items in the `TableNames` response parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 20 AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") ) // AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming // to the "aws.dynamodb.exclusive_start_table" semantic conventions. It // represents the value of the `ExclusiveStartTableName` request parameter. func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { return AWSDynamoDBExclusiveStartTableKey.String(val) } // AWSDynamoDBTableCount returns an attribute KeyValue conforming to the // "aws.dynamodb.table_count" semantic conventions. It represents the the // number of items in the `TableNames` response parameter. 
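//
// Example (illustrative sketch only): recording DynamoDB.ListTables
// attributes, assuming this package is imported as semconv and span is a
// go.opentelemetry.io/otel/trace.Span; the values are hypothetical.
//
//	span.SetAttributes(
//		semconv.AWSDynamoDBExclusiveStartTable("Users"),
//		semconv.AWSDynamoDBTableCount(20),
//	)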
func AWSDynamoDBTableCount(val int) attribute.KeyValue { return AWSDynamoDBTableCountKey.Int(val) } // DynamoDB.Query const ( // AWSDynamoDBScanForwardKey is the attribute Key conforming to the // "aws.dynamodb.scan_forward" semantic conventions. It represents the // value of the `ScanIndexForward` request parameter. // // Type: boolean // RequirementLevel: Optional // Stability: stable AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") ) // AWSDynamoDBScanForward returns an attribute KeyValue conforming to the // "aws.dynamodb.scan_forward" semantic conventions. It represents the value of // the `ScanIndexForward` request parameter. func AWSDynamoDBScanForward(val bool) attribute.KeyValue { return AWSDynamoDBScanForwardKey.Bool(val) } // DynamoDB.Scan const ( // AWSDynamoDBSegmentKey is the attribute Key conforming to the // "aws.dynamodb.segment" semantic conventions. It represents the value of // the `Segment` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the // "aws.dynamodb.total_segments" semantic conventions. It represents the // value of the `TotalSegments` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 100 AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") // AWSDynamoDBCountKey is the attribute Key conforming to the // "aws.dynamodb.count" semantic conventions. It represents the value of // the `Count` response parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") // AWSDynamoDBScannedCountKey is the attribute Key conforming to the // "aws.dynamodb.scanned_count" semantic conventions. It represents the // value of the `ScannedCount` response parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 50 AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") ) // AWSDynamoDBSegment returns an attribute KeyValue conforming to the // "aws.dynamodb.segment" semantic conventions. It represents the value of the // `Segment` request parameter. func AWSDynamoDBSegment(val int) attribute.KeyValue { return AWSDynamoDBSegmentKey.Int(val) } // AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the // "aws.dynamodb.total_segments" semantic conventions. It represents the value // of the `TotalSegments` request parameter. func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { return AWSDynamoDBTotalSegmentsKey.Int(val) } // AWSDynamoDBCount returns an attribute KeyValue conforming to the // "aws.dynamodb.count" semantic conventions. It represents the value of the // `Count` response parameter. func AWSDynamoDBCount(val int) attribute.KeyValue { return AWSDynamoDBCountKey.Int(val) } // AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the // "aws.dynamodb.scanned_count" semantic conventions. It represents the value // of the `ScannedCount` response parameter. func AWSDynamoDBScannedCount(val int) attribute.KeyValue { return AWSDynamoDBScannedCountKey.Int(val) } // DynamoDB.UpdateTable const ( // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to // the "aws.dynamodb.attribute_definitions" semantic conventions. It // represents the JSON-serialized value of each item in the // `AttributeDefinitions` request field. 
// // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic // conventions. It represents the JSON-serialized value of each item in the // the `GlobalSecondaryIndexUpdates` request field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { // "AttributeName": "string", "KeyType": "string" } ], "Projection": { // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, // "ProvisionedThroughput": { "ReadCapacityUnits": number, // "WriteCapacityUnits": number } }' AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") ) // AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming // to the "aws.dynamodb.attribute_definitions" semantic conventions. It // represents the JSON-serialized value of each item in the // `AttributeDefinitions` request field. func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) } // AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic // conventions. It represents the JSON-serialized value of each item in the the // `GlobalSecondaryIndexUpdates` request field. func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) } // Semantic conventions to apply when instrumenting the GraphQL implementation. // They map GraphQL operations to attributes on a Span. const ( // GraphqlOperationNameKey is the attribute Key conforming to the // "graphql.operation.name" semantic conventions. It represents the name of // the operation being executed. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'findBookByID' GraphqlOperationNameKey = attribute.Key("graphql.operation.name") // GraphqlOperationTypeKey is the attribute Key conforming to the // "graphql.operation.type" semantic conventions. It represents the type of // the operation being executed. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'query', 'mutation', 'subscription' GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") // GraphqlDocumentKey is the attribute Key conforming to the // "graphql.document" semantic conventions. It represents the GraphQL // document being executed. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'query findBookByID { bookByID(id: ?) { name } }' // Note: The value may be sanitized to exclude sensitive information. GraphqlDocumentKey = attribute.Key("graphql.document") ) var ( // GraphQL query GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") // GraphQL mutation GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") // GraphQL subscription GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") ) // GraphqlOperationName returns an attribute KeyValue conforming to the // "graphql.operation.name" semantic conventions. It represents the name of the // operation being executed. 
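//
// Example (illustrative sketch only): annotating a GraphQL server span,
// assuming this package is imported as semconv and span is a
// go.opentelemetry.io/otel/trace.Span; the operation shown is hypothetical.
//
//	span.SetAttributes(
//		semconv.GraphqlOperationName("findBookByID"),
//		semconv.GraphqlOperationTypeQuery,
//		semconv.GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"),
//	)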
func GraphqlOperationName(val string) attribute.KeyValue { return GraphqlOperationNameKey.String(val) } // GraphqlDocument returns an attribute KeyValue conforming to the // "graphql.document" semantic conventions. It represents the GraphQL document // being executed. func GraphqlDocument(val string) attribute.KeyValue { return GraphqlDocumentKey.String(val) } // Semantic convention describing per-message attributes populated on messaging // spans or links. const ( // MessagingMessageIDKey is the attribute Key conforming to the // "messaging.message.id" semantic conventions. It represents a value used // by the messaging system as an identifier for the message, represented as // a string. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '452a7c7c7c7048c2f887f61572b18fc2' MessagingMessageIDKey = attribute.Key("messaging.message.id") // MessagingMessageConversationIDKey is the attribute Key conforming to the // "messaging.message.conversation_id" semantic conventions. It represents // the [conversation ID](#conversations) identifying the conversation to // which the message belongs, represented as a string. Sometimes called // "Correlation ID". // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MyConversationID' MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to // the "messaging.message.payload_size_bytes" semantic conventions. It // represents the (uncompressed) size of the message payload in bytes. Also // use this attribute if it is unknown whether the compressed or // uncompressed payload size is reported. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2738 MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key // conforming to the "messaging.message.payload_compressed_size_bytes" // semantic conventions. It represents the compressed size of the message // payload in bytes. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2048 MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") ) // MessagingMessageID returns an attribute KeyValue conforming to the // "messaging.message.id" semantic conventions. It represents a value used by // the messaging system as an identifier for the message, represented as a // string. func MessagingMessageID(val string) attribute.KeyValue { return MessagingMessageIDKey.String(val) } // MessagingMessageConversationID returns an attribute KeyValue conforming // to the "messaging.message.conversation_id" semantic conventions. It // represents the [conversation ID](#conversations) identifying the // conversation to which the message belongs, represented as a string. // Sometimes called "Correlation ID". func MessagingMessageConversationID(val string) attribute.KeyValue { return MessagingMessageConversationIDKey.String(val) } // MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming // to the "messaging.message.payload_size_bytes" semantic conventions. It // represents the (uncompressed) size of the message payload in bytes. Also use // this attribute if it is unknown whether the compressed or uncompressed // payload size is reported. 
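//
// Example (illustrative sketch only): describing a single message on a
// messaging span, assuming this package is imported as semconv and span is a
// go.opentelemetry.io/otel/trace.Span; the values mirror the documented
// examples and are hypothetical.
//
//	span.SetAttributes(
//		semconv.MessagingSystem("rabbitmq"),
//		semconv.MessagingMessageID("452a7c7c7c7048c2f887f61572b18fc2"),
//		semconv.MessagingMessagePayloadSizeBytes(2738),
//	)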
func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { return MessagingMessagePayloadSizeBytesKey.Int(val) } // MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue // conforming to the "messaging.message.payload_compressed_size_bytes" semantic // conventions. It represents the compressed size of the message payload in // bytes. func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) } // Semantic convention for attributes that describe messaging destination on // broker const ( // MessagingDestinationNameKey is the attribute Key conforming to the // "messaging.destination.name" semantic conventions. It represents the // message destination name // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MyQueue', 'MyTopic' // Note: Destination name SHOULD uniquely identify a specific queue, topic // or other entity within the broker. If // the broker does not have such notion, the destination name SHOULD // uniquely identify the broker. MessagingDestinationNameKey = attribute.Key("messaging.destination.name") // MessagingDestinationKindKey is the attribute Key conforming to the // "messaging.destination.kind" semantic conventions. It represents the // kind of message destination // // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingDestinationKindKey = attribute.Key("messaging.destination.kind") // MessagingDestinationTemplateKey is the attribute Key conforming to the // "messaging.destination.template" semantic conventions. It represents the // low cardinality representation of the messaging destination name // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '/customers/{customerID}' // Note: Destination names could be constructed from templates. An example // would be a destination name involving a user name or product id. // Although the destination name in this case is of high cardinality, the // underlying template is of low cardinality and can be effectively used // for grouping and aggregation. MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") // MessagingDestinationTemporaryKey is the attribute Key conforming to the // "messaging.destination.temporary" semantic conventions. It represents a // boolean that is true if the message destination is temporary and might // not exist anymore after messages are processed. // // Type: boolean // RequirementLevel: Optional // Stability: stable MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") // MessagingDestinationAnonymousKey is the attribute Key conforming to the // "messaging.destination.anonymous" semantic conventions. It represents a // boolean that is true if the message destination is anonymous (could be // unnamed or have auto-generated name). // // Type: boolean // RequirementLevel: Optional // Stability: stable MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") ) var ( // A message sent to a queue MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") // A message sent to a topic MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") ) // MessagingDestinationName returns an attribute KeyValue conforming to the // "messaging.destination.name" semantic conventions. 
It represents the message // destination name func MessagingDestinationName(val string) attribute.KeyValue { return MessagingDestinationNameKey.String(val) } // MessagingDestinationTemplate returns an attribute KeyValue conforming to // the "messaging.destination.template" semantic conventions. It represents the // low cardinality representation of the messaging destination name func MessagingDestinationTemplate(val string) attribute.KeyValue { return MessagingDestinationTemplateKey.String(val) } // MessagingDestinationTemporary returns an attribute KeyValue conforming to // the "messaging.destination.temporary" semantic conventions. It represents a // boolean that is true if the message destination is temporary and might not // exist anymore after messages are processed. func MessagingDestinationTemporary(val bool) attribute.KeyValue { return MessagingDestinationTemporaryKey.Bool(val) } // MessagingDestinationAnonymous returns an attribute KeyValue conforming to // the "messaging.destination.anonymous" semantic conventions. It represents a // boolean that is true if the message destination is anonymous (could be // unnamed or have auto-generated name). func MessagingDestinationAnonymous(val bool) attribute.KeyValue { return MessagingDestinationAnonymousKey.Bool(val) } // Semantic convention for attributes that describe messaging source on broker const ( // MessagingSourceNameKey is the attribute Key conforming to the // "messaging.source.name" semantic conventions. It represents the message // source name // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MyQueue', 'MyTopic' // Note: Source name SHOULD uniquely identify a specific queue, topic, or // other entity within the broker. If // the broker does not have such notion, the source name SHOULD uniquely // identify the broker. MessagingSourceNameKey = attribute.Key("messaging.source.name") // MessagingSourceKindKey is the attribute Key conforming to the // "messaging.source.kind" semantic conventions. It represents the kind of // message source // // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingSourceKindKey = attribute.Key("messaging.source.kind") // MessagingSourceTemplateKey is the attribute Key conforming to the // "messaging.source.template" semantic conventions. It represents the low // cardinality representation of the messaging source name // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '/customers/{customerID}' // Note: Source names could be constructed from templates. An example would // be a source name involving a user name or product id. Although the // source name in this case is of high cardinality, the underlying template // is of low cardinality and can be effectively used for grouping and // aggregation. MessagingSourceTemplateKey = attribute.Key("messaging.source.template") // MessagingSourceTemporaryKey is the attribute Key conforming to the // "messaging.source.temporary" semantic conventions. It represents a // boolean that is true if the message source is temporary and might not // exist anymore after messages are processed. // // Type: boolean // RequirementLevel: Optional // Stability: stable MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") // MessagingSourceAnonymousKey is the attribute Key conforming to the // "messaging.source.anonymous" semantic conventions. It represents a // boolean that is true if the message source is anonymous (could be // unnamed or have auto-generated name). 
// // Type: boolean // RequirementLevel: Optional // Stability: stable MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous") ) var ( // A message received from a queue MessagingSourceKindQueue = MessagingSourceKindKey.String("queue") // A message received from a topic MessagingSourceKindTopic = MessagingSourceKindKey.String("topic") ) // MessagingSourceName returns an attribute KeyValue conforming to the // "messaging.source.name" semantic conventions. It represents the message // source name func MessagingSourceName(val string) attribute.KeyValue { return MessagingSourceNameKey.String(val) } // MessagingSourceTemplate returns an attribute KeyValue conforming to the // "messaging.source.template" semantic conventions. It represents the low // cardinality representation of the messaging source name func MessagingSourceTemplate(val string) attribute.KeyValue { return MessagingSourceTemplateKey.String(val) } // MessagingSourceTemporary returns an attribute KeyValue conforming to the // "messaging.source.temporary" semantic conventions. It represents a boolean // that is true if the message source is temporary and might not exist anymore // after messages are processed. func MessagingSourceTemporary(val bool) attribute.KeyValue { return MessagingSourceTemporaryKey.Bool(val) } // MessagingSourceAnonymous returns an attribute KeyValue conforming to the // "messaging.source.anonymous" semantic conventions. It represents a boolean // that is true if the message source is anonymous (could be unnamed or have // auto-generated name). func MessagingSourceAnonymous(val bool) attribute.KeyValue { return MessagingSourceAnonymousKey.Bool(val) } // General attributes used in messaging systems. const ( // MessagingSystemKey is the attribute Key conforming to the // "messaging.system" semantic conventions. It represents a string // identifying the messaging system. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' MessagingSystemKey = attribute.Key("messaging.system") // MessagingOperationKey is the attribute Key conforming to the // "messaging.operation" semantic conventions. It represents a string // identifying the kind of messaging operation as defined in the [Operation // names](#operation-names) section above. // // Type: Enum // RequirementLevel: Required // Stability: stable // Note: If a custom value is used, it MUST be of low cardinality. MessagingOperationKey = attribute.Key("messaging.operation") // MessagingBatchMessageCountKey is the attribute Key conforming to the // "messaging.batch.message_count" semantic conventions. It represents the // number of messages sent, received, or processed in the scope of the // batching operation. // // Type: int // RequirementLevel: ConditionallyRequired (If the span describes an // operation on a batch of messages.) // Stability: stable // Examples: 0, 1, 2 // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on // spans that operate with a single message. When a messaging client // library supports both batch and single-message API for the same // operation, instrumentations SHOULD use `messaging.batch.message_count` // for batching APIs and SHOULD NOT use it for single-message APIs. 
MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") ) var ( // publish MessagingOperationPublish = MessagingOperationKey.String("publish") // receive MessagingOperationReceive = MessagingOperationKey.String("receive") // process MessagingOperationProcess = MessagingOperationKey.String("process") ) // MessagingSystem returns an attribute KeyValue conforming to the // "messaging.system" semantic conventions. It represents a string identifying // the messaging system. func MessagingSystem(val string) attribute.KeyValue { return MessagingSystemKey.String(val) } // MessagingBatchMessageCount returns an attribute KeyValue conforming to // the "messaging.batch.message_count" semantic conventions. It represents the // number of messages sent, received, or processed in the scope of the batching // operation. func MessagingBatchMessageCount(val int) attribute.KeyValue { return MessagingBatchMessageCountKey.Int(val) } // Semantic convention for a consumer of messages received from a messaging // system const ( // MessagingConsumerIDKey is the attribute Key conforming to the // "messaging.consumer.id" semantic conventions. It represents the // identifier for the consumer receiving a message. For Kafka, set it to // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if // both are present, or only `messaging.kafka.consumer.group`. For brokers, // such as RabbitMQ and Artemis, set it to the `client_id` of the client // consuming the message. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'mygroup - client-6' MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") ) // MessagingConsumerID returns an attribute KeyValue conforming to the // "messaging.consumer.id" semantic conventions. It represents the identifier // for the consumer receiving a message. For Kafka, set it to // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both // are present, or only `messaging.kafka.consumer.group`. For brokers, such as // RabbitMQ and Artemis, set it to the `client_id` of the client consuming the // message. func MessagingConsumerID(val string) attribute.KeyValue { return MessagingConsumerIDKey.String(val) } // Attributes for RabbitMQ const ( // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key // conforming to the "messaging.rabbitmq.destination.routing_key" semantic // conventions. It represents the rabbitMQ message routing key. // // Type: string // RequirementLevel: ConditionallyRequired (If not empty.) // Stability: stable // Examples: 'myKey' MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") ) // MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue // conforming to the "messaging.rabbitmq.destination.routing_key" semantic // conventions. It represents the rabbitMQ message routing key. func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { return MessagingRabbitmqDestinationRoutingKeyKey.String(val) } // Attributes for Apache Kafka const ( // MessagingKafkaMessageKeyKey is the attribute Key conforming to the // "messaging.kafka.message.key" semantic conventions. It represents the // message keys in Kafka are used for grouping alike messages to ensure // they're processed on the same partition. They differ from // `messaging.message.id` in that they're not unique. If the key is `null`, // the attribute MUST NOT be set. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'myKey' // Note: If the key type is not string, it's string representation has to // be supplied for the attribute. If the key has no unambiguous, canonical // string form, don't include its value. MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the // "messaging.kafka.consumer.group" semantic conventions. It represents the // name of the Kafka Consumer Group that is handling the message. Only // applies to consumers, not producers. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'my-group' MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") // MessagingKafkaClientIDKey is the attribute Key conforming to the // "messaging.kafka.client_id" semantic conventions. It represents the // client ID for the Consumer or Producer that is handling the message. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'client-5' MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to // the "messaging.kafka.destination.partition" semantic conventions. It // represents the partition the message is sent to. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2 MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the // "messaging.kafka.source.partition" semantic conventions. It represents // the partition the message is received from. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2 MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition") // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the // "messaging.kafka.message.offset" semantic conventions. It represents the // offset of a record in the corresponding Kafka partition. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 42 MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the // "messaging.kafka.message.tombstone" semantic conventions. It represents // a boolean that is true if the message is a tombstone. // // Type: boolean // RequirementLevel: ConditionallyRequired (If value is `true`. When // missing, the value is assumed to be `false`.) // Stability: stable MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") ) // MessagingKafkaMessageKey returns an attribute KeyValue conforming to the // "messaging.kafka.message.key" semantic conventions. It represents the // message keys in Kafka are used for grouping alike messages to ensure they're // processed on the same partition. They differ from `messaging.message.id` in // that they're not unique. If the key is `null`, the attribute MUST NOT be // set. func MessagingKafkaMessageKey(val string) attribute.KeyValue { return MessagingKafkaMessageKeyKey.String(val) } // MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to // the "messaging.kafka.consumer.group" semantic conventions. It represents the // name of the Kafka Consumer Group that is handling the message. Only applies // to consumers, not producers. 
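//
// Example (illustrative sketch only): annotating a Kafka consumer span,
// assuming this package is imported as semconv and span is a
// go.opentelemetry.io/otel/trace.Span; the values are hypothetical.
//
//	span.SetAttributes(
//		semconv.MessagingSystem("kafka"),
//		semconv.MessagingKafkaConsumerGroup("my-group"),
//		semconv.MessagingKafkaClientID("client-5"),
//		semconv.MessagingKafkaMessageOffset(42),
//	)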
func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { return MessagingKafkaConsumerGroupKey.String(val) } // MessagingKafkaClientID returns an attribute KeyValue conforming to the // "messaging.kafka.client_id" semantic conventions. It represents the client // ID for the Consumer or Producer that is handling the message. func MessagingKafkaClientID(val string) attribute.KeyValue { return MessagingKafkaClientIDKey.String(val) } // MessagingKafkaDestinationPartition returns an attribute KeyValue // conforming to the "messaging.kafka.destination.partition" semantic // conventions. It represents the partition the message is sent to. func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { return MessagingKafkaDestinationPartitionKey.Int(val) } // MessagingKafkaSourcePartition returns an attribute KeyValue conforming to // the "messaging.kafka.source.partition" semantic conventions. It represents // the partition the message is received from. func MessagingKafkaSourcePartition(val int) attribute.KeyValue { return MessagingKafkaSourcePartitionKey.Int(val) } // MessagingKafkaMessageOffset returns an attribute KeyValue conforming to // the "messaging.kafka.message.offset" semantic conventions. It represents the // offset of a record in the corresponding Kafka partition. func MessagingKafkaMessageOffset(val int) attribute.KeyValue { return MessagingKafkaMessageOffsetKey.Int(val) } // MessagingKafkaMessageTombstone returns an attribute KeyValue conforming // to the "messaging.kafka.message.tombstone" semantic conventions. It // represents a boolean that is true if the message is a tombstone. func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { return MessagingKafkaMessageTombstoneKey.Bool(val) } // Attributes for Apache RocketMQ const ( // MessagingRocketmqNamespaceKey is the attribute Key conforming to the // "messaging.rocketmq.namespace" semantic conventions. It represents the // namespace of RocketMQ resources, resources in different namespaces are // individual. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myNamespace' MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") // MessagingRocketmqClientGroupKey is the attribute Key conforming to the // "messaging.rocketmq.client_group" semantic conventions. It represents // the name of the RocketMQ producer/consumer group that is handling the // message. The client type is identified by the SpanKind. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myConsumerGroup' MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") // MessagingRocketmqClientIDKey is the attribute Key conforming to the // "messaging.rocketmq.client_id" semantic conventions. It represents the // unique identifier for each client. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myhost@8742@s8083jm' MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key // conforming to the "messaging.rocketmq.message.delivery_timestamp" // semantic conventions. It represents the timestamp in milliseconds that // the delay message is expected to be delivered to consumer. // // Type: int // RequirementLevel: ConditionallyRequired (If the message type is delay // and delay time level is not specified.) 
// Stability: stable // Examples: 1665987217045 MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key // conforming to the "messaging.rocketmq.message.delay_time_level" semantic // conventions. It represents the delay time level for delay message, which // determines the message delay time. // // Type: int // RequirementLevel: ConditionallyRequired (If the message type is delay // and delivery timestamp is not specified.) // Stability: stable // Examples: 3 MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the // "messaging.rocketmq.message.group" semantic conventions. It represents // the it is essential for FIFO message. Messages that belong to the same // message group are always processed one by one within the same consumer // group. // // Type: string // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) // Stability: stable // Examples: 'myMessageGroup' MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the // "messaging.rocketmq.message.type" semantic conventions. It represents // the type of message. // // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") // MessagingRocketmqMessageTagKey is the attribute Key conforming to the // "messaging.rocketmq.message.tag" semantic conventions. It represents the // secondary classifier of message besides topic. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'tagA' MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the // "messaging.rocketmq.message.keys" semantic conventions. It represents // the key(s) of message, another way to mark message besides message id. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'keyA', 'keyB' MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to // the "messaging.rocketmq.consumption_model" semantic conventions. It // represents the model of message consumption. This only applies to // consumer spans. 
// // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") ) var ( // Normal message MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") // FIFO message MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") // Delay message MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") // Transaction message MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") ) var ( // Clustering consumption model MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") // Broadcasting consumption model MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") ) // MessagingRocketmqNamespace returns an attribute KeyValue conforming to // the "messaging.rocketmq.namespace" semantic conventions. It represents the // namespace of RocketMQ resources, resources in different namespaces are // individual. func MessagingRocketmqNamespace(val string) attribute.KeyValue { return MessagingRocketmqNamespaceKey.String(val) } // MessagingRocketmqClientGroup returns an attribute KeyValue conforming to // the "messaging.rocketmq.client_group" semantic conventions. It represents // the name of the RocketMQ producer/consumer group that is handling the // message. The client type is identified by the SpanKind. func MessagingRocketmqClientGroup(val string) attribute.KeyValue { return MessagingRocketmqClientGroupKey.String(val) } // MessagingRocketmqClientID returns an attribute KeyValue conforming to the // "messaging.rocketmq.client_id" semantic conventions. It represents the // unique identifier for each client. func MessagingRocketmqClientID(val string) attribute.KeyValue { return MessagingRocketmqClientIDKey.String(val) } // MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue // conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic // conventions. It represents the timestamp in milliseconds that the delay // message is expected to be delivered to consumer. func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) } // MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue // conforming to the "messaging.rocketmq.message.delay_time_level" semantic // conventions. It represents the delay time level for delay message, which // determines the message delay time. func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) } // MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to // the "messaging.rocketmq.message.group" semantic conventions. It represents // the it is essential for FIFO message. Messages that belong to the same // message group are always processed one by one within the same consumer // group. func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { return MessagingRocketmqMessageGroupKey.String(val) } // MessagingRocketmqMessageTag returns an attribute KeyValue conforming to // the "messaging.rocketmq.message.tag" semantic conventions. It represents the // secondary classifier of message besides topic. 
func MessagingRocketmqMessageTag(val string) attribute.KeyValue { return MessagingRocketmqMessageTagKey.String(val) } // MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to // the "messaging.rocketmq.message.keys" semantic conventions. It represents // the key(s) of message, another way to mark message besides message id. func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { return MessagingRocketmqMessageKeysKey.StringSlice(val) } // Semantic conventions for remote procedure calls. const ( // RPCSystemKey is the attribute Key conforming to the "rpc.system" // semantic conventions. It represents a string identifying the remoting // system. See below for a list of well-known identifiers. // // Type: Enum // RequirementLevel: Required // Stability: stable RPCSystemKey = attribute.Key("rpc.system") // RPCServiceKey is the attribute Key conforming to the "rpc.service" // semantic conventions. It represents the full (logical) name of the // service being called, including its package name, if applicable. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'myservice.EchoService' // Note: This is the logical name of the service from the RPC interface // perspective, which can be different from the name of any implementing // class. The `code.namespace` attribute may be used to store the latter // (despite the attribute name, it may include a class name; e.g., class // with method actually executing the call on the server side, RPC client // stub class on the client side). RPCServiceKey = attribute.Key("rpc.service") // RPCMethodKey is the attribute Key conforming to the "rpc.method" // semantic conventions. It represents the name of the (logical) method // being called, must be equal to the $method part in the span name. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'exampleMethod' // Note: This is the logical name of the method from the RPC interface // perspective, which can be different from the name of any implementing // method/function. The `code.function` attribute may be used to store the // latter (e.g., method actually executing the call on the server side, RPC // client stub method on the client side). RPCMethodKey = attribute.Key("rpc.method") ) var ( // gRPC RPCSystemGRPC = RPCSystemKey.String("grpc") // Java RMI RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") // .NET WCF RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") // Apache Dubbo RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") ) // RPCService returns an attribute KeyValue conforming to the "rpc.service" // semantic conventions. It represents the full (logical) name of the service // being called, including its package name, if applicable. func RPCService(val string) attribute.KeyValue { return RPCServiceKey.String(val) } // RPCMethod returns an attribute KeyValue conforming to the "rpc.method" // semantic conventions. It represents the name of the (logical) method being // called, must be equal to the $method part in the span name. func RPCMethod(val string) attribute.KeyValue { return RPCMethodKey.String(val) } // Tech-specific attributes for gRPC. const ( // RPCGRPCStatusCodeKey is the attribute Key conforming to the // "rpc.grpc.status_code" semantic conventions. It represents the [numeric // status // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of // the gRPC request. 
// // Type: Enum // RequirementLevel: Required // Stability: stable RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ) var ( // OK RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) // CANCELLED RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) // UNKNOWN RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) // INVALID_ARGUMENT RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) // DEADLINE_EXCEEDED RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) // NOT_FOUND RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) // ALREADY_EXISTS RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) // PERMISSION_DENIED RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) // RESOURCE_EXHAUSTED RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) // FAILED_PRECONDITION RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) // ABORTED RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) // OUT_OF_RANGE RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) // UNIMPLEMENTED RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) // INTERNAL RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) // UNAVAILABLE RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) // DATA_LOSS RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) // UNAUTHENTICATED RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) ) // Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). const ( // RPCJsonrpcVersionKey is the attribute Key conforming to the // "rpc.jsonrpc.version" semantic conventions. It represents the protocol // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 // does not specify this, the value can be omitted. // // Type: string // RequirementLevel: ConditionallyRequired (If other than the default // version (`1.0`)) // Stability: stable // Examples: '2.0', '1.0' RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") // RPCJsonrpcRequestIDKey is the attribute Key conforming to the // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` // property of request or response. Since protocol allows id to be int, // string, `null` or missing (for notifications), value is expected to be // cast to string for simplicity. Use empty string in case of `null` value. // Omit entirely if this is a notification. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '10', 'request-7', '' RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the // "rpc.jsonrpc.error_code" semantic conventions. It represents the // `error.code` property of response if it is an error response. // // Type: int // RequirementLevel: ConditionallyRequired (If response is not successful.) // Stability: stable // Examples: -32700, 100 RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the // "rpc.jsonrpc.error_message" semantic conventions. It represents the // `error.message` property of response if it is an error response. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Parse error', 'User already exists' RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") ) // RPCJsonrpcVersion returns an attribute KeyValue conforming to the // "rpc.jsonrpc.version" semantic conventions. 
It represents the protocol // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 // does not specify this, the value can be omitted. func RPCJsonrpcVersion(val string) attribute.KeyValue { return RPCJsonrpcVersionKey.String(val) } // RPCJsonrpcRequestID returns an attribute KeyValue conforming to the // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` // property of request or response. Since protocol allows id to be int, string, // `null` or missing (for notifications), value is expected to be cast to // string for simplicity. Use empty string in case of `null` value. Omit // entirely if this is a notification. func RPCJsonrpcRequestID(val string) attribute.KeyValue { return RPCJsonrpcRequestIDKey.String(val) } // RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the // "rpc.jsonrpc.error_code" semantic conventions. It represents the // `error.code` property of response if it is an error response. func RPCJsonrpcErrorCode(val int) attribute.KeyValue { return RPCJsonrpcErrorCodeKey.Int(val) } // RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the // "rpc.jsonrpc.error_message" semantic conventions. It represents the // `error.message` property of response if it is an error response. func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { return RPCJsonrpcErrorMessageKey.String(val) } opentelemetry-go-1.21.0/semconv/v1.18.0/000077500000000000000000000000001452547353200174715ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.18.0/doc.go000066400000000000000000000016641452547353200205740ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package semconv implements OpenTelemetry semantic conventions. // // OpenTelemetry semantic conventions are agreed standardized naming // patterns for OpenTelemetry things. This package represents the conventions // as of the v1.18.0 version of the OpenTelemetry specification. package semconv // import "go.opentelemetry.io/otel/semconv/v1.18.0" opentelemetry-go-1.21.0/semconv/v1.18.0/event.go000066400000000000000000000173141452547353200211470ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. 
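// The attribute helpers in these semconv files are plain constructors whose
// results can be handed to the OpenTelemetry trace API. As a purely
// illustrative sketch (the span variable is assumed to come from
// go.opentelemetry.io/otel/trace and is not part of this package), the RPC
// attributes defined above might be attached to a client span like so:
//
//	span.SetAttributes(
//		semconv.RPCSystemGRPC,
//		semconv.RPCService("myservice.EchoService"),
//		semconv.RPCMethod("Echo"),
//	)
//	// ... after the call completes, record the gRPC status code.
//	span.SetAttributes(semconv.RPCGRPCStatusCodeOk)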
package semconv // import "go.opentelemetry.io/otel/semconv/v1.18.0" import "go.opentelemetry.io/otel/attribute" // This semantic convention defines the attributes used to represent a feature // flag evaluation as an event. const ( // FeatureFlagKeyKey is the attribute Key conforming to the // "feature_flag.key" semantic conventions. It represents the unique // identifier of the feature flag. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'logo-color' FeatureFlagKeyKey = attribute.Key("feature_flag.key") // FeatureFlagProviderNameKey is the attribute Key conforming to the // "feature_flag.provider_name" semantic conventions. It represents the // name of the service provider that performs the flag evaluation. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'Flag Manager' FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") // FeatureFlagVariantKey is the attribute Key conforming to the // "feature_flag.variant" semantic conventions. It represents the sHOULD be // a semantic identifier for a value. If one is unavailable, a stringified // version of the value can be used. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'red', 'true', 'on' // Note: A semantic identifier, commonly referred to as a variant, provides // a means // for referring to a value without including the value itself. This can // provide additional context for understanding the meaning behind a value. // For example, the variant `red` maybe be used for the value `#c05543`. // // A stringified version of the value can be used in situations where a // semantic identifier is unavailable. String representation of the value // should be determined by the implementer. FeatureFlagVariantKey = attribute.Key("feature_flag.variant") ) // FeatureFlagKey returns an attribute KeyValue conforming to the // "feature_flag.key" semantic conventions. It represents the unique identifier // of the feature flag. func FeatureFlagKey(val string) attribute.KeyValue { return FeatureFlagKeyKey.String(val) } // FeatureFlagProviderName returns an attribute KeyValue conforming to the // "feature_flag.provider_name" semantic conventions. It represents the name of // the service provider that performs the flag evaluation. func FeatureFlagProviderName(val string) attribute.KeyValue { return FeatureFlagProviderNameKey.String(val) } // FeatureFlagVariant returns an attribute KeyValue conforming to the // "feature_flag.variant" semantic conventions. It represents the sHOULD be a // semantic identifier for a value. If one is unavailable, a stringified // version of the value can be used. func FeatureFlagVariant(val string) attribute.KeyValue { return FeatureFlagVariantKey.String(val) } // RPC received/sent message. const ( // MessageTypeKey is the attribute Key conforming to the "message.type" // semantic conventions. It represents the whether this is a received or // sent message. // // Type: Enum // RequirementLevel: Optional // Stability: stable MessageTypeKey = attribute.Key("message.type") // MessageIDKey is the attribute Key conforming to the "message.id" // semantic conventions. It represents the mUST be calculated as two // different counters starting from `1` one for sent messages and one for // received message. // // Type: int // RequirementLevel: Optional // Stability: stable // Note: This way we guarantee that the values will be consistent between // different implementations. 
MessageIDKey = attribute.Key("message.id") // MessageCompressedSizeKey is the attribute Key conforming to the // "message.compressed_size" semantic conventions. It represents the // compressed size of the message in bytes. // // Type: int // RequirementLevel: Optional // Stability: stable MessageCompressedSizeKey = attribute.Key("message.compressed_size") // MessageUncompressedSizeKey is the attribute Key conforming to the // "message.uncompressed_size" semantic conventions. It represents the // uncompressed size of the message in bytes. // // Type: int // RequirementLevel: Optional // Stability: stable MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") ) var ( // sent MessageTypeSent = MessageTypeKey.String("SENT") // received MessageTypeReceived = MessageTypeKey.String("RECEIVED") ) // MessageID returns an attribute KeyValue conforming to the "message.id" // semantic conventions. It represents the mUST be calculated as two different // counters starting from `1` one for sent messages and one for received // message. func MessageID(val int) attribute.KeyValue { return MessageIDKey.Int(val) } // MessageCompressedSize returns an attribute KeyValue conforming to the // "message.compressed_size" semantic conventions. It represents the compressed // size of the message in bytes. func MessageCompressedSize(val int) attribute.KeyValue { return MessageCompressedSizeKey.Int(val) } // MessageUncompressedSize returns an attribute KeyValue conforming to the // "message.uncompressed_size" semantic conventions. It represents the // uncompressed size of the message in bytes. func MessageUncompressedSize(val int) attribute.KeyValue { return MessageUncompressedSizeKey.Int(val) } // The attributes used to report a single exception associated with a span. const ( // ExceptionEscapedKey is the attribute Key conforming to the // "exception.escaped" semantic conventions. It represents the sHOULD be // set to true if the exception event is recorded at a point where it is // known that the exception is escaping the scope of the span. // // Type: boolean // RequirementLevel: Optional // Stability: stable // Note: An exception is considered to have escaped (or left) the scope of // a span, // if that span is ended while the exception is still logically "in // flight". // This may be actually "in flight" in some languages (e.g. if the // exception // is passed to a Context manager's `__exit__` method in Python) but will // usually be caught at the point of recording the exception in most // languages. // // It is usually not possible to determine at the point where an exception // is thrown // whether it will escape the scope of a span. // However, it is trivial to know that an exception // will escape, if one checks for an active exception just before ending // the span, // as done in the [example above](#recording-an-exception). // // It follows that an exception may still escape the scope of the span // even if the `exception.escaped` attribute was not set or set to false, // since the event might have been recorded at a time where it was not // clear whether the exception will escape. ExceptionEscapedKey = attribute.Key("exception.escaped") ) // ExceptionEscaped returns an attribute KeyValue conforming to the // "exception.escaped" semantic conventions. It represents the sHOULD be set to // true if the exception event is recorded at a point where it is known that // the exception is escaping the scope of the span. 
func ExceptionEscaped(val bool) attribute.KeyValue { return ExceptionEscapedKey.Bool(val) } opentelemetry-go-1.21.0/semconv/v1.18.0/exception.go000066400000000000000000000014301452547353200220140ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.18.0" const ( // ExceptionEventName is the name of the Span event representing an exception. ExceptionEventName = "exception" ) opentelemetry-go-1.21.0/semconv/v1.18.0/http.go000066400000000000000000000014401452547353200207760ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.18.0" // HTTP scheme attributes. var ( HTTPSchemeHTTP = HTTPSchemeKey.String("http") HTTPSchemeHTTPS = HTTPSchemeKey.String("https") ) opentelemetry-go-1.21.0/semconv/v1.18.0/httpconv/000077500000000000000000000000001452547353200213365ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.18.0/httpconv/http.go000066400000000000000000000146571452547353200226610ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package httpconv provides OpenTelemetry HTTP semantic conventions for // tracing telemetry. 
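//
// As a rough, illustrative sketch of client-side use (the span, req, and resp
// variables are assumptions made for the example and are not provided by this
// package), the helpers below can annotate a span for an outgoing request:
//
//	span.SetAttributes(httpconv.ClientRequest(req)...)
//	// ... perform the request, obtaining resp ...
//	span.SetAttributes(httpconv.ClientResponse(resp)...)
//	span.SetStatus(httpconv.ClientStatus(resp.StatusCode))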
package httpconv // import "go.opentelemetry.io/otel/semconv/v1.18.0/httpconv" import ( "net/http" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/semconv/internal/v2" semconv "go.opentelemetry.io/otel/semconv/v1.18.0" ) var ( nc = &internal.NetConv{ NetHostNameKey: semconv.NetHostNameKey, NetHostPortKey: semconv.NetHostPortKey, NetPeerNameKey: semconv.NetPeerNameKey, NetPeerPortKey: semconv.NetPeerPortKey, NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, NetSockPeerPortKey: semconv.NetSockPeerPortKey, NetTransportOther: semconv.NetTransportOther, NetTransportTCP: semconv.NetTransportTCP, NetTransportUDP: semconv.NetTransportUDP, NetTransportInProc: semconv.NetTransportInProc, } hc = &internal.HTTPConv{ NetConv: nc, EnduserIDKey: semconv.EnduserIDKey, HTTPClientIPKey: semconv.HTTPClientIPKey, HTTPFlavorKey: semconv.HTTPFlavorKey, HTTPMethodKey: semconv.HTTPMethodKey, HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey, HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey, HTTPRouteKey: semconv.HTTPRouteKey, HTTPSchemeHTTP: semconv.HTTPSchemeHTTP, HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS, HTTPStatusCodeKey: semconv.HTTPStatusCodeKey, HTTPTargetKey: semconv.HTTPTargetKey, HTTPURLKey: semconv.HTTPURLKey, HTTPUserAgentKey: semconv.HTTPUserAgentKey, } ) // ClientResponse returns trace attributes for an HTTP response received by a // client from a server. It will return the following attributes if the related // values are defined in resp: "http.status.code", // "http.response_content_length". // // This does not add all OpenTelemetry required attributes for an HTTP event, // it assumes ClientRequest was used to create the span with a complete set of // attributes. If a complete set of attributes can be generated using the // request contained in resp. For example: // // append(ClientResponse(resp), ClientRequest(resp.Request)...) func ClientResponse(resp *http.Response) []attribute.KeyValue { return hc.ClientResponse(resp) } // ClientRequest returns trace attributes for an HTTP request made by a client. // The following attributes are always returned: "http.url", "http.flavor", // "http.method", "net.peer.name". The following attributes are returned if the // related values are defined in req: "net.peer.port", "http.user_agent", // "http.request_content_length", "enduser.id". func ClientRequest(req *http.Request) []attribute.KeyValue { return hc.ClientRequest(req) } // ClientStatus returns a span status code and message for an HTTP status code // value received by a client. func ClientStatus(code int) (codes.Code, string) { return hc.ClientStatus(code) } // ServerRequest returns trace attributes for an HTTP request received by a // server. // // The server must be the primary server name if it is known. For example this // would be the ServerName directive // (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache // server, and the server_name directive // (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an // nginx server. More generically, the primary server name would be the host // header value that matches the default virtual host of an HTTP server. It // should include the host identifier and if a port is used to route to the // server that port identifier should be included as an appropriate port // suffix. // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. 
// // The following attributes are always returned: "http.method", "http.scheme", // "http.flavor", "http.target", "net.host.name". The following attributes are // returned if the related values are defined in req: "net.host.port", // "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", // "http.client_ip". func ServerRequest(server string, req *http.Request) []attribute.KeyValue { return hc.ServerRequest(server, req) } // ServerStatus returns a span status code and message for an HTTP status code // value returned by a server. Status codes in the 400-499 range are not // returned as errors. func ServerStatus(code int) (codes.Code, string) { return hc.ServerStatus(code) } // RequestHeader returns the contents of h as attributes. // // Instrumentation should require an explicit configuration of which headers to // capture and then prune what they pass here. Including all headers can be a // security risk - explicit configuration helps avoid leaking sensitive // information. // // The User-Agent header is already captured in the http.user_agent attribute // from ClientRequest and ServerRequest. Instrumentation may provide an option // to capture that header here even though it is not recommended. Otherwise, // instrumentation should filter that out of what is passed. func RequestHeader(h http.Header) []attribute.KeyValue { return hc.RequestHeader(h) } // ResponseHeader returns the contents of h as attributes. // // Instrumentation should require an explicit configuration of which headers to // capture and then prune what they pass here. Including all headers can be a // security risk - explicit configuration helps avoid leaking sensitive // information. // // The User-Agent header is already captured in the http.user_agent attribute // from ClientRequest and ServerRequest. Instrumentation may provide an option // to capture that header here even though it is not recommended. Otherwise, // instrumentation should filter that out of what is passed. func ResponseHeader(h http.Header) []attribute.KeyValue { return hc.ResponseHeader(h) } opentelemetry-go-1.21.0/semconv/v1.18.0/netconv/000077500000000000000000000000001452547353200211455ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.18.0/netconv/net.go000066400000000000000000000053211452547353200222630ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package netconv provides OpenTelemetry network semantic conventions for // tracing telemetry.
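//
// As an illustrative sketch (the span, addr, conn, and ln variables are
// assumptions made for the example and are not provided by this package), the
// helpers below can describe the two ends of a connection:
//
//	// Client side: attributes for a connection dialed to addr.
//	span.SetAttributes(netconv.Client(addr, conn)...)
//
//	// Server side: attributes for a listener accepting connections at addr.
//	span.SetAttributes(netconv.Server(addr, ln)...)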
package netconv // import "go.opentelemetry.io/otel/semconv/v1.18.0/netconv" import ( "net" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/semconv/internal/v2" semconv "go.opentelemetry.io/otel/semconv/v1.18.0" ) var nc = &internal.NetConv{ NetHostNameKey: semconv.NetHostNameKey, NetHostPortKey: semconv.NetHostPortKey, NetPeerNameKey: semconv.NetPeerNameKey, NetPeerPortKey: semconv.NetPeerPortKey, NetSockFamilyKey: semconv.NetSockFamilyKey, NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, NetSockPeerPortKey: semconv.NetSockPeerPortKey, NetSockHostAddrKey: semconv.NetSockHostAddrKey, NetSockHostPortKey: semconv.NetSockHostPortKey, NetTransportOther: semconv.NetTransportOther, NetTransportTCP: semconv.NetTransportTCP, NetTransportUDP: semconv.NetTransportUDP, NetTransportInProc: semconv.NetTransportInProc, } // Transport returns a trace attribute describing the transport protocol of the // passed network. See the net.Dial for information about acceptable network // values. func Transport(network string) attribute.KeyValue { return nc.Transport(network) } // Client returns trace attributes for a client network connection to address. // See net.Dial for information about acceptable address values, address should // be the same as the one used to create conn. If conn is nil, only network // peer attributes will be returned that describe address. Otherwise, the // socket level information about conn will also be included. func Client(address string, conn net.Conn) []attribute.KeyValue { return nc.Client(address, conn) } // Server returns trace attributes for a network listener listening at address. // See net.Listen for information about acceptable address values, address // should be the same as the one used to create ln. If ln is nil, only network // host attributes will be returned that describe address. Otherwise, the // socket level information about ln will also be included. func Server(address string, ln net.Listener) []attribute.KeyValue { return nc.Server(address, ln) } opentelemetry-go-1.21.0/semconv/v1.18.0/resource.go000066400000000000000000002307211452547353200216540ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.18.0" import "go.opentelemetry.io/otel/attribute" // The web browser in which the application represented by the resource is // running. The `browser.*` attributes MUST be used only for resources that // represent applications running in a web browser (regardless of whether // running on a mobile or desktop device). const ( // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" // semantic conventions. 
It represents the array of brand name and version // separated by a space // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (`navigator.userAgentData.brands`). BrowserBrandsKey = attribute.Key("browser.brands") // BrowserPlatformKey is the attribute Key conforming to the // "browser.platform" semantic conventions. It represents the platform on // which the browser is running // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Windows', 'macOS', 'Android' // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (`navigator.userAgentData.platform`). If unavailable, the legacy // `navigator.platform` API SHOULD NOT be used instead and this attribute // SHOULD be left unset in order for the values to be consistent. // The list of possible values is defined in the [W3C User-Agent Client // Hints // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). // Note that some (but not all) of these values can overlap with values in // the [`os.type` and `os.name` attributes](./os.md). However, for // consistency, the values in the `browser.platform` attribute should // capture the exact value that the user agent provides. BrowserPlatformKey = attribute.Key("browser.platform") // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" // semantic conventions. It represents a boolean that is true if the // browser is running on a mobile device // // Type: boolean // RequirementLevel: Optional // Stability: stable // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (`navigator.userAgentData.mobile`). If unavailable, this attribute // SHOULD be left unset. BrowserMobileKey = attribute.Key("browser.mobile") // BrowserUserAgentKey is the attribute Key conforming to the // "browser.user_agent" semantic conventions. It represents the full // user-agent string provided by the browser // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) // AppleWebKit/537.36 (KHTML, ' // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' // Note: The user-agent value SHOULD be provided only from browsers that do // not have a mechanism to retrieve brands and platform individually from // the User-Agent Client Hints API. To retrieve the value, the legacy // `navigator.userAgent` API can be used. BrowserUserAgentKey = attribute.Key("browser.user_agent") // BrowserLanguageKey is the attribute Key conforming to the // "browser.language" semantic conventions. It represents the preferred // language of the user using the browser // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'en', 'en-US', 'fr', 'fr-FR' // Note: This value is intended to be taken from the Navigator API // `navigator.language`. BrowserLanguageKey = attribute.Key("browser.language") ) // BrowserBrands returns an attribute KeyValue conforming to the // "browser.brands" semantic conventions. 
It represents the array of brand name // and version separated by a space func BrowserBrands(val ...string) attribute.KeyValue { return BrowserBrandsKey.StringSlice(val) } // BrowserPlatform returns an attribute KeyValue conforming to the // "browser.platform" semantic conventions. It represents the platform on which // the browser is running func BrowserPlatform(val string) attribute.KeyValue { return BrowserPlatformKey.String(val) } // BrowserMobile returns an attribute KeyValue conforming to the // "browser.mobile" semantic conventions. It represents a boolean that is true // if the browser is running on a mobile device func BrowserMobile(val bool) attribute.KeyValue { return BrowserMobileKey.Bool(val) } // BrowserUserAgent returns an attribute KeyValue conforming to the // "browser.user_agent" semantic conventions. It represents the full user-agent // string provided by the browser func BrowserUserAgent(val string) attribute.KeyValue { return BrowserUserAgentKey.String(val) } // BrowserLanguage returns an attribute KeyValue conforming to the // "browser.language" semantic conventions. It represents the preferred // language of the user using the browser func BrowserLanguage(val string) attribute.KeyValue { return BrowserLanguageKey.String(val) } // A cloud environment (e.g. GCP, Azure, AWS) const ( // CloudProviderKey is the attribute Key conforming to the "cloud.provider" // semantic conventions. It represents the name of the cloud provider. // // Type: Enum // RequirementLevel: Optional // Stability: stable CloudProviderKey = attribute.Key("cloud.provider") // CloudAccountIDKey is the attribute Key conforming to the // "cloud.account.id" semantic conventions. It represents the cloud account // ID the resource is assigned to. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '111111111111', 'opentelemetry' CloudAccountIDKey = attribute.Key("cloud.account.id") // CloudRegionKey is the attribute Key conforming to the "cloud.region" // semantic conventions. It represents the geographical region the resource // is running. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-central1', 'us-east-1' // Note: Refer to your provider's docs to see the available regions, for // example [Alibaba Cloud // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), // [Azure // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), // [Google Cloud regions](https://cloud.google.com/about/locations), or // [Tencent Cloud // regions](https://intl.cloud.tencent.com/document/product/213/6091). CloudRegionKey = attribute.Key("cloud.region") // CloudAvailabilityZoneKey is the attribute Key conforming to the // "cloud.availability_zone" semantic conventions. It represents the cloud // regions often have multiple, isolated locations known as zones to // increase availability. Availability zone represents the zone where the // resource is running. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-east-1c' // Note: Availability zones are called "zones" on Alibaba Cloud and Google // Cloud. CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" // semantic conventions. It represents the cloud platform in use. 
// // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: The prefix of the service SHOULD match the one specified in // `cloud.provider`. CloudPlatformKey = attribute.Key("cloud.platform") ) var ( // Alibaba Cloud CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") // Amazon Web Services CloudProviderAWS = CloudProviderKey.String("aws") // Microsoft Azure CloudProviderAzure = CloudProviderKey.String("azure") // Google Cloud Platform CloudProviderGCP = CloudProviderKey.String("gcp") // IBM Cloud CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") // Tencent Cloud CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") ) var ( // Alibaba Cloud Elastic Compute Service CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") // Alibaba Cloud Function Compute CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") // Red Hat OpenShift on Alibaba Cloud CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") // AWS Elastic Compute Cloud CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") // AWS Elastic Container Service CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") // AWS Elastic Kubernetes Service CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") // AWS Lambda CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") // AWS Elastic Beanstalk CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") // AWS App Runner CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") // Red Hat OpenShift on AWS (ROSA) CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") // Azure Virtual Machines CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") // Azure Container Instances CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") // Azure Kubernetes Service CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") // Azure Functions CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") // Azure App Service CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") // Azure Red Hat OpenShift CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") // Google Cloud Compute Engine (GCE) CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") // Google Cloud Run CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") // Google Cloud Kubernetes Engine (GKE) CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") // Google Cloud Functions (GCF) CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") // Google Cloud App Engine (GAE) CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") // Red Hat OpenShift on Google Cloud CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") // Red Hat OpenShift on IBM Cloud CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") // Tencent Cloud Cloud Virtual Machine (CVM) CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") // Tencent Cloud Elastic Kubernetes Service (EKS) CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") // Tencent Cloud Serverless Cloud Function (SCF) CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") ) // CloudAccountID returns an attribute KeyValue conforming to the // "cloud.account.id" semantic conventions. 
It represents the cloud account ID // the resource is assigned to. func CloudAccountID(val string) attribute.KeyValue { return CloudAccountIDKey.String(val) } // CloudRegion returns an attribute KeyValue conforming to the // "cloud.region" semantic conventions. It represents the geographical region // the resource is running. func CloudRegion(val string) attribute.KeyValue { return CloudRegionKey.String(val) } // CloudAvailabilityZone returns an attribute KeyValue conforming to the // "cloud.availability_zone" semantic conventions. It represents the cloud // regions often have multiple, isolated locations known as zones to increase // availability. Availability zone represents the zone where the resource is // running. func CloudAvailabilityZone(val string) attribute.KeyValue { return CloudAvailabilityZoneKey.String(val) } // Resources used by AWS Elastic Container Service (ECS). const ( // AWSECSContainerARNKey is the attribute Key conforming to the // "aws.ecs.container.arn" semantic conventions. It represents the Amazon // Resource Name (ARN) of an [ECS container // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") // AWSECSClusterARNKey is the attribute Key conforming to the // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an // [ECS // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") // AWSECSLaunchtypeKey is the attribute Key conforming to the // "aws.ecs.launchtype" semantic conventions. It represents the [launch // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) // for an ECS task. // // Type: Enum // RequirementLevel: Optional // Stability: stable AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") // AWSECSTaskARNKey is the attribute Key conforming to the // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an // [ECS task // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") // AWSECSTaskFamilyKey is the attribute Key conforming to the // "aws.ecs.task.family" semantic conventions. It represents the task // definition family this task definition is a member of. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-family' AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") // AWSECSTaskRevisionKey is the attribute Key conforming to the // "aws.ecs.task.revision" semantic conventions. It represents the revision // for this task definition. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '8', '26' AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") ) var ( // ec2 AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") // fargate AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") ) // AWSECSContainerARN returns an attribute KeyValue conforming to the // "aws.ecs.container.arn" semantic conventions. It represents the Amazon // Resource Name (ARN) of an [ECS container // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). func AWSECSContainerARN(val string) attribute.KeyValue { return AWSECSContainerARNKey.String(val) } // AWSECSClusterARN returns an attribute KeyValue conforming to the // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). func AWSECSClusterARN(val string) attribute.KeyValue { return AWSECSClusterARNKey.String(val) } // AWSECSTaskARN returns an attribute KeyValue conforming to the // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS // task // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). func AWSECSTaskARN(val string) attribute.KeyValue { return AWSECSTaskARNKey.String(val) } // AWSECSTaskFamily returns an attribute KeyValue conforming to the // "aws.ecs.task.family" semantic conventions. It represents the task // definition family this task definition is a member of. func AWSECSTaskFamily(val string) attribute.KeyValue { return AWSECSTaskFamilyKey.String(val) } // AWSECSTaskRevision returns an attribute KeyValue conforming to the // "aws.ecs.task.revision" semantic conventions. It represents the revision for // this task definition. func AWSECSTaskRevision(val string) attribute.KeyValue { return AWSECSTaskRevisionKey.String(val) } // Resources used by AWS Elastic Kubernetes Service (EKS). const ( // AWSEKSClusterARNKey is the attribute Key conforming to the // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an // EKS cluster. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") ) // AWSEKSClusterARN returns an attribute KeyValue conforming to the // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS // cluster. func AWSEKSClusterARN(val string) attribute.KeyValue { return AWSEKSClusterARNKey.String(val) } // Resources specific to Amazon Web Services. const ( // AWSLogGroupNamesKey is the attribute Key conforming to the // "aws.log.group.names" semantic conventions. It represents the name(s) of // the AWS log group(s) an application is writing to. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '/aws/lambda/my-function', 'opentelemetry-service' // Note: Multiple log groups must be supported for cases like // multi-container applications, where a single application has sidecar // containers, and each write to their own log group. AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") // AWSLogGroupARNsKey is the attribute Key conforming to the // "aws.log.group.arns" semantic conventions. It represents the Amazon // Resource Name(s) (ARN) of the AWS log group(s). 
// // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' // Note: See the [log group ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") // AWSLogStreamNamesKey is the attribute Key conforming to the // "aws.log.stream.names" semantic conventions. It represents the name(s) // of the AWS log stream(s) an application is writing to. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") // AWSLogStreamARNsKey is the attribute Key conforming to the // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of // the AWS log stream(s). // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' // Note: See the [log stream ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). // One log group can contain several log streams, so these ARNs necessarily // identify both a log group and a log stream. AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") ) // AWSLogGroupNames returns an attribute KeyValue conforming to the // "aws.log.group.names" semantic conventions. It represents the name(s) of the // AWS log group(s) an application is writing to. func AWSLogGroupNames(val ...string) attribute.KeyValue { return AWSLogGroupNamesKey.StringSlice(val) } // AWSLogGroupARNs returns an attribute KeyValue conforming to the // "aws.log.group.arns" semantic conventions. It represents the Amazon Resource // Name(s) (ARN) of the AWS log group(s). func AWSLogGroupARNs(val ...string) attribute.KeyValue { return AWSLogGroupARNsKey.StringSlice(val) } // AWSLogStreamNames returns an attribute KeyValue conforming to the // "aws.log.stream.names" semantic conventions. It represents the name(s) of // the AWS log stream(s) an application is writing to. func AWSLogStreamNames(val ...string) attribute.KeyValue { return AWSLogStreamNamesKey.StringSlice(val) } // AWSLogStreamARNs returns an attribute KeyValue conforming to the // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the // AWS log stream(s). func AWSLogStreamARNs(val ...string) attribute.KeyValue { return AWSLogStreamARNsKey.StringSlice(val) } // A container instance. const ( // ContainerNameKey is the attribute Key conforming to the "container.name" // semantic conventions. It represents the container name used by container // runtime. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-autoconf' ContainerNameKey = attribute.Key("container.name") // ContainerIDKey is the attribute Key conforming to the "container.id" // semantic conventions. It represents the container ID. Usually a UUID, as // for example used to [identify Docker // containers](https://docs.docker.com/engine/reference/run/#container-identification). // The UUID might be abbreviated. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'a3bf90e006b2' ContainerIDKey = attribute.Key("container.id") // ContainerRuntimeKey is the attribute Key conforming to the // "container.runtime" semantic conventions. It represents the container // runtime managing this container. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'docker', 'containerd', 'rkt' ContainerRuntimeKey = attribute.Key("container.runtime") // ContainerImageNameKey is the attribute Key conforming to the // "container.image.name" semantic conventions. It represents the name of // the image the container was built on. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'gcr.io/opentelemetry/operator' ContainerImageNameKey = attribute.Key("container.image.name") // ContainerImageTagKey is the attribute Key conforming to the // "container.image.tag" semantic conventions. It represents the container // image tag. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0.1' ContainerImageTagKey = attribute.Key("container.image.tag") ) // ContainerName returns an attribute KeyValue conforming to the // "container.name" semantic conventions. It represents the container name used // by container runtime. func ContainerName(val string) attribute.KeyValue { return ContainerNameKey.String(val) } // ContainerID returns an attribute KeyValue conforming to the // "container.id" semantic conventions. It represents the container ID. Usually // a UUID, as for example used to [identify Docker // containers](https://docs.docker.com/engine/reference/run/#container-identification). // The UUID might be abbreviated. func ContainerID(val string) attribute.KeyValue { return ContainerIDKey.String(val) } // ContainerRuntime returns an attribute KeyValue conforming to the // "container.runtime" semantic conventions. It represents the container // runtime managing this container. func ContainerRuntime(val string) attribute.KeyValue { return ContainerRuntimeKey.String(val) } // ContainerImageName returns an attribute KeyValue conforming to the // "container.image.name" semantic conventions. It represents the name of the // image the container was built on. func ContainerImageName(val string) attribute.KeyValue { return ContainerImageNameKey.String(val) } // ContainerImageTag returns an attribute KeyValue conforming to the // "container.image.tag" semantic conventions. It represents the container // image tag. func ContainerImageTag(val string) attribute.KeyValue { return ContainerImageTagKey.String(val) } // The software deployment. const ( // DeploymentEnvironmentKey is the attribute Key conforming to the // "deployment.environment" semantic conventions. It represents the name of // the [deployment // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka // deployment tier). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'staging', 'production' DeploymentEnvironmentKey = attribute.Key("deployment.environment") ) // DeploymentEnvironment returns an attribute KeyValue conforming to the // "deployment.environment" semantic conventions. It represents the name of the // [deployment // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka // deployment tier). func DeploymentEnvironment(val string) attribute.KeyValue { return DeploymentEnvironmentKey.String(val) } // The device on which the process represented by this resource is running. 
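// Illustrative usage: a minimal sketch combining the container and
// deployment attributes above into one resource. Assumes
// go.opentelemetry.io/otel/sdk/resource imported as resource; all values are
// example placeholders.
//
//	res := resource.NewWithAttributes(
//		SchemaURL,
//		ContainerName("opentelemetry-autoconf"),
//		ContainerID("a3bf90e006b2"),
//		ContainerRuntime("docker"),
//		ContainerImageName("gcr.io/opentelemetry/operator"),
//		ContainerImageTag("0.1"),
//		DeploymentEnvironment("staging"),
//	)
//	_ = res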
const ( // DeviceIDKey is the attribute Key conforming to the "device.id" semantic // conventions. It represents a unique identifier representing the device // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' // Note: The device identifier MUST only be defined using the values // outlined below. This value is not an advertising identifier and MUST NOT // be used as such. On iOS (Swift or Objective-C), this value MUST be equal // to the [vendor // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). // On Android (Java or Kotlin), this value MUST be equal to the Firebase // Installation ID or a globally unique UUID which is persisted across // sessions in your application. More information can be found // [here](https://developer.android.com/training/articles/user-data-ids) on // best practices and exact implementation details. Caution should be taken // when storing personal data or anything which can identify a user. GDPR // and data protection laws may apply, ensure you do your own due // diligence. DeviceIDKey = attribute.Key("device.id") // DeviceModelIdentifierKey is the attribute Key conforming to the // "device.model.identifier" semantic conventions. It represents the model // identifier for the device // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iPhone3,4', 'SM-G920F' // Note: It's recommended this value represents a machine readable version // of the model identifier rather than the market or consumer-friendly name // of the device. DeviceModelIdentifierKey = attribute.Key("device.model.identifier") // DeviceModelNameKey is the attribute Key conforming to the // "device.model.name" semantic conventions. It represents the marketing // name for the device model // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' // Note: It's recommended this value represents a human readable version of // the device model rather than a machine readable alternative. DeviceModelNameKey = attribute.Key("device.model.name") // DeviceManufacturerKey is the attribute Key conforming to the // "device.manufacturer" semantic conventions. It represents the name of // the device manufacturer // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Apple', 'Samsung' // Note: The Android OS provides this field via // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). // iOS apps SHOULD hardcode the value `Apple`. DeviceManufacturerKey = attribute.Key("device.manufacturer") ) // DeviceID returns an attribute KeyValue conforming to the "device.id" // semantic conventions. It represents a unique identifier representing the // device func DeviceID(val string) attribute.KeyValue { return DeviceIDKey.String(val) } // DeviceModelIdentifier returns an attribute KeyValue conforming to the // "device.model.identifier" semantic conventions. It represents the model // identifier for the device func DeviceModelIdentifier(val string) attribute.KeyValue { return DeviceModelIdentifierKey.String(val) } // DeviceModelName returns an attribute KeyValue conforming to the // "device.model.name" semantic conventions. 
It represents the marketing name // for the device model func DeviceModelName(val string) attribute.KeyValue { return DeviceModelNameKey.String(val) } // DeviceManufacturer returns an attribute KeyValue conforming to the // "device.manufacturer" semantic conventions. It represents the name of the // device manufacturer func DeviceManufacturer(val string) attribute.KeyValue { return DeviceManufacturerKey.String(val) } // A serverless instance. const ( // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic // conventions. It represents the name of the single function that this // runtime instance executes. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'my-function', 'myazurefunctionapp/some-function-name' // Note: This is the name of the function as configured/deployed on the // FaaS // platform and is usually different from the name of the callback // function (which may be stored in the // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) // span attributes). // // For some cloud providers, the above definition is ambiguous. The // following // definition of function name MUST be used for this attribute // (and consequently the span name) for the listed cloud // providers/products: // // * **Azure:** The full name `/`, i.e., function app name // followed by a forward slash followed by the function name (this form // can also be seen in the resource JSON for the function). // This means that a span attribute MUST be used, as an Azure function // app can host multiple functions that would usually share // a TracerProvider (see also the `faas.id` attribute). FaaSNameKey = attribute.Key("faas.name") // FaaSIDKey is the attribute Key conforming to the "faas.id" semantic // conventions. It represents the unique ID of the single function that // this runtime instance executes. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' // Note: On some cloud providers, it may not be possible to determine the // full ID at startup, // so consider setting `faas.id` as a span attribute instead. // // The exact value to use for `faas.id` depends on the cloud provider: // // * **AWS Lambda:** The function // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). // Take care not to use the "invoked ARN" directly but replace any // [alias // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) // with the resolved function version, as the same runtime instance may // be invokable with // multiple different aliases. // * **GCP:** The [URI of the // resource](https://cloud.google.com/iam/docs/full-resource-names) // * **Azure:** The [Fully Qualified Resource // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) // of the invoked function, // *not* the function app, having the form // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. // This means that a span attribute MUST be used, as an Azure function // app can host multiple functions that would usually share // a TracerProvider. FaaSIDKey = attribute.Key("faas.id") // FaaSVersionKey is the attribute Key conforming to the "faas.version" // semantic conventions. It represents the immutable version of the // function being executed. 
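// Illustrative usage: a minimal sketch of the device attributes above, as a
// plain attribute slice. The identifier shown is an example placeholder; as
// the attribute documentation notes, real device IDs can be personal data
// and should be handled accordingly.
//
//	attrs := []attribute.KeyValue{
//		DeviceID("2ab2916d-a51f-4ac8-80ee-45ac31a28092"), // placeholder
//		DeviceModelIdentifier("SM-G920F"),
//		DeviceModelName("Samsung Galaxy S6"),
//		DeviceManufacturer("Samsung"),
//	}
//	_ = attrs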
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '26', 'pinkfroid-00002' // Note: Depending on the cloud provider and platform, use: // // * **AWS Lambda:** The [function // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) // (an integer represented as a decimal string). // * **Google Cloud Run:** The // [revision](https://cloud.google.com/run/docs/managing/revisions) // (i.e., the function name plus the revision suffix). // * **Google Cloud Functions:** The value of the // [`K_REVISION` environment // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). // * **Azure Functions:** Not applicable. Do not set this attribute. FaaSVersionKey = attribute.Key("faas.version") // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" // semantic conventions. It represents the execution environment ID as a // string, that will be potentially reused for other invocations to the // same function/function version. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' // Note: * **AWS Lambda:** Use the (full) log stream name. FaaSInstanceKey = attribute.Key("faas.instance") // FaaSMaxMemoryKey is the attribute Key conforming to the // "faas.max_memory" semantic conventions. It represents the amount of // memory available to the serverless function in MiB. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 128 // Note: It's recommended to set this attribute since e.g. too little // memory can easily stop a Java AWS Lambda function from working // correctly. On AWS Lambda, the environment variable // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information. FaaSMaxMemoryKey = attribute.Key("faas.max_memory") ) // FaaSName returns an attribute KeyValue conforming to the "faas.name" // semantic conventions. It represents the name of the single function that // this runtime instance executes. func FaaSName(val string) attribute.KeyValue { return FaaSNameKey.String(val) } // FaaSID returns an attribute KeyValue conforming to the "faas.id" semantic // conventions. It represents the unique ID of the single function that this // runtime instance executes. func FaaSID(val string) attribute.KeyValue { return FaaSIDKey.String(val) } // FaaSVersion returns an attribute KeyValue conforming to the // "faas.version" semantic conventions. It represents the immutable version of // the function being executed. func FaaSVersion(val string) attribute.KeyValue { return FaaSVersionKey.String(val) } // FaaSInstance returns an attribute KeyValue conforming to the // "faas.instance" semantic conventions. It represents the execution // environment ID as a string, that will be potentially reused for other // invocations to the same function/function version. func FaaSInstance(val string) attribute.KeyValue { return FaaSInstanceKey.String(val) } // FaaSMaxMemory returns an attribute KeyValue conforming to the // "faas.max_memory" semantic conventions. It represents the amount of memory // available to the serverless function in MiB. func FaaSMaxMemory(val int) attribute.KeyValue { return FaaSMaxMemoryKey.Int(val) } // A host is defined as a general computing instance. const ( // HostIDKey is the attribute Key conforming to the "host.id" semantic // conventions. It represents the unique host ID. For Cloud, this must be // the instance_id assigned by the cloud provider. 
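// Illustrative usage: a minimal sketch populating the FaaS attributes above
// from the environment of an AWS Lambda function. Assumes
// go.opentelemetry.io/otel/sdk/resource imported as resource plus the
// standard library os and strconv packages; the environment variables shown
// are the ones AWS Lambda sets for its runtimes.
//
//	memMiB, _ := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE"))
//	res := resource.NewWithAttributes(
//		SchemaURL,
//		FaaSName(os.Getenv("AWS_LAMBDA_FUNCTION_NAME")),
//		FaaSVersion(os.Getenv("AWS_LAMBDA_FUNCTION_VERSION")),
//		FaaSInstance(os.Getenv("AWS_LAMBDA_LOG_STREAM_NAME")),
//		FaaSMaxMemory(memMiB),
//	)
//	_ = res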
For non-containerized // Linux systems, the `machine-id` located in `/etc/machine-id` or // `/var/lib/dbus/machine-id` may be used. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'fdbf79e8af94cb7f9e8df36789187052' HostIDKey = attribute.Key("host.id") // HostNameKey is the attribute Key conforming to the "host.name" semantic // conventions. It represents the name of the host. On Unix systems, it may // contain what the hostname command returns, or the fully qualified // hostname, or another name specified by the user. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-test' HostNameKey = attribute.Key("host.name") // HostTypeKey is the attribute Key conforming to the "host.type" semantic // conventions. It represents the type of host. For Cloud, this must be the // machine type. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'n1-standard-1' HostTypeKey = attribute.Key("host.type") // HostArchKey is the attribute Key conforming to the "host.arch" semantic // conventions. It represents the CPU architecture the host system is // running on. // // Type: Enum // RequirementLevel: Optional // Stability: stable HostArchKey = attribute.Key("host.arch") // HostImageNameKey is the attribute Key conforming to the // "host.image.name" semantic conventions. It represents the name of the VM // image or OS install the host was instantiated from. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' HostImageNameKey = attribute.Key("host.image.name") // HostImageIDKey is the attribute Key conforming to the "host.image.id" // semantic conventions. It represents the vM image ID. For Cloud, this // value is from the provider. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'ami-07b06b442921831e5' HostImageIDKey = attribute.Key("host.image.id") // HostImageVersionKey is the attribute Key conforming to the // "host.image.version" semantic conventions. It represents the version // string of the VM image as defined in [Version // Attributes](README.md#version-attributes). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0.1' HostImageVersionKey = attribute.Key("host.image.version") ) var ( // AMD64 HostArchAMD64 = HostArchKey.String("amd64") // ARM32 HostArchARM32 = HostArchKey.String("arm32") // ARM64 HostArchARM64 = HostArchKey.String("arm64") // Itanium HostArchIA64 = HostArchKey.String("ia64") // 32-bit PowerPC HostArchPPC32 = HostArchKey.String("ppc32") // 64-bit PowerPC HostArchPPC64 = HostArchKey.String("ppc64") // IBM z/Architecture HostArchS390x = HostArchKey.String("s390x") // 32-bit x86 HostArchX86 = HostArchKey.String("x86") ) // HostID returns an attribute KeyValue conforming to the "host.id" semantic // conventions. It represents the unique host ID. For Cloud, this must be the // instance_id assigned by the cloud provider. For non-containerized Linux // systems, the `machine-id` located in `/etc/machine-id` or // `/var/lib/dbus/machine-id` may be used. func HostID(val string) attribute.KeyValue { return HostIDKey.String(val) } // HostName returns an attribute KeyValue conforming to the "host.name" // semantic conventions. It represents the name of the host. On Unix systems, // it may contain what the hostname command returns, or the fully qualified // hostname, or another name specified by the user. 
func HostName(val string) attribute.KeyValue { return HostNameKey.String(val) } // HostType returns an attribute KeyValue conforming to the "host.type" // semantic conventions. It represents the type of host. For Cloud, this must // be the machine type. func HostType(val string) attribute.KeyValue { return HostTypeKey.String(val) } // HostImageName returns an attribute KeyValue conforming to the // "host.image.name" semantic conventions. It represents the name of the VM // image or OS install the host was instantiated from. func HostImageName(val string) attribute.KeyValue { return HostImageNameKey.String(val) } // HostImageID returns an attribute KeyValue conforming to the // "host.image.id" semantic conventions. It represents the vM image ID. For // Cloud, this value is from the provider. func HostImageID(val string) attribute.KeyValue { return HostImageIDKey.String(val) } // HostImageVersion returns an attribute KeyValue conforming to the // "host.image.version" semantic conventions. It represents the version string // of the VM image as defined in [Version // Attributes](README.md#version-attributes). func HostImageVersion(val string) attribute.KeyValue { return HostImageVersionKey.String(val) } // A Kubernetes Cluster. const ( // K8SClusterNameKey is the attribute Key conforming to the // "k8s.cluster.name" semantic conventions. It represents the name of the // cluster. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-cluster' K8SClusterNameKey = attribute.Key("k8s.cluster.name") ) // K8SClusterName returns an attribute KeyValue conforming to the // "k8s.cluster.name" semantic conventions. It represents the name of the // cluster. func K8SClusterName(val string) attribute.KeyValue { return K8SClusterNameKey.String(val) } // A Kubernetes Node object. const ( // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" // semantic conventions. It represents the name of the Node. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'node-1' K8SNodeNameKey = attribute.Key("k8s.node.name") // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" // semantic conventions. It represents the UID of the Node. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' K8SNodeUIDKey = attribute.Key("k8s.node.uid") ) // K8SNodeName returns an attribute KeyValue conforming to the // "k8s.node.name" semantic conventions. It represents the name of the Node. func K8SNodeName(val string) attribute.KeyValue { return K8SNodeNameKey.String(val) } // K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" // semantic conventions. It represents the UID of the Node. func K8SNodeUID(val string) attribute.KeyValue { return K8SNodeUIDKey.String(val) } // A Kubernetes Namespace. const ( // K8SNamespaceNameKey is the attribute Key conforming to the // "k8s.namespace.name" semantic conventions. It represents the name of the // namespace that the pod is running in. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'default' K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") ) // K8SNamespaceName returns an attribute KeyValue conforming to the // "k8s.namespace.name" semantic conventions. It represents the name of the // namespace that the pod is running in. func K8SNamespaceName(val string) attribute.KeyValue { return K8SNamespaceNameKey.String(val) } // A Kubernetes Pod object. 
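// Illustrative usage: a minimal sketch of the host attributes above, filled
// in from the standard library (os and runtime imports assumed). The SDK's
// resource.WithHost option can derive host.name automatically; this sketch
// only shows how the helpers compose.
//
//	hostName, _ := os.Hostname()
//	attrs := []attribute.KeyValue{
//		HostID("fdbf79e8af94cb7f9e8df36789187052"), // placeholder machine-id
//		HostName(hostName),
//		HostArchAMD64, // or map runtime.GOARCH to the matching HostArch* value
//	}
//	_ = attrs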
const ( // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" // semantic conventions. It represents the UID of the Pod. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SPodUIDKey = attribute.Key("k8s.pod.uid") // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" // semantic conventions. It represents the name of the Pod. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-pod-autoconf' K8SPodNameKey = attribute.Key("k8s.pod.name") ) // K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" // semantic conventions. It represents the UID of the Pod. func K8SPodUID(val string) attribute.KeyValue { return K8SPodUIDKey.String(val) } // K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" // semantic conventions. It represents the name of the Pod. func K8SPodName(val string) attribute.KeyValue { return K8SPodNameKey.String(val) } // A container in a // [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). const ( // K8SContainerNameKey is the attribute Key conforming to the // "k8s.container.name" semantic conventions. It represents the name of the // Container from Pod specification, must be unique within a Pod. Container // runtime usually uses different globally unique name (`container.name`). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'redis' K8SContainerNameKey = attribute.Key("k8s.container.name") // K8SContainerRestartCountKey is the attribute Key conforming to the // "k8s.container.restart_count" semantic conventions. It represents the // number of times the container was restarted. This attribute can be used // to identify a particular container (running or stopped) within a // container spec. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 0, 2 K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") ) // K8SContainerName returns an attribute KeyValue conforming to the // "k8s.container.name" semantic conventions. It represents the name of the // Container from Pod specification, must be unique within a Pod. Container // runtime usually uses different globally unique name (`container.name`). func K8SContainerName(val string) attribute.KeyValue { return K8SContainerNameKey.String(val) } // K8SContainerRestartCount returns an attribute KeyValue conforming to the // "k8s.container.restart_count" semantic conventions. It represents the number // of times the container was restarted. This attribute can be used to identify // a particular container (running or stopped) within a container spec. func K8SContainerRestartCount(val int) attribute.KeyValue { return K8SContainerRestartCountKey.Int(val) } // A Kubernetes ReplicaSet object. const ( // K8SReplicaSetUIDKey is the attribute Key conforming to the // "k8s.replicaset.uid" semantic conventions. It represents the UID of the // ReplicaSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") // K8SReplicaSetNameKey is the attribute Key conforming to the // "k8s.replicaset.name" semantic conventions. It represents the name of // the ReplicaSet. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") ) // K8SReplicaSetUID returns an attribute KeyValue conforming to the // "k8s.replicaset.uid" semantic conventions. It represents the UID of the // ReplicaSet. func K8SReplicaSetUID(val string) attribute.KeyValue { return K8SReplicaSetUIDKey.String(val) } // K8SReplicaSetName returns an attribute KeyValue conforming to the // "k8s.replicaset.name" semantic conventions. It represents the name of the // ReplicaSet. func K8SReplicaSetName(val string) attribute.KeyValue { return K8SReplicaSetNameKey.String(val) } // A Kubernetes Deployment object. const ( // K8SDeploymentUIDKey is the attribute Key conforming to the // "k8s.deployment.uid" semantic conventions. It represents the UID of the // Deployment. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") // K8SDeploymentNameKey is the attribute Key conforming to the // "k8s.deployment.name" semantic conventions. It represents the name of // the Deployment. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") ) // K8SDeploymentUID returns an attribute KeyValue conforming to the // "k8s.deployment.uid" semantic conventions. It represents the UID of the // Deployment. func K8SDeploymentUID(val string) attribute.KeyValue { return K8SDeploymentUIDKey.String(val) } // K8SDeploymentName returns an attribute KeyValue conforming to the // "k8s.deployment.name" semantic conventions. It represents the name of the // Deployment. func K8SDeploymentName(val string) attribute.KeyValue { return K8SDeploymentNameKey.String(val) } // A Kubernetes StatefulSet object. const ( // K8SStatefulSetUIDKey is the attribute Key conforming to the // "k8s.statefulset.uid" semantic conventions. It represents the UID of the // StatefulSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") // K8SStatefulSetNameKey is the attribute Key conforming to the // "k8s.statefulset.name" semantic conventions. It represents the name of // the StatefulSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") ) // K8SStatefulSetUID returns an attribute KeyValue conforming to the // "k8s.statefulset.uid" semantic conventions. It represents the UID of the // StatefulSet. func K8SStatefulSetUID(val string) attribute.KeyValue { return K8SStatefulSetUIDKey.String(val) } // K8SStatefulSetName returns an attribute KeyValue conforming to the // "k8s.statefulset.name" semantic conventions. It represents the name of the // StatefulSet. func K8SStatefulSetName(val string) attribute.KeyValue { return K8SStatefulSetNameKey.String(val) } // A Kubernetes DaemonSet object. const ( // K8SDaemonSetUIDKey is the attribute Key conforming to the // "k8s.daemonset.uid" semantic conventions. It represents the UID of the // DaemonSet. 
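// Illustrative usage: a minimal sketch of the Kubernetes workload attributes
// above. The environment variable names are placeholders; in practice they
// are whatever the pod spec exposes through the Kubernetes downward API
// (os import assumed).
//
//	attrs := []attribute.KeyValue{
//		K8SNamespaceName(os.Getenv("POD_NAMESPACE")), // placeholder env var
//		K8SPodName(os.Getenv("POD_NAME")),            // placeholder env var
//		K8SPodUID(os.Getenv("POD_UID")),              // placeholder env var
//		K8SContainerName("redis"),
//		K8SDeploymentName("opentelemetry"),
//	}
//	_ = attrs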
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") // K8SDaemonSetNameKey is the attribute Key conforming to the // "k8s.daemonset.name" semantic conventions. It represents the name of the // DaemonSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") ) // K8SDaemonSetUID returns an attribute KeyValue conforming to the // "k8s.daemonset.uid" semantic conventions. It represents the UID of the // DaemonSet. func K8SDaemonSetUID(val string) attribute.KeyValue { return K8SDaemonSetUIDKey.String(val) } // K8SDaemonSetName returns an attribute KeyValue conforming to the // "k8s.daemonset.name" semantic conventions. It represents the name of the // DaemonSet. func K8SDaemonSetName(val string) attribute.KeyValue { return K8SDaemonSetNameKey.String(val) } // A Kubernetes Job object. const ( // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" // semantic conventions. It represents the UID of the Job. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SJobUIDKey = attribute.Key("k8s.job.uid") // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" // semantic conventions. It represents the name of the Job. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SJobNameKey = attribute.Key("k8s.job.name") ) // K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" // semantic conventions. It represents the UID of the Job. func K8SJobUID(val string) attribute.KeyValue { return K8SJobUIDKey.String(val) } // K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" // semantic conventions. It represents the name of the Job. func K8SJobName(val string) attribute.KeyValue { return K8SJobNameKey.String(val) } // A Kubernetes CronJob object. const ( // K8SCronJobUIDKey is the attribute Key conforming to the // "k8s.cronjob.uid" semantic conventions. It represents the UID of the // CronJob. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") // K8SCronJobNameKey is the attribute Key conforming to the // "k8s.cronjob.name" semantic conventions. It represents the name of the // CronJob. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") ) // K8SCronJobUID returns an attribute KeyValue conforming to the // "k8s.cronjob.uid" semantic conventions. It represents the UID of the // CronJob. func K8SCronJobUID(val string) attribute.KeyValue { return K8SCronJobUIDKey.String(val) } // K8SCronJobName returns an attribute KeyValue conforming to the // "k8s.cronjob.name" semantic conventions. It represents the name of the // CronJob. func K8SCronJobName(val string) attribute.KeyValue { return K8SCronJobNameKey.String(val) } // The operating system (OS) on which the process represented by this resource // is running. const ( // OSTypeKey is the attribute Key conforming to the "os.type" semantic // conventions. It represents the operating system type. 
// // Type: Enum // RequirementLevel: Required // Stability: stable OSTypeKey = attribute.Key("os.type") // OSDescriptionKey is the attribute Key conforming to the "os.description" // semantic conventions. It represents the human readable (not intended to // be parsed) OS version information, like e.g. reported by `ver` or // `lsb_release -a` commands. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 // LTS' OSDescriptionKey = attribute.Key("os.description") // OSNameKey is the attribute Key conforming to the "os.name" semantic // conventions. It represents the human readable operating system name. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iOS', 'Android', 'Ubuntu' OSNameKey = attribute.Key("os.name") // OSVersionKey is the attribute Key conforming to the "os.version" // semantic conventions. It represents the version string of the operating // system as defined in [Version // Attributes](../../resource/semantic_conventions/README.md#version-attributes). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '14.2.1', '18.04.1' OSVersionKey = attribute.Key("os.version") ) var ( // Microsoft Windows OSTypeWindows = OSTypeKey.String("windows") // Linux OSTypeLinux = OSTypeKey.String("linux") // Apple Darwin OSTypeDarwin = OSTypeKey.String("darwin") // FreeBSD OSTypeFreeBSD = OSTypeKey.String("freebsd") // NetBSD OSTypeNetBSD = OSTypeKey.String("netbsd") // OpenBSD OSTypeOpenBSD = OSTypeKey.String("openbsd") // DragonFly BSD OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") // HP-UX (Hewlett Packard Unix) OSTypeHPUX = OSTypeKey.String("hpux") // AIX (Advanced Interactive eXecutive) OSTypeAIX = OSTypeKey.String("aix") // SunOS, Oracle Solaris OSTypeSolaris = OSTypeKey.String("solaris") // IBM z/OS OSTypeZOS = OSTypeKey.String("z_os") ) // OSDescription returns an attribute KeyValue conforming to the // "os.description" semantic conventions. It represents the human readable (not // intended to be parsed) OS version information, like e.g. reported by `ver` // or `lsb_release -a` commands. func OSDescription(val string) attribute.KeyValue { return OSDescriptionKey.String(val) } // OSName returns an attribute KeyValue conforming to the "os.name" semantic // conventions. It represents the human readable operating system name. func OSName(val string) attribute.KeyValue { return OSNameKey.String(val) } // OSVersion returns an attribute KeyValue conforming to the "os.version" // semantic conventions. It represents the version string of the operating // system as defined in [Version // Attributes](../../resource/semantic_conventions/README.md#version-attributes). func OSVersion(val string) attribute.KeyValue { return OSVersionKey.String(val) } // An operating system process. const ( // ProcessPIDKey is the attribute Key conforming to the "process.pid" // semantic conventions. It represents the process identifier (PID). // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 1234 ProcessPIDKey = attribute.Key("process.pid") // ProcessParentPIDKey is the attribute Key conforming to the // "process.parent_pid" semantic conventions. It represents the parent // Process identifier (PID). 
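// Illustrative usage: a minimal sketch of the operating system attributes
// above. The SDK's resource.WithOS option can detect os.type and
// os.description automatically; the literal values here are example
// placeholders.
//
//	attrs := []attribute.KeyValue{
//		OSTypeLinux,
//		OSName("Ubuntu"),
//		OSVersion("18.04.1"),
//		OSDescription("Ubuntu 18.04.1 LTS"),
//	}
//	_ = attrs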
// // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 111 ProcessParentPIDKey = attribute.Key("process.parent_pid") // ProcessExecutableNameKey is the attribute Key conforming to the // "process.executable.name" semantic conventions. It represents the name // of the process executable. On Linux based systems, can be set to the // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name // of `GetProcessImageFileNameW`. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes // below.) // Stability: stable // Examples: 'otelcol' ProcessExecutableNameKey = attribute.Key("process.executable.name") // ProcessExecutablePathKey is the attribute Key conforming to the // "process.executable.path" semantic conventions. It represents the full // path to the process executable. On Linux based systems, can be set to // the target of `proc/[pid]/exe`. On Windows, can be set to the result of // `GetProcessImageFileNameW`. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes // below.) // Stability: stable // Examples: '/usr/bin/cmd/otelcol' ProcessExecutablePathKey = attribute.Key("process.executable.path") // ProcessCommandKey is the attribute Key conforming to the // "process.command" semantic conventions. It represents the command used // to launch the process (i.e. the command name). On Linux based systems, // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can // be set to the first parameter extracted from `GetCommandLineW`. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes // below.) // Stability: stable // Examples: 'cmd/otelcol' ProcessCommandKey = attribute.Key("process.command") // ProcessCommandLineKey is the attribute Key conforming to the // "process.command_line" semantic conventions. It represents the full // command used to launch the process as a single string representing the // full command. On Windows, can be set to the result of `GetCommandLineW`. // Do not set this if you have to assemble it just for monitoring; use // `process.command_args` instead. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes // below.) // Stability: stable // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' ProcessCommandLineKey = attribute.Key("process.command_line") // ProcessCommandArgsKey is the attribute Key conforming to the // "process.command_args" semantic conventions. It represents the all the // command arguments (including the command/executable itself) as received // by the process. On Linux-based systems (and some other Unixoid systems // supporting procfs), can be set according to the list of null-delimited // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, // this would be the full argv vector passed to `main`. // // Type: string[] // RequirementLevel: ConditionallyRequired (See alternative attributes // below.) // Stability: stable // Examples: 'cmd/otecol', '--config=config.yaml' ProcessCommandArgsKey = attribute.Key("process.command_args") // ProcessOwnerKey is the attribute Key conforming to the "process.owner" // semantic conventions. It represents the username of the user that owns // the process. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'root' ProcessOwnerKey = attribute.Key("process.owner") ) // ProcessPID returns an attribute KeyValue conforming to the "process.pid" // semantic conventions. 
It represents the process identifier (PID). func ProcessPID(val int) attribute.KeyValue { return ProcessPIDKey.Int(val) } // ProcessParentPID returns an attribute KeyValue conforming to the // "process.parent_pid" semantic conventions. It represents the parent Process // identifier (PID). func ProcessParentPID(val int) attribute.KeyValue { return ProcessParentPIDKey.Int(val) } // ProcessExecutableName returns an attribute KeyValue conforming to the // "process.executable.name" semantic conventions. It represents the name of // the process executable. On Linux based systems, can be set to the `Name` in // `proc/[pid]/status`. On Windows, can be set to the base name of // `GetProcessImageFileNameW`. func ProcessExecutableName(val string) attribute.KeyValue { return ProcessExecutableNameKey.String(val) } // ProcessExecutablePath returns an attribute KeyValue conforming to the // "process.executable.path" semantic conventions. It represents the full path // to the process executable. On Linux based systems, can be set to the target // of `proc/[pid]/exe`. On Windows, can be set to the result of // `GetProcessImageFileNameW`. func ProcessExecutablePath(val string) attribute.KeyValue { return ProcessExecutablePathKey.String(val) } // ProcessCommand returns an attribute KeyValue conforming to the // "process.command" semantic conventions. It represents the command used to // launch the process (i.e. the command name). On Linux based systems, can be // set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to // the first parameter extracted from `GetCommandLineW`. func ProcessCommand(val string) attribute.KeyValue { return ProcessCommandKey.String(val) } // ProcessCommandLine returns an attribute KeyValue conforming to the // "process.command_line" semantic conventions. It represents the full command // used to launch the process as a single string representing the full command. // On Windows, can be set to the result of `GetCommandLineW`. Do not set this // if you have to assemble it just for monitoring; use `process.command_args` // instead. func ProcessCommandLine(val string) attribute.KeyValue { return ProcessCommandLineKey.String(val) } // ProcessCommandArgs returns an attribute KeyValue conforming to the // "process.command_args" semantic conventions. It represents the all the // command arguments (including the command/executable itself) as received by // the process. On Linux-based systems (and some other Unixoid systems // supporting procfs), can be set according to the list of null-delimited // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, // this would be the full argv vector passed to `main`. func ProcessCommandArgs(val ...string) attribute.KeyValue { return ProcessCommandArgsKey.StringSlice(val) } // ProcessOwner returns an attribute KeyValue conforming to the // "process.owner" semantic conventions. It represents the username of the user // that owns the process. func ProcessOwner(val string) attribute.KeyValue { return ProcessOwnerKey.String(val) } // The single (language) runtime instance which is monitored. const ( // ProcessRuntimeNameKey is the attribute Key conforming to the // "process.runtime.name" semantic conventions. It represents the name of // the runtime of this process. For compiled native binaries, this SHOULD // be the name of the compiler. 
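// Illustrative usage: a minimal sketch of the process attributes above,
// populated from the standard library (os, os/user and path/filepath imports
// assumed). The SDK's resource.WithProcess option collects a similar set
// automatically.
//
//	exe, _ := os.Executable()
//	owner, _ := user.Current()
//	attrs := []attribute.KeyValue{
//		ProcessPID(os.Getpid()),
//		ProcessExecutableName(filepath.Base(exe)),
//		ProcessExecutablePath(exe),
//		ProcessCommandArgs(os.Args...),
//		ProcessOwner(owner.Username),
//	}
//	_ = attrs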
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'OpenJDK Runtime Environment' ProcessRuntimeNameKey = attribute.Key("process.runtime.name") // ProcessRuntimeVersionKey is the attribute Key conforming to the // "process.runtime.version" semantic conventions. It represents the // version of the runtime of this process, as returned by the runtime // without modification. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '14.0.2' ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") // ProcessRuntimeDescriptionKey is the attribute Key conforming to the // "process.runtime.description" semantic conventions. It represents an // additional description about the runtime of the process, for example a // specific vendor customization of the runtime environment. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") ) // ProcessRuntimeName returns an attribute KeyValue conforming to the // "process.runtime.name" semantic conventions. It represents the name of the // runtime of this process. For compiled native binaries, this SHOULD be the // name of the compiler. func ProcessRuntimeName(val string) attribute.KeyValue { return ProcessRuntimeNameKey.String(val) } // ProcessRuntimeVersion returns an attribute KeyValue conforming to the // "process.runtime.version" semantic conventions. It represents the version of // the runtime of this process, as returned by the runtime without // modification. func ProcessRuntimeVersion(val string) attribute.KeyValue { return ProcessRuntimeVersionKey.String(val) } // ProcessRuntimeDescription returns an attribute KeyValue conforming to the // "process.runtime.description" semantic conventions. It represents an // additional description about the runtime of the process, for example a // specific vendor customization of the runtime environment. func ProcessRuntimeDescription(val string) attribute.KeyValue { return ProcessRuntimeDescriptionKey.String(val) } // A service instance. const ( // ServiceNameKey is the attribute Key conforming to the "service.name" // semantic conventions. It represents the logical name of the service. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'shoppingcart' // Note: MUST be the same for all instances of horizontally scaled // services. If the value was not specified, SDKs MUST fallback to // `unknown_service:` concatenated with // [`process.executable.name`](process.md#process), e.g. // `unknown_service:bash`. If `process.executable.name` is not available, // the value MUST be set to `unknown_service`. ServiceNameKey = attribute.Key("service.name") // ServiceNamespaceKey is the attribute Key conforming to the // "service.namespace" semantic conventions. It represents a namespace for // `service.name`. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Shop' // Note: A string value having a meaning that helps to distinguish a group // of services, for example the team name that owns a group of services. // `service.name` is expected to be unique within the same namespace. If // `service.namespace` is not specified in the Resource then `service.name` // is expected to be unique for all services that have no explicit // namespace defined (so the empty/unspecified namespace is simply one more // valid namespace). 
Zero-length namespace string is assumed equal to // unspecified namespace. ServiceNamespaceKey = attribute.Key("service.namespace") // ServiceInstanceIDKey is the attribute Key conforming to the // "service.instance.id" semantic conventions. It represents the string ID // of the service instance. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '627cc493-f310-47de-96bd-71410b7dec09' // Note: MUST be unique for each instance of the same // `service.namespace,service.name` pair (in other words // `service.namespace,service.name,service.instance.id` triplet MUST be // globally unique). The ID helps to distinguish instances of the same // service that exist at the same time (e.g. instances of a horizontally // scaled service). It is preferable for the ID to be persistent and stay // the same for the lifetime of the service instance, however it is // acceptable that the ID is ephemeral and changes during important // lifetime events for the service (e.g. service restarts). If the service // has no inherent unique ID that can be used as the value of this // attribute it is recommended to generate a random Version 1 or Version 4 // RFC 4122 UUID (services aiming for reproducible UUIDs may also use // Version 5, see RFC 4122 for more recommendations). ServiceInstanceIDKey = attribute.Key("service.instance.id") // ServiceVersionKey is the attribute Key conforming to the // "service.version" semantic conventions. It represents the version string // of the service API or implementation. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2.0.0' ServiceVersionKey = attribute.Key("service.version") ) // ServiceName returns an attribute KeyValue conforming to the // "service.name" semantic conventions. It represents the logical name of the // service. func ServiceName(val string) attribute.KeyValue { return ServiceNameKey.String(val) } // ServiceNamespace returns an attribute KeyValue conforming to the // "service.namespace" semantic conventions. It represents a namespace for // `service.name`. func ServiceNamespace(val string) attribute.KeyValue { return ServiceNamespaceKey.String(val) } // ServiceInstanceID returns an attribute KeyValue conforming to the // "service.instance.id" semantic conventions. It represents the string ID of // the service instance. func ServiceInstanceID(val string) attribute.KeyValue { return ServiceInstanceIDKey.String(val) } // ServiceVersion returns an attribute KeyValue conforming to the // "service.version" semantic conventions. It represents the version string of // the service API or implementation. func ServiceVersion(val string) attribute.KeyValue { return ServiceVersionKey.String(val) } // The telemetry SDK used to capture data recorded by the instrumentation // libraries. const ( // TelemetrySDKNameKey is the attribute Key conforming to the // "telemetry.sdk.name" semantic conventions. It represents the name of the // telemetry SDK as defined above. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") // TelemetrySDKLanguageKey is the attribute Key conforming to the // "telemetry.sdk.language" semantic conventions. It represents the // language of the telemetry SDK. 
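// Illustrative usage: a minimal sketch of the service attributes above,
// merged with the SDK defaults so that telemetry.sdk.* and the fallback
// service.name are preserved. Assumes go.opentelemetry.io/otel/sdk/resource
// imported as resource; names and IDs are example placeholders.
//
//	svc := resource.NewWithAttributes(
//		SchemaURL,
//		ServiceName("shoppingcart"),
//		ServiceNamespace("Shop"),
//		ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"), // placeholder
//		ServiceVersion("2.0.0"),
//	)
//	res, _ := resource.Merge(resource.Default(), svc)
//	_ = res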
// // Type: Enum // RequirementLevel: Optional // Stability: stable TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") // TelemetrySDKVersionKey is the attribute Key conforming to the // "telemetry.sdk.version" semantic conventions. It represents the version // string of the telemetry SDK. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.2.3' TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") // TelemetryAutoVersionKey is the attribute Key conforming to the // "telemetry.auto.version" semantic conventions. It represents the version // string of the auto instrumentation agent, if used. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.2.3' TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") ) var ( // cpp TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") // dotnet TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") // erlang TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") // go TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") // java TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") // nodejs TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") // php TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") // python TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") // ruby TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") // webjs TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") // swift TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") ) // TelemetrySDKName returns an attribute KeyValue conforming to the // "telemetry.sdk.name" semantic conventions. It represents the name of the // telemetry SDK as defined above. func TelemetrySDKName(val string) attribute.KeyValue { return TelemetrySDKNameKey.String(val) } // TelemetrySDKVersion returns an attribute KeyValue conforming to the // "telemetry.sdk.version" semantic conventions. It represents the version // string of the telemetry SDK. func TelemetrySDKVersion(val string) attribute.KeyValue { return TelemetrySDKVersionKey.String(val) } // TelemetryAutoVersion returns an attribute KeyValue conforming to the // "telemetry.auto.version" semantic conventions. It represents the version // string of the auto instrumentation agent, if used. func TelemetryAutoVersion(val string) attribute.KeyValue { return TelemetryAutoVersionKey.String(val) } // Resource describing the packaged software running the application code. Web // engines are typically executed using process.runtime. const ( // WebEngineNameKey is the attribute Key conforming to the "webengine.name" // semantic conventions. It represents the name of the web engine. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'WildFly' WebEngineNameKey = attribute.Key("webengine.name") // WebEngineVersionKey is the attribute Key conforming to the // "webengine.version" semantic conventions. It represents the version of // the web engine. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '21.0.0' WebEngineVersionKey = attribute.Key("webengine.version") // WebEngineDescriptionKey is the attribute Key conforming to the // "webengine.description" semantic conventions. It represents the // additional description of the web engine (e.g. detailed version and // edition information). 
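// Illustrative usage: the telemetry.sdk.* attributes above are normally set
// by the SDK itself (resource.Default already includes them for Go), so
// setting them by hand is rarely needed. A minimal sketch of the explicit
// form, with a placeholder version string:
//
//	attrs := []attribute.KeyValue{
//		TelemetrySDKName("opentelemetry"),
//		TelemetrySDKLanguageGo,
//		TelemetrySDKVersion("1.21.0"), // placeholder
//	}
//	_ = attrs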
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - // 2.2.2.Final' WebEngineDescriptionKey = attribute.Key("webengine.description") ) // WebEngineName returns an attribute KeyValue conforming to the // "webengine.name" semantic conventions. It represents the name of the web // engine. func WebEngineName(val string) attribute.KeyValue { return WebEngineNameKey.String(val) } // WebEngineVersion returns an attribute KeyValue conforming to the // "webengine.version" semantic conventions. It represents the version of the // web engine. func WebEngineVersion(val string) attribute.KeyValue { return WebEngineVersionKey.String(val) } // WebEngineDescription returns an attribute KeyValue conforming to the // "webengine.description" semantic conventions. It represents the additional // description of the web engine (e.g. detailed version and edition // information). func WebEngineDescription(val string) attribute.KeyValue { return WebEngineDescriptionKey.String(val) } // Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's // concepts. const ( // OTelScopeNameKey is the attribute Key conforming to the // "otel.scope.name" semantic conventions. It represents the name of the // instrumentation scope - (`InstrumentationScope.Name` in OTLP). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'io.opentelemetry.contrib.mongodb' OTelScopeNameKey = attribute.Key("otel.scope.name") // OTelScopeVersionKey is the attribute Key conforming to the // "otel.scope.version" semantic conventions. It represents the version of // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.0.0' OTelScopeVersionKey = attribute.Key("otel.scope.version") ) // OTelScopeName returns an attribute KeyValue conforming to the // "otel.scope.name" semantic conventions. It represents the name of the // instrumentation scope - (`InstrumentationScope.Name` in OTLP). func OTelScopeName(val string) attribute.KeyValue { return OTelScopeNameKey.String(val) } // OTelScopeVersion returns an attribute KeyValue conforming to the // "otel.scope.version" semantic conventions. It represents the version of the // instrumentation scope - (`InstrumentationScope.Version` in OTLP). func OTelScopeVersion(val string) attribute.KeyValue { return OTelScopeVersionKey.String(val) } // Span attributes used by non-OTLP exporters to represent OpenTelemetry // Scope's concepts. const ( // OTelLibraryNameKey is the attribute Key conforming to the // "otel.library.name" semantic conventions. It represents the deprecated, // use the `otel.scope.name` attribute. // // Type: string // RequirementLevel: Optional // Stability: deprecated // Examples: 'io.opentelemetry.contrib.mongodb' OTelLibraryNameKey = attribute.Key("otel.library.name") // OTelLibraryVersionKey is the attribute Key conforming to the // "otel.library.version" semantic conventions. It represents the // deprecated, use the `otel.scope.version` attribute. // // Type: string // RequirementLevel: Optional // Stability: deprecated // Examples: '1.0.0' OTelLibraryVersionKey = attribute.Key("otel.library.version") ) // OTelLibraryName returns an attribute KeyValue conforming to the // "otel.library.name" semantic conventions. It represents the deprecated, use // the `otel.scope.name` attribute. 
func OTelLibraryName(val string) attribute.KeyValue { return OTelLibraryNameKey.String(val) } // OTelLibraryVersion returns an attribute KeyValue conforming to the // "otel.library.version" semantic conventions. It represents the deprecated, // use the `otel.scope.version` attribute. func OTelLibraryVersion(val string) attribute.KeyValue { return OTelLibraryVersionKey.String(val) } opentelemetry-go-1.21.0/semconv/v1.18.0/schema.go000066400000000000000000000017141452547353200212630ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.18.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ const SchemaURL = "https://opentelemetry.io/schemas/1.18.0" opentelemetry-go-1.21.0/semconv/v1.18.0/trace.go000066400000000000000000004142571452547353200211330ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.18.0" import "go.opentelemetry.io/otel/attribute" // The shared attributes used to report a single exception associated with a // span or log. const ( // ExceptionTypeKey is the attribute Key conforming to the "exception.type" // semantic conventions. It represents the type of the exception (its // fully-qualified class name, if applicable). The dynamic type of the // exception should be preferred over the static type in languages that // support it. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'java.net.ConnectException', 'OSError' ExceptionTypeKey = attribute.Key("exception.type") // ExceptionMessageKey is the attribute Key conforming to the // "exception.message" semantic conventions. It represents the exception // message. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Division by zero', "Can't convert 'int' object to str // implicitly" ExceptionMessageKey = attribute.Key("exception.message") // ExceptionStacktraceKey is the attribute Key conforming to the // "exception.stacktrace" semantic conventions. It represents a stacktrace // as a string in the natural representation for the language runtime. 
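// Illustrative usage: a minimal sketch of recording the exception attributes
// above on a span event. Assumes go.opentelemetry.io/otel/trace is imported
// as trace and that span is an active trace.Span; the SDK's span.RecordError
// produces an equivalent event from an error value.
//
//	span.AddEvent("exception", trace.WithAttributes(
//		ExceptionTypeKey.String("OSError"),
//		ExceptionMessageKey.String("Division by zero"),
//	))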
The // representation is to be determined and documented by each language SIG. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test // exception\\n at ' // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' ExceptionStacktraceKey = attribute.Key("exception.stacktrace") ) // ExceptionType returns an attribute KeyValue conforming to the // "exception.type" semantic conventions. It represents the type of the // exception (its fully-qualified class name, if applicable). The dynamic type // of the exception should be preferred over the static type in languages that // support it. func ExceptionType(val string) attribute.KeyValue { return ExceptionTypeKey.String(val) } // ExceptionMessage returns an attribute KeyValue conforming to the // "exception.message" semantic conventions. It represents the exception // message. func ExceptionMessage(val string) attribute.KeyValue { return ExceptionMessageKey.String(val) } // ExceptionStacktrace returns an attribute KeyValue conforming to the // "exception.stacktrace" semantic conventions. It represents a stacktrace as a // string in the natural representation for the language runtime. The // representation is to be determined and documented by each language SIG. func ExceptionStacktrace(val string) attribute.KeyValue { return ExceptionStacktraceKey.String(val) } // Attributes for Events represented using Log Records. const ( // EventNameKey is the attribute Key conforming to the "event.name" // semantic conventions. It represents the name identifies the event. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'click', 'exception' EventNameKey = attribute.Key("event.name") // EventDomainKey is the attribute Key conforming to the "event.domain" // semantic conventions. It represents the domain identifies the business // context for the events. // // Type: Enum // RequirementLevel: Required // Stability: stable // Note: Events across different domains may have same `event.name`, yet be // unrelated events. EventDomainKey = attribute.Key("event.domain") ) var ( // Events from browser apps EventDomainBrowser = EventDomainKey.String("browser") // Events from mobile apps EventDomainDevice = EventDomainKey.String("device") // Events from Kubernetes EventDomainK8S = EventDomainKey.String("k8s") ) // EventName returns an attribute KeyValue conforming to the "event.name" // semantic conventions. It represents the name identifies the event. func EventName(val string) attribute.KeyValue { return EventNameKey.String(val) } // Span attributes used by AWS Lambda (in addition to general `faas` // attributes). const ( // AWSLambdaInvokedARNKey is the attribute Key conforming to the // "aws.lambda.invoked_arn" semantic conventions. It represents the full // invoked ARN as provided on the `Context` passed to the function // (`Lambda-Runtime-Invoked-Function-ARN` header on the // `/runtime/invocation/next` applicable). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' // Note: This may be different from `faas.id` if an alias is involved. AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") ) // AWSLambdaInvokedARN returns an attribute KeyValue conforming to the // "aws.lambda.invoked_arn" semantic conventions. 
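// Illustrative usage sketch, not part of the generated conventions: recording
// an error as an "exception" span event with the exception.* constructors
// defined above. Only the core OpenTelemetry trace API is used; the helper
// name and the concrete type string are assumptions for the example.
//
//	import (
//		"go.opentelemetry.io/otel/trace"
//
//		semconv "go.opentelemetry.io/otel/semconv/v1.18.0"
//	)
//
//	// recordException attaches the conventional exception attributes to an
//	// "exception" event on an already-started span.
//	func recordException(span trace.Span, err error) {
//		span.AddEvent("exception", trace.WithAttributes(
//			semconv.ExceptionType("*net.OpError"), // placeholder type name
//			semconv.ExceptionMessage(err.Error()),
//		))
//	}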
It represents the full // invoked ARN as provided on the `Context` passed to the function // (`Lambda-Runtime-Invoked-Function-ARN` header on the // `/runtime/invocation/next` applicable). func AWSLambdaInvokedARN(val string) attribute.KeyValue { return AWSLambdaInvokedARNKey.String(val) } // Attributes for CloudEvents. CloudEvents is a specification on how to define // event data in a standard way. These attributes can be attached to spans when // performing operations with CloudEvents, regardless of the protocol being // used. const ( // CloudeventsEventIDKey is the attribute Key conforming to the // "cloudevents.event_id" semantic conventions. It represents the // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) // uniquely identifies the event. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") // CloudeventsEventSourceKey is the attribute Key conforming to the // "cloudevents.event_source" semantic conventions. It represents the // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) // identifies the context in which an event happened. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'https://github.com/cloudevents', // '/cloudevents/spec/pull/123', 'my-service' CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") // CloudeventsEventSpecVersionKey is the attribute Key conforming to the // "cloudevents.event_spec_version" semantic conventions. It represents the // [version of the CloudEvents // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) // which the event uses. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.0' CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") // CloudeventsEventTypeKey is the attribute Key conforming to the // "cloudevents.event_type" semantic conventions. It represents the // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) // contains a value describing the type of event related to the originating // occurrence. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'com.github.pull_request.opened', // 'com.example.object.deleted.v2' CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") // CloudeventsEventSubjectKey is the attribute Key conforming to the // "cloudevents.event_subject" semantic conventions. It represents the // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) // of the event in the context of the event producer (identified by // source). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'mynewfile.jpg' CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") ) // CloudeventsEventID returns an attribute KeyValue conforming to the // "cloudevents.event_id" semantic conventions. It represents the // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) // uniquely identifies the event. func CloudeventsEventID(val string) attribute.KeyValue { return CloudeventsEventIDKey.String(val) } // CloudeventsEventSource returns an attribute KeyValue conforming to the // "cloudevents.event_source" semantic conventions. 
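// Illustrative usage sketch, not part of the generated conventions: a Lambda
// handler span annotated with the invoked ARN. The helper name is an
// assumption, and the imports match the exception sketch earlier in this
// file.
//
//	func annotateLambdaSpan(span trace.Span, invokedARN string) {
//		// invokedARN is typically read from the Lambda context, e.g.
//		// "arn:aws:lambda:us-east-1:123456:function:myfunction:myalias".
//		span.SetAttributes(semconv.AWSLambdaInvokedARN(invokedARN))
//	}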
It represents the // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) // identifies the context in which an event happened. func CloudeventsEventSource(val string) attribute.KeyValue { return CloudeventsEventSourceKey.String(val) } // CloudeventsEventSpecVersion returns an attribute KeyValue conforming to // the "cloudevents.event_spec_version" semantic conventions. It represents the // [version of the CloudEvents // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) // which the event uses. func CloudeventsEventSpecVersion(val string) attribute.KeyValue { return CloudeventsEventSpecVersionKey.String(val) } // CloudeventsEventType returns an attribute KeyValue conforming to the // "cloudevents.event_type" semantic conventions. It represents the // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) // contains a value describing the type of event related to the originating // occurrence. func CloudeventsEventType(val string) attribute.KeyValue { return CloudeventsEventTypeKey.String(val) } // CloudeventsEventSubject returns an attribute KeyValue conforming to the // "cloudevents.event_subject" semantic conventions. It represents the // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) // of the event in the context of the event producer (identified by source). func CloudeventsEventSubject(val string) attribute.KeyValue { return CloudeventsEventSubjectKey.String(val) } // Semantic conventions for the OpenTracing Shim const ( // OpentracingRefTypeKey is the attribute Key conforming to the // "opentracing.ref_type" semantic conventions. It represents the // parent-child Reference type // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: The causal relationship between a child Span and a parent Span. OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") ) var ( // The parent Span depends on the child Span in some capacity OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") // The parent Span does not depend in any way on the result of the child Span OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") ) // The attributes used to perform database client calls. const ( // DBSystemKey is the attribute Key conforming to the "db.system" semantic // conventions. It represents an identifier for the database management // system (DBMS) product being used. See below for a list of well-known // identifiers. // // Type: Enum // RequirementLevel: Required // Stability: stable DBSystemKey = attribute.Key("db.system") // DBConnectionStringKey is the attribute Key conforming to the // "db.connection_string" semantic conventions. It represents the // connection string used to connect to the database. It is recommended to // remove embedded credentials. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' DBConnectionStringKey = attribute.Key("db.connection_string") // DBUserKey is the attribute Key conforming to the "db.user" semantic // conventions. It represents the username for accessing the database. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'readonly_user', 'reporting_user' DBUserKey = attribute.Key("db.user") // DBJDBCDriverClassnameKey is the attribute Key conforming to the // "db.jdbc.driver_classname" semantic conventions. 
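// Illustrative usage sketch, not part of the generated conventions: attaching
// CloudEvents attributes to a span that processes an event. The literal
// values mirror the documented examples above; the helper name is an
// assumption, and the imports match the exception sketch earlier in this
// file.
//
//	func annotateCloudEventSpan(span trace.Span) {
//		span.SetAttributes(
//			semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
//			semconv.CloudeventsEventSource("https://github.com/cloudevents"),
//			semconv.CloudeventsEventType("com.github.pull_request.opened"),
//			semconv.CloudeventsEventSpecVersion("1.0"),
//		)
//	}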
It represents the // fully-qualified class name of the [Java Database Connectivity // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) // driver used to connect. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'org.postgresql.Driver', // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") // DBNameKey is the attribute Key conforming to the "db.name" semantic // conventions. It represents the this attribute is used to report the name // of the database being accessed. For commands that switch the database, // this should be set to the target database (even if the command fails). // // Type: string // RequirementLevel: ConditionallyRequired (If applicable.) // Stability: stable // Examples: 'customers', 'main' // Note: In some SQL databases, the database name to be used is called // "schema name". In case there are multiple layers that could be // considered for database name (e.g. Oracle instance name and schema // name), the database name to be used is the more specific layer (e.g. // Oracle schema name). DBNameKey = attribute.Key("db.name") // DBStatementKey is the attribute Key conforming to the "db.statement" // semantic conventions. It represents the database statement being // executed. // // Type: string // RequirementLevel: ConditionallyRequired (If applicable and not // explicitly disabled via instrumentation configuration.) // Stability: stable // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' // Note: The value may be sanitized to exclude sensitive information. DBStatementKey = attribute.Key("db.statement") // DBOperationKey is the attribute Key conforming to the "db.operation" // semantic conventions. It represents the name of the operation being // executed, e.g. the [MongoDB command // name](https://docs.mongodb.com/manual/reference/command/#database-operations) // such as `findAndModify`, or the SQL keyword. // // Type: string // RequirementLevel: ConditionallyRequired (If `db.statement` is not // applicable.) // Stability: stable // Examples: 'findAndModify', 'HMSET', 'SELECT' // Note: When setting this to an SQL keyword, it is not recommended to // attempt any client-side parsing of `db.statement` just to get this // property, but it should be set if the operation name is provided by the // library being instrumented. If the SQL statement has an ambiguous // operation, or performs more than one operation, this value may be // omitted. DBOperationKey = attribute.Key("db.operation") ) var ( // Some other SQL database. Fallback only. 
See notes DBSystemOtherSQL = DBSystemKey.String("other_sql") // Microsoft SQL Server DBSystemMSSQL = DBSystemKey.String("mssql") // Microsoft SQL Server Compact DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") // MySQL DBSystemMySQL = DBSystemKey.String("mysql") // Oracle Database DBSystemOracle = DBSystemKey.String("oracle") // IBM DB2 DBSystemDB2 = DBSystemKey.String("db2") // PostgreSQL DBSystemPostgreSQL = DBSystemKey.String("postgresql") // Amazon Redshift DBSystemRedshift = DBSystemKey.String("redshift") // Apache Hive DBSystemHive = DBSystemKey.String("hive") // Cloudscape DBSystemCloudscape = DBSystemKey.String("cloudscape") // HyperSQL DataBase DBSystemHSQLDB = DBSystemKey.String("hsqldb") // Progress Database DBSystemProgress = DBSystemKey.String("progress") // SAP MaxDB DBSystemMaxDB = DBSystemKey.String("maxdb") // SAP HANA DBSystemHanaDB = DBSystemKey.String("hanadb") // Ingres DBSystemIngres = DBSystemKey.String("ingres") // FirstSQL DBSystemFirstSQL = DBSystemKey.String("firstsql") // EnterpriseDB DBSystemEDB = DBSystemKey.String("edb") // InterSystems Caché DBSystemCache = DBSystemKey.String("cache") // Adabas (Adaptable Database System) DBSystemAdabas = DBSystemKey.String("adabas") // Firebird DBSystemFirebird = DBSystemKey.String("firebird") // Apache Derby DBSystemDerby = DBSystemKey.String("derby") // FileMaker DBSystemFilemaker = DBSystemKey.String("filemaker") // Informix DBSystemInformix = DBSystemKey.String("informix") // InstantDB DBSystemInstantDB = DBSystemKey.String("instantdb") // InterBase DBSystemInterbase = DBSystemKey.String("interbase") // MariaDB DBSystemMariaDB = DBSystemKey.String("mariadb") // Netezza DBSystemNetezza = DBSystemKey.String("netezza") // Pervasive PSQL DBSystemPervasive = DBSystemKey.String("pervasive") // PointBase DBSystemPointbase = DBSystemKey.String("pointbase") // SQLite DBSystemSqlite = DBSystemKey.String("sqlite") // Sybase DBSystemSybase = DBSystemKey.String("sybase") // Teradata DBSystemTeradata = DBSystemKey.String("teradata") // Vertica DBSystemVertica = DBSystemKey.String("vertica") // H2 DBSystemH2 = DBSystemKey.String("h2") // ColdFusion IMQ DBSystemColdfusion = DBSystemKey.String("coldfusion") // Apache Cassandra DBSystemCassandra = DBSystemKey.String("cassandra") // Apache HBase DBSystemHBase = DBSystemKey.String("hbase") // MongoDB DBSystemMongoDB = DBSystemKey.String("mongodb") // Redis DBSystemRedis = DBSystemKey.String("redis") // Couchbase DBSystemCouchbase = DBSystemKey.String("couchbase") // CouchDB DBSystemCouchDB = DBSystemKey.String("couchdb") // Microsoft Azure Cosmos DB DBSystemCosmosDB = DBSystemKey.String("cosmosdb") // Amazon DynamoDB DBSystemDynamoDB = DBSystemKey.String("dynamodb") // Neo4j DBSystemNeo4j = DBSystemKey.String("neo4j") // Apache Geode DBSystemGeode = DBSystemKey.String("geode") // Elasticsearch DBSystemElasticsearch = DBSystemKey.String("elasticsearch") // Memcached DBSystemMemcached = DBSystemKey.String("memcached") // CockroachDB DBSystemCockroachdb = DBSystemKey.String("cockroachdb") // OpenSearch DBSystemOpensearch = DBSystemKey.String("opensearch") // ClickHouse DBSystemClickhouse = DBSystemKey.String("clickhouse") // Cloud Spanner DBSystemSpanner = DBSystemKey.String("spanner") ) // DBConnectionString returns an attribute KeyValue conforming to the // "db.connection_string" semantic conventions. It represents the connection // string used to connect to the database. It is recommended to remove embedded // credentials. 
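// Illustrative usage sketch, not part of the generated conventions: a
// database client span annotated with the general db.* attributes and one of
// the db.system values above. Literal values mirror the documented examples;
// the helper name is an assumption, and the imports match the exception
// sketch earlier in this file.
//
//	func annotatePostgresSpan(span trace.Span) {
//		span.SetAttributes(
//			semconv.DBSystemPostgreSQL,
//			semconv.DBName("customers"),
//			semconv.DBUser("readonly_user"),
//			semconv.DBStatement("SELECT * FROM wuser_table"),
//			semconv.DBOperation("SELECT"),
//		)
//	}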
func DBConnectionString(val string) attribute.KeyValue { return DBConnectionStringKey.String(val) } // DBUser returns an attribute KeyValue conforming to the "db.user" semantic // conventions. It represents the username for accessing the database. func DBUser(val string) attribute.KeyValue { return DBUserKey.String(val) } // DBJDBCDriverClassname returns an attribute KeyValue conforming to the // "db.jdbc.driver_classname" semantic conventions. It represents the // fully-qualified class name of the [Java Database Connectivity // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver // used to connect. func DBJDBCDriverClassname(val string) attribute.KeyValue { return DBJDBCDriverClassnameKey.String(val) } // DBName returns an attribute KeyValue conforming to the "db.name" semantic // conventions. It represents the this attribute is used to report the name of // the database being accessed. For commands that switch the database, this // should be set to the target database (even if the command fails). func DBName(val string) attribute.KeyValue { return DBNameKey.String(val) } // DBStatement returns an attribute KeyValue conforming to the // "db.statement" semantic conventions. It represents the database statement // being executed. func DBStatement(val string) attribute.KeyValue { return DBStatementKey.String(val) } // DBOperation returns an attribute KeyValue conforming to the // "db.operation" semantic conventions. It represents the name of the operation // being executed, e.g. the [MongoDB command // name](https://docs.mongodb.com/manual/reference/command/#database-operations) // such as `findAndModify`, or the SQL keyword. func DBOperation(val string) attribute.KeyValue { return DBOperationKey.String(val) } // Connection-level attributes for Microsoft SQL Server const ( // DBMSSQLInstanceNameKey is the attribute Key conforming to the // "db.mssql.instance_name" semantic conventions. It represents the // Microsoft SQL Server [instance // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) // connecting to. This name is used to determine the port of a named // instance. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MSSQLSERVER' // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no // longer required (but still recommended if non-standard). DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") ) // DBMSSQLInstanceName returns an attribute KeyValue conforming to the // "db.mssql.instance_name" semantic conventions. It represents the Microsoft // SQL Server [instance // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) // connecting to. This name is used to determine the port of a named instance. func DBMSSQLInstanceName(val string) attribute.KeyValue { return DBMSSQLInstanceNameKey.String(val) } // Call-level attributes for Cassandra const ( // DBCassandraPageSizeKey is the attribute Key conforming to the // "db.cassandra.page_size" semantic conventions. It represents the fetch // size used for paging, i.e. how many rows will be returned at once. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 5000 DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") // DBCassandraConsistencyLevelKey is the attribute Key conforming to the // "db.cassandra.consistency_level" semantic conventions. It represents the // consistency level of the query. 
Based on consistency values from // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). // // Type: Enum // RequirementLevel: Optional // Stability: stable DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") // DBCassandraTableKey is the attribute Key conforming to the // "db.cassandra.table" semantic conventions. It represents the name of the // primary table that the operation is acting upon, including the keyspace // name (if applicable). // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'mytable' // Note: This mirrors the db.sql.table attribute but references cassandra // rather than sql. It is not recommended to attempt any client-side // parsing of `db.statement` just to get this property, but it should be // set if it is provided by the library being instrumented. If the // operation is acting upon an anonymous table, or more than one table, // this value MUST NOT be set. DBCassandraTableKey = attribute.Key("db.cassandra.table") // DBCassandraIdempotenceKey is the attribute Key conforming to the // "db.cassandra.idempotence" semantic conventions. It represents the // whether or not the query is idempotent. // // Type: boolean // RequirementLevel: Optional // Stability: stable DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming // to the "db.cassandra.speculative_execution_count" semantic conventions. // It represents the number of times a query was speculatively executed. // Not set or `0` if the query was not executed speculatively. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 0, 2 DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") // DBCassandraCoordinatorIDKey is the attribute Key conforming to the // "db.cassandra.coordinator.id" semantic conventions. It represents the ID // of the coordinating node for a query. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") // DBCassandraCoordinatorDCKey is the attribute Key conforming to the // "db.cassandra.coordinator.dc" semantic conventions. It represents the // data center of the coordinating node for a query. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-west-2' DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") ) var ( // all DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") // each_quorum DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") // quorum DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") // local_quorum DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") // one DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") // two DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") // three DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") // local_one DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") // any DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") // serial DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") // local_serial DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") ) // DBCassandraPageSize returns an attribute KeyValue conforming to the // "db.cassandra.page_size" semantic conventions. It represents the fetch size // used for paging, i.e. how many rows will be returned at once. func DBCassandraPageSize(val int) attribute.KeyValue { return DBCassandraPageSizeKey.Int(val) } // DBCassandraTable returns an attribute KeyValue conforming to the // "db.cassandra.table" semantic conventions. It represents the name of the // primary table that the operation is acting upon, including the keyspace name // (if applicable). func DBCassandraTable(val string) attribute.KeyValue { return DBCassandraTableKey.String(val) } // DBCassandraIdempotence returns an attribute KeyValue conforming to the // "db.cassandra.idempotence" semantic conventions. It represents the whether // or not the query is idempotent. func DBCassandraIdempotence(val bool) attribute.KeyValue { return DBCassandraIdempotenceKey.Bool(val) } // DBCassandraSpeculativeExecutionCount returns an attribute KeyValue // conforming to the "db.cassandra.speculative_execution_count" semantic // conventions. It represents the number of times a query was speculatively // executed. Not set or `0` if the query was not executed speculatively. func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { return DBCassandraSpeculativeExecutionCountKey.Int(val) } // DBCassandraCoordinatorID returns an attribute KeyValue conforming to the // "db.cassandra.coordinator.id" semantic conventions. It represents the ID of // the coordinating node for a query. func DBCassandraCoordinatorID(val string) attribute.KeyValue { return DBCassandraCoordinatorIDKey.String(val) } // DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the // "db.cassandra.coordinator.dc" semantic conventions. It represents the data // center of the coordinating node for a query. func DBCassandraCoordinatorDC(val string) attribute.KeyValue { return DBCassandraCoordinatorDCKey.String(val) } // Call-level attributes for Redis const ( // DBRedisDBIndexKey is the attribute Key conforming to the // "db.redis.database_index" semantic conventions. It represents the index // of the database being accessed as used in the [`SELECT` // command](https://redis.io/commands/select), provided as an integer. 
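// Illustrative usage sketch, not part of the generated conventions: a
// Cassandra query span using the db.cassandra.* attributes above. Literal
// values are placeholders based on the documented examples; the helper name
// is an assumption, and the imports match the exception sketch earlier in
// this file.
//
//	func annotateCassandraSpan(span trace.Span) {
//		span.SetAttributes(
//			semconv.DBSystemCassandra,
//			semconv.DBCassandraTable("mytable"),
//			semconv.DBCassandraConsistencyLevelLocalQuorum,
//			semconv.DBCassandraPageSize(5000),
//			semconv.DBCassandraIdempotence(true),
//		)
//	}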
To // be used instead of the generic `db.name` attribute. // // Type: int // RequirementLevel: ConditionallyRequired (If other than the default // database (`0`).) // Stability: stable // Examples: 0, 1, 15 DBRedisDBIndexKey = attribute.Key("db.redis.database_index") ) // DBRedisDBIndex returns an attribute KeyValue conforming to the // "db.redis.database_index" semantic conventions. It represents the index of // the database being accessed as used in the [`SELECT` // command](https://redis.io/commands/select), provided as an integer. To be // used instead of the generic `db.name` attribute. func DBRedisDBIndex(val int) attribute.KeyValue { return DBRedisDBIndexKey.Int(val) } // Call-level attributes for MongoDB const ( // DBMongoDBCollectionKey is the attribute Key conforming to the // "db.mongodb.collection" semantic conventions. It represents the // collection being accessed within the database stated in `db.name`. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'customers', 'products' DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") ) // DBMongoDBCollection returns an attribute KeyValue conforming to the // "db.mongodb.collection" semantic conventions. It represents the collection // being accessed within the database stated in `db.name`. func DBMongoDBCollection(val string) attribute.KeyValue { return DBMongoDBCollectionKey.String(val) } // Call-level attributes for SQL databases const ( // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" // semantic conventions. It represents the name of the primary table that // the operation is acting upon, including the database name (if // applicable). // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'public.users', 'customers' // Note: It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting // upon an anonymous table, or more than one table, this value MUST NOT be // set. DBSQLTableKey = attribute.Key("db.sql.table") ) // DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" // semantic conventions. It represents the name of the primary table that the // operation is acting upon, including the database name (if applicable). func DBSQLTable(val string) attribute.KeyValue { return DBSQLTableKey.String(val) } // Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's // concepts. const ( // OTelStatusCodeKey is the attribute Key conforming to the // "otel.status_code" semantic conventions. It represents the name of the // code, either "OK" or "ERROR". MUST NOT be set if the status code is // UNSET. // // Type: Enum // RequirementLevel: Optional // Stability: stable OTelStatusCodeKey = attribute.Key("otel.status_code") // OTelStatusDescriptionKey is the attribute Key conforming to the // "otel.status_description" semantic conventions. It represents the // description of the Status if it has a value, otherwise not set. 
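// Illustrative usage sketch, not part of the generated conventions: a Redis
// command span combining db.system, db.redis.database_index, and
// db.statement. Literal values are placeholders; the helper name is an
// assumption, and the imports match the exception sketch earlier in this
// file.
//
//	func annotateRedisSpan(span trace.Span) {
//		span.SetAttributes(
//			semconv.DBSystemRedis,
//			semconv.DBRedisDBIndex(15),
//			semconv.DBStatement("HMSET mykey field1 value1"),
//		)
//	}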
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'resource not found' OTelStatusDescriptionKey = attribute.Key("otel.status_description") ) var ( // The operation has been validated by an Application developer or Operator to have completed successfully OTelStatusCodeOk = OTelStatusCodeKey.String("OK") // The operation contains an error OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") ) // OTelStatusDescription returns an attribute KeyValue conforming to the // "otel.status_description" semantic conventions. It represents the // description of the Status if it has a value, otherwise not set. func OTelStatusDescription(val string) attribute.KeyValue { return OTelStatusDescriptionKey.String(val) } // This semantic convention describes an instance of a function that runs // without provisioning or managing of servers (also known as serverless // functions or Function as a Service (FaaS)) with spans. const ( // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" // semantic conventions. It represents the type of the trigger which caused // this function execution. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: For the server/consumer span on the incoming side, // `faas.trigger` MUST be set. // // Clients invoking FaaS instances usually cannot set `faas.trigger`, // since they would typically need to look in the payload to determine // the event type. If clients set it, it should be the same as the // trigger that corresponding incoming would have (i.e., this has // nothing to do with the underlying transport used to make the API // call to invoke the lambda, which is often HTTP). FaaSTriggerKey = attribute.Key("faas.trigger") // FaaSExecutionKey is the attribute Key conforming to the "faas.execution" // semantic conventions. It represents the execution ID of the current // function execution. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' FaaSExecutionKey = attribute.Key("faas.execution") ) var ( // A response to some data source operation such as a database or filesystem read/write FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") // To provide an answer to an inbound HTTP request FaaSTriggerHTTP = FaaSTriggerKey.String("http") // A function is set to be executed when messages are sent to a messaging system FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") // A function is scheduled to be executed regularly FaaSTriggerTimer = FaaSTriggerKey.String("timer") // If none of the others apply FaaSTriggerOther = FaaSTriggerKey.String("other") ) // FaaSExecution returns an attribute KeyValue conforming to the // "faas.execution" semantic conventions. It represents the execution ID of the // current function execution. func FaaSExecution(val string) attribute.KeyValue { return FaaSExecutionKey.String(val) } // Semantic Convention for FaaS triggered as a response to some data source // operation such as a database or filesystem read/write. const ( // FaaSDocumentCollectionKey is the attribute Key conforming to the // "faas.document.collection" semantic conventions. It represents the name // of the source on which the triggering operation was performed. For // example, in Cloud Storage or S3 corresponds to the bucket name, and in // Cosmos DB to the database name. 
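// Illustrative usage sketch, not part of the generated conventions: an
// incoming FaaS (server) span carrying the trigger type and execution ID.
// The helper name is an assumption, and the imports match the exception
// sketch earlier in this file.
//
//	func annotateFaaSInvocationSpan(span trace.Span, executionID string) {
//		span.SetAttributes(
//			semconv.FaaSTriggerHTTP,
//			semconv.FaaSExecution(executionID),
//		)
//	}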
// // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myBucketName', 'myDBName' FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") // FaaSDocumentOperationKey is the attribute Key conforming to the // "faas.document.operation" semantic conventions. It represents the // describes the type of the operation that was performed on the data. // // Type: Enum // RequirementLevel: Required // Stability: stable FaaSDocumentOperationKey = attribute.Key("faas.document.operation") // FaaSDocumentTimeKey is the attribute Key conforming to the // "faas.document.time" semantic conventions. It represents a string // containing the time when the data was accessed in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSDocumentTimeKey = attribute.Key("faas.document.time") // FaaSDocumentNameKey is the attribute Key conforming to the // "faas.document.name" semantic conventions. It represents the document // name/table subjected to the operation. For example, in Cloud Storage or // S3 is the name of the file, and in Cosmos DB the table name. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'myFile.txt', 'myTableName' FaaSDocumentNameKey = attribute.Key("faas.document.name") ) var ( // When a new object is created FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") // When an object is modified FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") // When an object is deleted FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") ) // FaaSDocumentCollection returns an attribute KeyValue conforming to the // "faas.document.collection" semantic conventions. It represents the name of // the source on which the triggering operation was performed. For example, in // Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the // database name. func FaaSDocumentCollection(val string) attribute.KeyValue { return FaaSDocumentCollectionKey.String(val) } // FaaSDocumentTime returns an attribute KeyValue conforming to the // "faas.document.time" semantic conventions. It represents a string containing // the time when the data was accessed in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). func FaaSDocumentTime(val string) attribute.KeyValue { return FaaSDocumentTimeKey.String(val) } // FaaSDocumentName returns an attribute KeyValue conforming to the // "faas.document.name" semantic conventions. It represents the document // name/table subjected to the operation. For example, in Cloud Storage or S3 // is the name of the file, and in Cosmos DB the table name. func FaaSDocumentName(val string) attribute.KeyValue { return FaaSDocumentNameKey.String(val) } // Semantic Convention for FaaS scheduled to be executed regularly. const ( // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic // conventions. It represents a string containing the function invocation // time in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSTimeKey = attribute.Key("faas.time") // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic // conventions. It represents a string containing the schedule period as // [Cron // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0/5 * * * ? *' FaaSCronKey = attribute.Key("faas.cron") ) // FaaSTime returns an attribute KeyValue conforming to the "faas.time" // semantic conventions. It represents a string containing the function // invocation time in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). func FaaSTime(val string) attribute.KeyValue { return FaaSTimeKey.String(val) } // FaaSCron returns an attribute KeyValue conforming to the "faas.cron" // semantic conventions. It represents a string containing the schedule period // as [Cron // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). func FaaSCron(val string) attribute.KeyValue { return FaaSCronKey.String(val) } // Contains additional attributes for incoming FaaS spans. const ( // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" // semantic conventions. It represents a boolean that is true if the // serverless function is executed for the first time (aka cold-start). // // Type: boolean // RequirementLevel: Optional // Stability: stable FaaSColdstartKey = attribute.Key("faas.coldstart") ) // FaaSColdstart returns an attribute KeyValue conforming to the // "faas.coldstart" semantic conventions. It represents a boolean that is true // if the serverless function is executed for the first time (aka cold-start). func FaaSColdstart(val bool) attribute.KeyValue { return FaaSColdstartKey.Bool(val) } // Contains additional attributes for outgoing FaaS spans. const ( // FaaSInvokedNameKey is the attribute Key conforming to the // "faas.invoked_name" semantic conventions. It represents the name of the // invoked function. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'my-function' // Note: SHOULD be equal to the `faas.name` resource attribute of the // invoked function. FaaSInvokedNameKey = attribute.Key("faas.invoked_name") // FaaSInvokedProviderKey is the attribute Key conforming to the // "faas.invoked_provider" semantic conventions. It represents the cloud // provider of the invoked function. // // Type: Enum // RequirementLevel: Required // Stability: stable // Note: SHOULD be equal to the `cloud.provider` resource attribute of the // invoked function. FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") // FaaSInvokedRegionKey is the attribute Key conforming to the // "faas.invoked_region" semantic conventions. It represents the cloud // region of the invoked function. // // Type: string // RequirementLevel: ConditionallyRequired (For some cloud providers, like // AWS or GCP, the region in which a function is hosted is essential to // uniquely identify the function and also part of its endpoint. Since it's // part of the endpoint being called, the region is always known to // clients. In these cases, `faas.invoked_region` MUST be set accordingly. // If the region is unknown to the client or not required for identifying // the invoked function, setting `faas.invoked_region` is optional.) 
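// Illustrative usage sketch, not part of the generated conventions: a
// timer-triggered FaaS span using faas.time, faas.cron, and faas.coldstart.
// Literal values mirror the documented examples; the helper name is an
// assumption, and the imports match the exception sketch earlier in this
// file.
//
//	func annotateTimerTriggeredSpan(span trace.Span, coldStart bool) {
//		span.SetAttributes(
//			semconv.FaaSTriggerTimer,
//			semconv.FaaSTime("2020-01-23T13:47:06Z"),
//			semconv.FaaSCron("0/5 * * * ? *"),
//			semconv.FaaSColdstart(coldStart),
//		)
//	}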
// Stability: stable // Examples: 'eu-central-1' // Note: SHOULD be equal to the `cloud.region` resource attribute of the // invoked function. FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") ) var ( // Alibaba Cloud FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") // Amazon Web Services FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") // Microsoft Azure FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") // Google Cloud Platform FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") // Tencent Cloud FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") ) // FaaSInvokedName returns an attribute KeyValue conforming to the // "faas.invoked_name" semantic conventions. It represents the name of the // invoked function. func FaaSInvokedName(val string) attribute.KeyValue { return FaaSInvokedNameKey.String(val) } // FaaSInvokedRegion returns an attribute KeyValue conforming to the // "faas.invoked_region" semantic conventions. It represents the cloud region // of the invoked function. func FaaSInvokedRegion(val string) attribute.KeyValue { return FaaSInvokedRegionKey.String(val) } // These attributes may be used for any network related operation. const ( // NetTransportKey is the attribute Key conforming to the "net.transport" // semantic conventions. It represents the transport protocol used. See // note below. // // Type: Enum // RequirementLevel: Optional // Stability: stable NetTransportKey = attribute.Key("net.transport") // NetAppProtocolNameKey is the attribute Key conforming to the // "net.app.protocol.name" semantic conventions. It represents the // application layer protocol used. The value SHOULD be normalized to // lowercase. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'amqp', 'http', 'mqtt' NetAppProtocolNameKey = attribute.Key("net.app.protocol.name") // NetAppProtocolVersionKey is the attribute Key conforming to the // "net.app.protocol.version" semantic conventions. It represents the // version of the application layer protocol used. See note below. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '3.1.1' // Note: `net.app.protocol.version` refers to the version of the protocol // used and might be different from the protocol client's version. If the // HTTP client used has a version of `0.27.2`, but sends HTTP version // `1.1`, this attribute should be set to `1.1`. NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version") // NetSockPeerNameKey is the attribute Key conforming to the // "net.sock.peer.name" semantic conventions. It represents the remote // socket peer name. // // Type: string // RequirementLevel: Recommended (If available and different from // `net.peer.name` and if `net.sock.peer.addr` is set.) // Stability: stable // Examples: 'proxy.example.com' NetSockPeerNameKey = attribute.Key("net.sock.peer.name") // NetSockPeerAddrKey is the attribute Key conforming to the // "net.sock.peer.addr" semantic conventions. It represents the remote // socket peer address: IPv4 or IPv6 for internet protocols, path for local // communication, // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '127.0.0.1', '/tmp/mysql.sock' NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") // NetSockPeerPortKey is the attribute Key conforming to the // "net.sock.peer.port" semantic conventions. 
It represents the remote // socket peer port. // // Type: int // RequirementLevel: Recommended (If defined for the address family and if // different than `net.peer.port` and if `net.sock.peer.addr` is set.) // Stability: stable // Examples: 16456 NetSockPeerPortKey = attribute.Key("net.sock.peer.port") // NetSockFamilyKey is the attribute Key conforming to the // "net.sock.family" semantic conventions. It represents the protocol // [address // family](https://man7.org/linux/man-pages/man7/address_families.7.html) // which is used for communication. // // Type: Enum // RequirementLevel: ConditionallyRequired (If different than `inet` and if // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support // instrumentations that follow previous versions of this document.) // Stability: stable // Examples: 'inet6', 'bluetooth' NetSockFamilyKey = attribute.Key("net.sock.family") // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" // semantic conventions. It represents the logical remote hostname, see // note below. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'example.com' // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an // extra DNS lookup. NetPeerNameKey = attribute.Key("net.peer.name") // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" // semantic conventions. It represents the logical remote port number // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 80, 8080, 443 NetPeerPortKey = attribute.Key("net.peer.port") // NetHostNameKey is the attribute Key conforming to the "net.host.name" // semantic conventions. It represents the logical local hostname or // similar, see note below. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'localhost' NetHostNameKey = attribute.Key("net.host.name") // NetHostPortKey is the attribute Key conforming to the "net.host.port" // semantic conventions. It represents the logical local port number, // preferably the one that the peer used to connect // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 8080 NetHostPortKey = attribute.Key("net.host.port") // NetSockHostAddrKey is the attribute Key conforming to the // "net.sock.host.addr" semantic conventions. It represents the local // socket address. Useful in case of a multi-IP host. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '192.168.0.1' NetSockHostAddrKey = attribute.Key("net.sock.host.addr") // NetSockHostPortKey is the attribute Key conforming to the // "net.sock.host.port" semantic conventions. It represents the local // socket port number. // // Type: int // RequirementLevel: Recommended (If defined for the address family and if // different than `net.host.port` and if `net.sock.host.addr` is set.) // Stability: stable // Examples: 35555 NetSockHostPortKey = attribute.Key("net.sock.host.port") // NetHostConnectionTypeKey is the attribute Key conforming to the // "net.host.connection.type" semantic conventions. It represents the // internet connection type currently being used by the host. 
// // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'wifi' NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") // NetHostConnectionSubtypeKey is the attribute Key conforming to the // "net.host.connection.subtype" semantic conventions. It represents the // this describes more details regarding the connection.type. It may be the // type of cell technology connection, but it could be used for describing // details about a wifi connection. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'LTE' NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") // NetHostCarrierNameKey is the attribute Key conforming to the // "net.host.carrier.name" semantic conventions. It represents the name of // the mobile carrier. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'sprint' NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") // NetHostCarrierMccKey is the attribute Key conforming to the // "net.host.carrier.mcc" semantic conventions. It represents the mobile // carrier country code. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '310' NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") // NetHostCarrierMncKey is the attribute Key conforming to the // "net.host.carrier.mnc" semantic conventions. It represents the mobile // carrier network code. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '001' NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") // NetHostCarrierIccKey is the attribute Key conforming to the // "net.host.carrier.icc" semantic conventions. It represents the ISO // 3166-1 alpha-2 2-character country code associated with the mobile // carrier network. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'DE' NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") ) var ( // ip_tcp NetTransportTCP = NetTransportKey.String("ip_tcp") // ip_udp NetTransportUDP = NetTransportKey.String("ip_udp") // Named or anonymous pipe. See note below NetTransportPipe = NetTransportKey.String("pipe") // In-process communication NetTransportInProc = NetTransportKey.String("inproc") // Something else (non IP-based) NetTransportOther = NetTransportKey.String("other") ) var ( // IPv4 address NetSockFamilyInet = NetSockFamilyKey.String("inet") // IPv6 address NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") // Unix domain socket path NetSockFamilyUnix = NetSockFamilyKey.String("unix") ) var ( // wifi NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") // wired NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") // cell NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") // unavailable NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") // unknown NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") ) var ( // GPRS NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") // EDGE NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") // UMTS NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") // CDMA NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") // EVDO Rel. 0 NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") // EVDO Rev. 
A NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") // CDMA2000 1XRTT NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") // HSDPA NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") // HSUPA NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") // HSPA NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") // IDEN NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") // EVDO Rev. B NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") // LTE NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") // EHRPD NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") // HSPAP NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") // GSM NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") // TD-SCDMA NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") // IWLAN NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") // 5G NR (New Radio) NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") // 5G NRNSA (New Radio Non-Standalone) NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") // LTE CA NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") ) // NetAppProtocolName returns an attribute KeyValue conforming to the // "net.app.protocol.name" semantic conventions. It represents the application // layer protocol used. The value SHOULD be normalized to lowercase. func NetAppProtocolName(val string) attribute.KeyValue { return NetAppProtocolNameKey.String(val) } // NetAppProtocolVersion returns an attribute KeyValue conforming to the // "net.app.protocol.version" semantic conventions. It represents the version // of the application layer protocol used. See note below. func NetAppProtocolVersion(val string) attribute.KeyValue { return NetAppProtocolVersionKey.String(val) } // NetSockPeerName returns an attribute KeyValue conforming to the // "net.sock.peer.name" semantic conventions. It represents the remote socket // peer name. func NetSockPeerName(val string) attribute.KeyValue { return NetSockPeerNameKey.String(val) } // NetSockPeerAddr returns an attribute KeyValue conforming to the // "net.sock.peer.addr" semantic conventions. It represents the remote socket // peer address: IPv4 or IPv6 for internet protocols, path for local // communication, // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). func NetSockPeerAddr(val string) attribute.KeyValue { return NetSockPeerAddrKey.String(val) } // NetSockPeerPort returns an attribute KeyValue conforming to the // "net.sock.peer.port" semantic conventions. It represents the remote socket // peer port. func NetSockPeerPort(val int) attribute.KeyValue { return NetSockPeerPortKey.Int(val) } // NetPeerName returns an attribute KeyValue conforming to the // "net.peer.name" semantic conventions. It represents the logical remote // hostname, see note below. func NetPeerName(val string) attribute.KeyValue { return NetPeerNameKey.String(val) } // NetPeerPort returns an attribute KeyValue conforming to the // "net.peer.port" semantic conventions. It represents the logical remote port // number func NetPeerPort(val int) attribute.KeyValue { return NetPeerPortKey.Int(val) } // NetHostName returns an attribute KeyValue conforming to the // "net.host.name" semantic conventions. 
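// Illustrative usage sketch, not part of the generated conventions: a client
// span annotated with the logical peer and socket-level net.* attributes
// above. Literal values are placeholders drawn from the documented examples;
// the helper name is an assumption, and the imports match the exception
// sketch earlier in this file.
//
//	func annotateClientNetworkSpan(span trace.Span) {
//		span.SetAttributes(
//			semconv.NetTransportTCP,
//			semconv.NetAppProtocolName("http"),
//			semconv.NetAppProtocolVersion("1.1"),
//			semconv.NetPeerName("example.com"),
//			semconv.NetPeerPort(443),
//			semconv.NetSockPeerAddr("127.0.0.1"),
//		)
//	}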
It represents the logical local // hostname or similar, see note below. func NetHostName(val string) attribute.KeyValue { return NetHostNameKey.String(val) } // NetHostPort returns an attribute KeyValue conforming to the // "net.host.port" semantic conventions. It represents the logical local port // number, preferably the one that the peer used to connect func NetHostPort(val int) attribute.KeyValue { return NetHostPortKey.Int(val) } // NetSockHostAddr returns an attribute KeyValue conforming to the // "net.sock.host.addr" semantic conventions. It represents the local socket // address. Useful in case of a multi-IP host. func NetSockHostAddr(val string) attribute.KeyValue { return NetSockHostAddrKey.String(val) } // NetSockHostPort returns an attribute KeyValue conforming to the // "net.sock.host.port" semantic conventions. It represents the local socket // port number. func NetSockHostPort(val int) attribute.KeyValue { return NetSockHostPortKey.Int(val) } // NetHostCarrierName returns an attribute KeyValue conforming to the // "net.host.carrier.name" semantic conventions. It represents the name of the // mobile carrier. func NetHostCarrierName(val string) attribute.KeyValue { return NetHostCarrierNameKey.String(val) } // NetHostCarrierMcc returns an attribute KeyValue conforming to the // "net.host.carrier.mcc" semantic conventions. It represents the mobile // carrier country code. func NetHostCarrierMcc(val string) attribute.KeyValue { return NetHostCarrierMccKey.String(val) } // NetHostCarrierMnc returns an attribute KeyValue conforming to the // "net.host.carrier.mnc" semantic conventions. It represents the mobile // carrier network code. func NetHostCarrierMnc(val string) attribute.KeyValue { return NetHostCarrierMncKey.String(val) } // NetHostCarrierIcc returns an attribute KeyValue conforming to the // "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 // alpha-2 2-character country code associated with the mobile carrier network. func NetHostCarrierIcc(val string) attribute.KeyValue { return NetHostCarrierIccKey.String(val) } // Operations that access some remote service. const ( // PeerServiceKey is the attribute Key conforming to the "peer.service" // semantic conventions. It represents the // [`service.name`](../../resource/semantic_conventions/README.md#service) // of the remote service. SHOULD be equal to the actual `service.name` // resource attribute of the remote service if any. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'AuthTokenCache' PeerServiceKey = attribute.Key("peer.service") ) // PeerService returns an attribute KeyValue conforming to the // "peer.service" semantic conventions. It represents the // [`service.name`](../../resource/semantic_conventions/README.md#service) of // the remote service. SHOULD be equal to the actual `service.name` resource // attribute of the remote service if any. func PeerService(val string) attribute.KeyValue { return PeerServiceKey.String(val) } // These attributes may be used for any operation with an authenticated and/or // authorized enduser. const ( // EnduserIDKey is the attribute Key conforming to the "enduser.id" // semantic conventions. It represents the username or client_id extracted // from the access token or // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header // in the inbound request from outside the system. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'username' EnduserIDKey = attribute.Key("enduser.id") // EnduserRoleKey is the attribute Key conforming to the "enduser.role" // semantic conventions. It represents the actual/assumed role the client // is making the request under extracted from token or application security // context. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'admin' EnduserRoleKey = attribute.Key("enduser.role") // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" // semantic conventions. It represents the scopes or granted authorities // the client currently possesses extracted from token or application // security context. The value would come from the scope associated with an // [OAuth 2.0 Access // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute // value in a [SAML 2.0 // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'read:message, write:files' EnduserScopeKey = attribute.Key("enduser.scope") ) // EnduserID returns an attribute KeyValue conforming to the "enduser.id" // semantic conventions. It represents the username or client_id extracted from // the access token or // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in // the inbound request from outside the system. func EnduserID(val string) attribute.KeyValue { return EnduserIDKey.String(val) } // EnduserRole returns an attribute KeyValue conforming to the // "enduser.role" semantic conventions. It represents the actual/assumed role // the client is making the request under extracted from token or application // security context. func EnduserRole(val string) attribute.KeyValue { return EnduserRoleKey.String(val) } // EnduserScope returns an attribute KeyValue conforming to the // "enduser.scope" semantic conventions. It represents the scopes or granted // authorities the client currently possesses extracted from token or // application security context. The value would come from the scope associated // with an [OAuth 2.0 Access // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute // value in a [SAML 2.0 // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). func EnduserScope(val string) attribute.KeyValue { return EnduserScopeKey.String(val) } // These attributes may be used for any operation to store information about a // thread that started a span. const ( // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic // conventions. It represents the current "managed" thread ID (as opposed // to OS thread ID). // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 42 ThreadIDKey = attribute.Key("thread.id") // ThreadNameKey is the attribute Key conforming to the "thread.name" // semantic conventions. It represents the current thread name. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'main' ThreadNameKey = attribute.Key("thread.name") ) // ThreadID returns an attribute KeyValue conforming to the "thread.id" // semantic conventions. It represents the current "managed" thread ID (as // opposed to OS thread ID). func ThreadID(val int) attribute.KeyValue { return ThreadIDKey.Int(val) } // ThreadName returns an attribute KeyValue conforming to the "thread.name" // semantic conventions. 
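// Illustrative usage sketch, not part of the generated conventions: tagging a
// span with the authenticated end user and the current thread. Literal values
// mirror the documented examples; the helper name is an assumption, and the
// imports match the exception sketch earlier in this file.
//
//	func annotateEnduserSpan(span trace.Span) {
//		span.SetAttributes(
//			semconv.EnduserID("username"),
//			semconv.EnduserRole("admin"),
//			semconv.EnduserScope("read:message, write:files"),
//			semconv.ThreadID(42),
//			semconv.ThreadName("main"),
//		)
//	}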
It represents the current thread name. func ThreadName(val string) attribute.KeyValue { return ThreadNameKey.String(val) } // These attributes allow to report this unit of code and therefore to provide // more context about the span. const ( // CodeFunctionKey is the attribute Key conforming to the "code.function" // semantic conventions. It represents the method or function name, or // equivalent (usually rightmost part of the code unit's name). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'serveRequest' CodeFunctionKey = attribute.Key("code.function") // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" // semantic conventions. It represents the "namespace" within which // `code.function` is defined. Usually the qualified class or module name, // such that `code.namespace` + some separator + `code.function` form a // unique identifier for the code unit. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'com.example.MyHTTPService' CodeNamespaceKey = attribute.Key("code.namespace") // CodeFilepathKey is the attribute Key conforming to the "code.filepath" // semantic conventions. It represents the source code file name that // identifies the code unit as uniquely as possible (preferably an absolute // file path). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '/usr/local/MyApplication/content_root/app/index.php' CodeFilepathKey = attribute.Key("code.filepath") // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" // semantic conventions. It represents the line number in `code.filepath` // best representing the operation. It SHOULD point within the code unit // named in `code.function`. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 42 CodeLineNumberKey = attribute.Key("code.lineno") // CodeColumnKey is the attribute Key conforming to the "code.column" // semantic conventions. It represents the column number in `code.filepath` // best representing the operation. It SHOULD point within the code unit // named in `code.function`. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 16 CodeColumnKey = attribute.Key("code.column") ) // CodeFunction returns an attribute KeyValue conforming to the // "code.function" semantic conventions. It represents the method or function // name, or equivalent (usually rightmost part of the code unit's name). func CodeFunction(val string) attribute.KeyValue { return CodeFunctionKey.String(val) } // CodeNamespace returns an attribute KeyValue conforming to the // "code.namespace" semantic conventions. It represents the "namespace" within // which `code.function` is defined. Usually the qualified class or module // name, such that `code.namespace` + some separator + `code.function` form a // unique identifier for the code unit. func CodeNamespace(val string) attribute.KeyValue { return CodeNamespaceKey.String(val) } // CodeFilepath returns an attribute KeyValue conforming to the // "code.filepath" semantic conventions. It represents the source code file // name that identifies the code unit as uniquely as possible (preferably an // absolute file path). func CodeFilepath(val string) attribute.KeyValue { return CodeFilepathKey.String(val) } // CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" // semantic conventions. It represents the line number in `code.filepath` best // representing the operation. 
It SHOULD point within the code unit named in // `code.function`. func CodeLineNumber(val int) attribute.KeyValue { return CodeLineNumberKey.Int(val) } // CodeColumn returns an attribute KeyValue conforming to the "code.column" // semantic conventions. It represents the column number in `code.filepath` // best representing the operation. It SHOULD point within the code unit named // in `code.function`. func CodeColumn(val int) attribute.KeyValue { return CodeColumnKey.Int(val) } // Semantic conventions for HTTP client and server Spans. const ( // HTTPMethodKey is the attribute Key conforming to the "http.method" // semantic conventions. It represents the HTTP request method. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'GET', 'POST', 'HEAD' HTTPMethodKey = attribute.Key("http.method") // HTTPStatusCodeKey is the attribute Key conforming to the // "http.status_code" semantic conventions. It represents the [HTTP // response status code](https://tools.ietf.org/html/rfc7231#section-6). // // Type: int // RequirementLevel: ConditionallyRequired (If and only if one was // received/sent.) // Stability: stable // Examples: 200 HTTPStatusCodeKey = attribute.Key("http.status_code") // HTTPFlavorKey is the attribute Key conforming to the "http.flavor" // semantic conventions. It represents the kind of HTTP protocol used. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: If `net.transport` is not specified, it can be assumed to be // `IP.TCP` except if `http.flavor` is `QUIC`, in which case `IP.UDP` is // assumed. HTTPFlavorKey = attribute.Key("http.flavor") // HTTPUserAgentKey is the attribute Key conforming to the // "http.user_agent" semantic conventions. It represents the value of the // [HTTP // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) // header sent by the client. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' HTTPUserAgentKey = attribute.Key("http.user_agent") // HTTPRequestContentLengthKey is the attribute Key conforming to the // "http.request_content_length" semantic conventions. It represents the // size of the request payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as // the // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) // header. For requests using transport encoding, this should be the // compressed size. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 3495 HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") // HTTPResponseContentLengthKey is the attribute Key conforming to the // "http.response_content_length" semantic conventions. It represents the // size of the response payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as // the // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) // header. For requests using transport encoding, this should be the // compressed size.
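//
// Usage sketch (editor's note): assuming a trace.Span named span and this
// package aliased as semconv, the source-location helpers above can record
// where a span was created; the values shown are illustrative only.
//
//	span.SetAttributes(
//		semconv.CodeFunction("serveRequest"),
//		semconv.CodeNamespace("com.example.MyHTTPService"),
//		semconv.CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
//		semconv.CodeLineNumber(42),
//	)
//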
// // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 3495 HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") ) var ( // HTTP/1.0 HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") // HTTP/1.1 HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") // HTTP/2 HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") // HTTP/3 HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") // SPDY protocol HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") // QUIC protocol HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") ) // HTTPMethod returns an attribute KeyValue conforming to the "http.method" // semantic conventions. It represents the HTTP request method. func HTTPMethod(val string) attribute.KeyValue { return HTTPMethodKey.String(val) } // HTTPStatusCode returns an attribute KeyValue conforming to the // "http.status_code" semantic conventions. It represents the [HTTP response // status code](https://tools.ietf.org/html/rfc7231#section-6). func HTTPStatusCode(val int) attribute.KeyValue { return HTTPStatusCodeKey.Int(val) } // HTTPUserAgent returns an attribute KeyValue conforming to the // "http.user_agent" semantic conventions. It represents the value of the [HTTP // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) // header sent by the client. func HTTPUserAgent(val string) attribute.KeyValue { return HTTPUserAgentKey.String(val) } // HTTPRequestContentLength returns an attribute KeyValue conforming to the // "http.request_content_length" semantic conventions. It represents the size // of the request payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) // header. For requests using transport encoding, this should be the compressed // size. func HTTPRequestContentLength(val int) attribute.KeyValue { return HTTPRequestContentLengthKey.Int(val) } // HTTPResponseContentLength returns an attribute KeyValue conforming to the // "http.response_content_length" semantic conventions. It represents the size // of the response payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) // header. For requests using transport encoding, this should be the compressed // size. func HTTPResponseContentLength(val int) attribute.KeyValue { return HTTPResponseContentLengthKey.Int(val) } // Semantic Convention for HTTP Client const ( // HTTPURLKey is the attribute Key conforming to the "http.url" semantic // conventions. It represents the full HTTP request URL in the form // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is // not transmitted over HTTP, but if it is known, it should be included // nevertheless. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' // Note: `http.url` MUST NOT contain credentials passed via URL in the form of // `https://username:password@www.example.com/`. In such a case the // attribute's value should be `https://www.example.com/`. HTTPURLKey = attribute.Key("http.url") // HTTPResendCountKey is the attribute Key conforming to the // "http.resend_count" semantic conventions. It represents the ordinal // number of request resending attempt (for any reason, including // redirects).
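//
// Usage sketch (editor's illustration): for an HTTP client or server span,
// assuming a trace.Span named span and the alias semconv, the common HTTP
// attributes above might be recorded as follows; the values are placeholders
// taken from the Examples in the doc comments.
//
//	span.SetAttributes(
//		semconv.HTTPMethod("GET"),
//		semconv.HTTPStatusCode(200),
//		semconv.HTTPUserAgent("CERN-LineMode/2.15 libwww/2.17b3"),
//		semconv.HTTPResponseContentLength(3495),
//		semconv.HTTPFlavorHTTP11,
//	)
//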
// // Type: int // RequirementLevel: Recommended (if and only if request was retried.) // Stability: stable // Examples: 3 // Note: The resend count SHOULD be updated each time an HTTP request gets // resent by the client, regardless of what was the cause of the resending // (e.g. redirection, authorization failure, 503 Server Unavailable, // network issues, or any other). HTTPResendCountKey = attribute.Key("http.resend_count") ) // HTTPURL returns an attribute KeyValue conforming to the "http.url" // semantic conventions. It represents the full HTTP request URL in the form // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not // transmitted over HTTP, but if it is known, it should be included // nevertheless. func HTTPURL(val string) attribute.KeyValue { return HTTPURLKey.String(val) } // HTTPResendCount returns an attribute KeyValue conforming to the // "http.resend_count" semantic conventions. It represents the ordinal number // of request resending attempt (for any reason, including redirects). func HTTPResendCount(val int) attribute.KeyValue { return HTTPResendCountKey.Int(val) } // Semantic Convention for HTTP Server const ( // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" // semantic conventions. It represents the URI scheme identifying the used // protocol. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'http', 'https' HTTPSchemeKey = attribute.Key("http.scheme") // HTTPTargetKey is the attribute Key conforming to the "http.target" // semantic conventions. It represents the full request target as passed in // a HTTP request line or equivalent. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: '/path/12314/?q=ddds' HTTPTargetKey = attribute.Key("http.target") // HTTPRouteKey is the attribute Key conforming to the "http.route" // semantic conventions. It represents the matched route (path template in // the format used by the respective server framework). See note below // // Type: string // RequirementLevel: ConditionallyRequired (If and only if it's available) // Stability: stable // Examples: '/users/:userID?', '{controller}/{action}/{id?}' // Note: MUST NOT be populated when this is not supported by the HTTP // server framework as the route attribute should have low-cardinality and // the URI path can NOT substitute it. // SHOULD include the [application root](#http-server-definitions) if there // is one. HTTPRouteKey = attribute.Key("http.route") // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip" // semantic conventions. It represents the IP address of the original // client behind all proxies, if known (e.g. from // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '83.164.160.102' // Note: This is not necessarily the same as `net.sock.peer.addr`, which // would // identify the network-level peer, which may be a proxy. // // This attribute should be set when a source of information different // from the one used for `net.sock.peer.addr`, is available even if that // other // source just confirms the same value as `net.sock.peer.addr`. // Rationale: For `net.sock.peer.addr`, one typically does not know if it // comes from a proxy, reverse proxy, or the actual client. 
Setting // `http.client_ip` when it's the same as `net.sock.peer.addr` means that // one is at least somewhat confident that the address is not that of // the closest proxy. HTTPClientIPKey = attribute.Key("http.client_ip") ) // HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" // semantic conventions. It represents the URI scheme identifying the used // protocol. func HTTPScheme(val string) attribute.KeyValue { return HTTPSchemeKey.String(val) } // HTTPTarget returns an attribute KeyValue conforming to the "http.target" // semantic conventions. It represents the full request target as passed in a // HTTP request line or equivalent. func HTTPTarget(val string) attribute.KeyValue { return HTTPTargetKey.String(val) } // HTTPRoute returns an attribute KeyValue conforming to the "http.route" // semantic conventions. It represents the matched route (path template in the // format used by the respective server framework). See note below func HTTPRoute(val string) attribute.KeyValue { return HTTPRouteKey.String(val) } // HTTPClientIP returns an attribute KeyValue conforming to the // "http.client_ip" semantic conventions. It represents the IP address of the // original client behind all proxies, if known (e.g. from // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). func HTTPClientIP(val string) attribute.KeyValue { return HTTPClientIPKey.String(val) } // Attributes that exist for multiple DynamoDB request types. const ( // AWSDynamoDBTableNamesKey is the attribute Key conforming to the // "aws.dynamodb.table_names" semantic conventions. It represents the keys // in the `RequestItems` object field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'Users', 'Cats' AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the // JSON-serialized value of each item in the `ConsumedCapacity` response // field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : // { "CapacityUnits": number, "ReadCapacityUnits": number, // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": // { "CapacityUnits": number, "ReadCapacityUnits": number, // "WriteCapacityUnits": number }, "TableName": "string", // "WriteCapacityUnits": number }' AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to // the "aws.dynamodb.item_collection_metrics" semantic conventions. It // represents the JSON-serialized value of the `ItemCollectionMetrics` // response field. 
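//
// Usage sketch (editor's note): a server-side HTTP span could combine the
// server-only attributes above, again assuming a trace.Span named span and
// the alias semconv; the values are placeholders. A client-side span would
// instead carry semconv.HTTPURL and, when the request is retried,
// semconv.HTTPResendCount.
//
//	span.SetAttributes(
//		semconv.HTTPScheme("https"),
//		semconv.HTTPTarget("/path/12314/?q=ddds"),
//		semconv.HTTPRoute("/users/:userID?"),
//		semconv.HTTPClientIP("83.164.160.102"),
//	)
//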
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, // "SizeEstimateRangeGB": [ number ] } ] }' AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` // request parameter. // // Type: double // RequirementLevel: Optional // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. // It represents the value of the // `ProvisionedThroughput.WriteCapacityUnits` request parameter. // // Type: double // RequirementLevel: Optional // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the // "aws.dynamodb.consistent_read" semantic conventions. It represents the // value of the `ConsistentRead` request parameter. // // Type: boolean // RequirementLevel: Optional // Stability: stable AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") // AWSDynamoDBProjectionKey is the attribute Key conforming to the // "aws.dynamodb.projection" semantic conventions. It represents the value // of the `ProjectionExpression` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Title', 'Title, Price, Color', 'Title, Description, // RelatedItems, ProductReviews' AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") // AWSDynamoDBLimitKey is the attribute Key conforming to the // "aws.dynamodb.limit" semantic conventions. It represents the value of // the `Limit` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the // value of the `AttributesToGet` request parameter. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'lives', 'id' AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") // AWSDynamoDBIndexNameKey is the attribute Key conforming to the // "aws.dynamodb.index_name" semantic conventions. It represents the value // of the `IndexName` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'name_to_group' AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") // AWSDynamoDBSelectKey is the attribute Key conforming to the // "aws.dynamodb.select" semantic conventions. It represents the value of // the `Select` request parameter. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'ALL_ATTRIBUTES', 'COUNT' AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") ) // AWSDynamoDBTableNames returns an attribute KeyValue conforming to the // "aws.dynamodb.table_names" semantic conventions. It represents the keys in // the `RequestItems` object field. func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { return AWSDynamoDBTableNamesKey.StringSlice(val) } // AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to // the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the // JSON-serialized value of each item in the `ConsumedCapacity` response field. func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { return AWSDynamoDBConsumedCapacityKey.StringSlice(val) } // AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming // to the "aws.dynamodb.item_collection_metrics" semantic conventions. It // represents the JSON-serialized value of the `ItemCollectionMetrics` response // field. func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { return AWSDynamoDBItemCollectionMetricsKey.String(val) } // AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue // conforming to the "aws.dynamodb.provisioned_read_capacity" semantic // conventions. It represents the value of the // `ProvisionedThroughput.ReadCapacityUnits` request parameter. func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) } // AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue // conforming to the "aws.dynamodb.provisioned_write_capacity" semantic // conventions. It represents the value of the // `ProvisionedThroughput.WriteCapacityUnits` request parameter. func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) } // AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the // "aws.dynamodb.consistent_read" semantic conventions. It represents the value // of the `ConsistentRead` request parameter. func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { return AWSDynamoDBConsistentReadKey.Bool(val) } // AWSDynamoDBProjection returns an attribute KeyValue conforming to the // "aws.dynamodb.projection" semantic conventions. It represents the value of // the `ProjectionExpression` request parameter. func AWSDynamoDBProjection(val string) attribute.KeyValue { return AWSDynamoDBProjectionKey.String(val) } // AWSDynamoDBLimit returns an attribute KeyValue conforming to the // "aws.dynamodb.limit" semantic conventions. It represents the value of the // `Limit` request parameter. func AWSDynamoDBLimit(val int) attribute.KeyValue { return AWSDynamoDBLimitKey.Int(val) } // AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to // the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the // value of the `AttributesToGet` request parameter. func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { return AWSDynamoDBAttributesToGetKey.StringSlice(val) } // AWSDynamoDBIndexName returns an attribute KeyValue conforming to the // "aws.dynamodb.index_name" semantic conventions. It represents the value of // the `IndexName` request parameter. 
func AWSDynamoDBIndexName(val string) attribute.KeyValue { return AWSDynamoDBIndexNameKey.String(val) } // AWSDynamoDBSelect returns an attribute KeyValue conforming to the // "aws.dynamodb.select" semantic conventions. It represents the value of the // `Select` request parameter. func AWSDynamoDBSelect(val string) attribute.KeyValue { return AWSDynamoDBSelectKey.String(val) } // DynamoDB.CreateTable const ( // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It // represents the JSON-serialized value of each item of the // `GlobalSecondaryIndexes` request field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It // represents the JSON-serialized value of each item of the // `LocalSecondaryIndexes` request field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "IndexARN": "string", "IndexName": "string", // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { // "AttributeName": "string", "KeyType": "string" } ], "Projection": { // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") ) // AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue // conforming to the "aws.dynamodb.global_secondary_indexes" semantic // conventions. It represents the JSON-serialized value of each item of the // `GlobalSecondaryIndexes` request field. func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) } // AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming // to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It // represents the JSON-serialized value of each item of the // `LocalSecondaryIndexes` request field. func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) } // DynamoDB.ListTables const ( // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents // the value of the `ExclusiveStartTableName` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Users', 'CatsTable' AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") // AWSDynamoDBTableCountKey is the attribute Key conforming to the // "aws.dynamodb.table_count" semantic conventions. It represents the // number of items in the `TableNames` response parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 20 AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") ) // AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming // to the "aws.dynamodb.exclusive_start_table" semantic conventions.
It // represents the value of the `ExclusiveStartTableName` request parameter. func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { return AWSDynamoDBExclusiveStartTableKey.String(val) } // AWSDynamoDBTableCount returns an attribute KeyValue conforming to the // "aws.dynamodb.table_count" semantic conventions. It represents the // number of items in the `TableNames` response parameter. func AWSDynamoDBTableCount(val int) attribute.KeyValue { return AWSDynamoDBTableCountKey.Int(val) } // DynamoDB.Query const ( // AWSDynamoDBScanForwardKey is the attribute Key conforming to the // "aws.dynamodb.scan_forward" semantic conventions. It represents the // value of the `ScanIndexForward` request parameter. // // Type: boolean // RequirementLevel: Optional // Stability: stable AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") ) // AWSDynamoDBScanForward returns an attribute KeyValue conforming to the // "aws.dynamodb.scan_forward" semantic conventions. It represents the value of // the `ScanIndexForward` request parameter. func AWSDynamoDBScanForward(val bool) attribute.KeyValue { return AWSDynamoDBScanForwardKey.Bool(val) } // DynamoDB.Scan const ( // AWSDynamoDBSegmentKey is the attribute Key conforming to the // "aws.dynamodb.segment" semantic conventions. It represents the value of // the `Segment` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the // "aws.dynamodb.total_segments" semantic conventions. It represents the // value of the `TotalSegments` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 100 AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") // AWSDynamoDBCountKey is the attribute Key conforming to the // "aws.dynamodb.count" semantic conventions. It represents the value of // the `Count` response parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") // AWSDynamoDBScannedCountKey is the attribute Key conforming to the // "aws.dynamodb.scanned_count" semantic conventions. It represents the // value of the `ScannedCount` response parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 50 AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") ) // AWSDynamoDBSegment returns an attribute KeyValue conforming to the // "aws.dynamodb.segment" semantic conventions. It represents the value of the // `Segment` request parameter. func AWSDynamoDBSegment(val int) attribute.KeyValue { return AWSDynamoDBSegmentKey.Int(val) } // AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the // "aws.dynamodb.total_segments" semantic conventions. It represents the value // of the `TotalSegments` request parameter. func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { return AWSDynamoDBTotalSegmentsKey.Int(val) } // AWSDynamoDBCount returns an attribute KeyValue conforming to the // "aws.dynamodb.count" semantic conventions. It represents the value of the // `Count` response parameter. func AWSDynamoDBCount(val int) attribute.KeyValue { return AWSDynamoDBCountKey.Int(val) } // AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the // "aws.dynamodb.scanned_count" semantic conventions.
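//
// Usage sketch (editor's illustration): a span describing a DynamoDB Query or
// Scan call could carry the request and response attributes above, assuming a
// trace.Span named span and the alias semconv; the values are placeholders.
//
//	span.SetAttributes(
//		semconv.AWSDynamoDBTableNames("Users"),
//		semconv.AWSDynamoDBLimit(10),
//		semconv.AWSDynamoDBSelect("ALL_ATTRIBUTES"),
//		semconv.AWSDynamoDBScanForward(true),
//		semconv.AWSDynamoDBCount(10),
//	)
//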
It represents the value // of the `ScannedCount` response parameter. func AWSDynamoDBScannedCount(val int) attribute.KeyValue { return AWSDynamoDBScannedCountKey.Int(val) } // DynamoDB.UpdateTable const ( // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to // the "aws.dynamodb.attribute_definitions" semantic conventions. It // represents the JSON-serialized value of each item in the // `AttributeDefinitions` request field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic // conventions. It represents the JSON-serialized value of each item in the // `GlobalSecondaryIndexUpdates` request field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { // "AttributeName": "string", "KeyType": "string" } ], "Projection": { // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, // "ProvisionedThroughput": { "ReadCapacityUnits": number, // "WriteCapacityUnits": number } }' AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") ) // AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming // to the "aws.dynamodb.attribute_definitions" semantic conventions. It // represents the JSON-serialized value of each item in the // `AttributeDefinitions` request field. func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) } // AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic // conventions. It represents the JSON-serialized value of each item in the // `GlobalSecondaryIndexUpdates` request field. func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) } // Semantic conventions to apply when instrumenting the GraphQL implementation. // They map GraphQL operations to attributes on a Span. const ( // GraphqlOperationNameKey is the attribute Key conforming to the // "graphql.operation.name" semantic conventions. It represents the name of // the operation being executed. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'findBookByID' GraphqlOperationNameKey = attribute.Key("graphql.operation.name") // GraphqlOperationTypeKey is the attribute Key conforming to the // "graphql.operation.type" semantic conventions. It represents the type of // the operation being executed. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'query', 'mutation', 'subscription' GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") // GraphqlDocumentKey is the attribute Key conforming to the // "graphql.document" semantic conventions. It represents the GraphQL // document being executed. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'query findBookByID { bookByID(id: ?) { name } }' // Note: The value may be sanitized to exclude sensitive information.
GraphqlDocumentKey = attribute.Key("graphql.document") ) var ( // GraphQL query GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") // GraphQL mutation GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") // GraphQL subscription GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") ) // GraphqlOperationName returns an attribute KeyValue conforming to the // "graphql.operation.name" semantic conventions. It represents the name of the // operation being executed. func GraphqlOperationName(val string) attribute.KeyValue { return GraphqlOperationNameKey.String(val) } // GraphqlDocument returns an attribute KeyValue conforming to the // "graphql.document" semantic conventions. It represents the GraphQL document // being executed. func GraphqlDocument(val string) attribute.KeyValue { return GraphqlDocumentKey.String(val) } // Semantic convention describing per-message attributes populated on messaging // spans or links. const ( // MessagingMessageIDKey is the attribute Key conforming to the // "messaging.message.id" semantic conventions. It represents a value used // by the messaging system as an identifier for the message, represented as // a string. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '452a7c7c7c7048c2f887f61572b18fc2' MessagingMessageIDKey = attribute.Key("messaging.message.id") // MessagingMessageConversationIDKey is the attribute Key conforming to the // "messaging.message.conversation_id" semantic conventions. It represents // the [conversation ID](#conversations) identifying the conversation to // which the message belongs, represented as a string. Sometimes called // "Correlation ID". // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MyConversationID' MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to // the "messaging.message.payload_size_bytes" semantic conventions. It // represents the (uncompressed) size of the message payload in bytes. Also // use this attribute if it is unknown whether the compressed or // uncompressed payload size is reported. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2738 MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key // conforming to the "messaging.message.payload_compressed_size_bytes" // semantic conventions. It represents the compressed size of the message // payload in bytes. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2048 MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") ) // MessagingMessageID returns an attribute KeyValue conforming to the // "messaging.message.id" semantic conventions. It represents a value used by // the messaging system as an identifier for the message, represented as a // string. func MessagingMessageID(val string) attribute.KeyValue { return MessagingMessageIDKey.String(val) } // MessagingMessageConversationID returns an attribute KeyValue conforming // to the "messaging.message.conversation_id" semantic conventions. It // represents the [conversation ID](#conversations) identifying the // conversation to which the message belongs, represented as a string. // Sometimes called "Correlation ID". 
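//
// Usage sketch (editor's note): a span for a GraphQL execution could record
// the GraphQL operation helpers defined just above, assuming a trace.Span
// named span and the alias semconv; the document value is a placeholder and
// may need sanitizing, as the Note above warns.
//
//	span.SetAttributes(
//		semconv.GraphqlOperationName("findBookByID"),
//		semconv.GraphqlOperationTypeQuery,
//		semconv.GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"),
//	)
//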
func MessagingMessageConversationID(val string) attribute.KeyValue { return MessagingMessageConversationIDKey.String(val) } // MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming // to the "messaging.message.payload_size_bytes" semantic conventions. It // represents the (uncompressed) size of the message payload in bytes. Also use // this attribute if it is unknown whether the compressed or uncompressed // payload size is reported. func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { return MessagingMessagePayloadSizeBytesKey.Int(val) } // MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue // conforming to the "messaging.message.payload_compressed_size_bytes" semantic // conventions. It represents the compressed size of the message payload in // bytes. func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) } // Semantic convention for attributes that describe messaging destination on // broker const ( // MessagingDestinationNameKey is the attribute Key conforming to the // "messaging.destination.name" semantic conventions. It represents the // message destination name // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MyQueue', 'MyTopic' // Note: Destination name SHOULD uniquely identify a specific queue, topic // or other entity within the broker. If // the broker does not have such notion, the destination name SHOULD // uniquely identify the broker. MessagingDestinationNameKey = attribute.Key("messaging.destination.name") // MessagingDestinationKindKey is the attribute Key conforming to the // "messaging.destination.kind" semantic conventions. It represents the // kind of message destination // // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingDestinationKindKey = attribute.Key("messaging.destination.kind") // MessagingDestinationTemplateKey is the attribute Key conforming to the // "messaging.destination.template" semantic conventions. It represents the // low cardinality representation of the messaging destination name // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '/customers/{customerID}' // Note: Destination names could be constructed from templates. An example // would be a destination name involving a user name or product id. // Although the destination name in this case is of high cardinality, the // underlying template is of low cardinality and can be effectively used // for grouping and aggregation. MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") // MessagingDestinationTemporaryKey is the attribute Key conforming to the // "messaging.destination.temporary" semantic conventions. It represents a // boolean that is true if the message destination is temporary and might // not exist anymore after messages are processed. // // Type: boolean // RequirementLevel: Optional // Stability: stable MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") // MessagingDestinationAnonymousKey is the attribute Key conforming to the // "messaging.destination.anonymous" semantic conventions. It represents a // boolean that is true if the message destination is anonymous (could be // unnamed or have auto-generated name). 
// // Type: boolean // RequirementLevel: Optional // Stability: stable MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") ) var ( // A message sent to a queue MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") // A message sent to a topic MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") ) // MessagingDestinationName returns an attribute KeyValue conforming to the // "messaging.destination.name" semantic conventions. It represents the message // destination name func MessagingDestinationName(val string) attribute.KeyValue { return MessagingDestinationNameKey.String(val) } // MessagingDestinationTemplate returns an attribute KeyValue conforming to // the "messaging.destination.template" semantic conventions. It represents the // low cardinality representation of the messaging destination name func MessagingDestinationTemplate(val string) attribute.KeyValue { return MessagingDestinationTemplateKey.String(val) } // MessagingDestinationTemporary returns an attribute KeyValue conforming to // the "messaging.destination.temporary" semantic conventions. It represents a // boolean that is true if the message destination is temporary and might not // exist anymore after messages are processed. func MessagingDestinationTemporary(val bool) attribute.KeyValue { return MessagingDestinationTemporaryKey.Bool(val) } // MessagingDestinationAnonymous returns an attribute KeyValue conforming to // the "messaging.destination.anonymous" semantic conventions. It represents a // boolean that is true if the message destination is anonymous (could be // unnamed or have auto-generated name). func MessagingDestinationAnonymous(val bool) attribute.KeyValue { return MessagingDestinationAnonymousKey.Bool(val) } // Semantic convention for attributes that describe messaging source on broker const ( // MessagingSourceNameKey is the attribute Key conforming to the // "messaging.source.name" semantic conventions. It represents the message // source name // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MyQueue', 'MyTopic' // Note: Source name SHOULD uniquely identify a specific queue, topic, or // other entity within the broker. If // the broker does not have such notion, the source name SHOULD uniquely // identify the broker. MessagingSourceNameKey = attribute.Key("messaging.source.name") // MessagingSourceKindKey is the attribute Key conforming to the // "messaging.source.kind" semantic conventions. It represents the kind of // message source // // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingSourceKindKey = attribute.Key("messaging.source.kind") // MessagingSourceTemplateKey is the attribute Key conforming to the // "messaging.source.template" semantic conventions. It represents the low // cardinality representation of the messaging source name // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '/customers/{customerID}' // Note: Source names could be constructed from templates. An example would // be a source name involving a user name or product id. Although the // source name in this case is of high cardinality, the underlying template // is of low cardinality and can be effectively used for grouping and // aggregation. MessagingSourceTemplateKey = attribute.Key("messaging.source.template") // MessagingSourceTemporaryKey is the attribute Key conforming to the // "messaging.source.temporary" semantic conventions. 
It represents a // boolean that is true if the message source is temporary and might not // exist anymore after messages are processed. // // Type: boolean // RequirementLevel: Optional // Stability: stable MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") // MessagingSourceAnonymousKey is the attribute Key conforming to the // "messaging.source.anonymous" semantic conventions. It represents a // boolean that is true if the message source is anonymous (could be // unnamed or have auto-generated name). // // Type: boolean // RequirementLevel: Optional // Stability: stable MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous") ) var ( // A message received from a queue MessagingSourceKindQueue = MessagingSourceKindKey.String("queue") // A message received from a topic MessagingSourceKindTopic = MessagingSourceKindKey.String("topic") ) // MessagingSourceName returns an attribute KeyValue conforming to the // "messaging.source.name" semantic conventions. It represents the message // source name func MessagingSourceName(val string) attribute.KeyValue { return MessagingSourceNameKey.String(val) } // MessagingSourceTemplate returns an attribute KeyValue conforming to the // "messaging.source.template" semantic conventions. It represents the low // cardinality representation of the messaging source name func MessagingSourceTemplate(val string) attribute.KeyValue { return MessagingSourceTemplateKey.String(val) } // MessagingSourceTemporary returns an attribute KeyValue conforming to the // "messaging.source.temporary" semantic conventions. It represents a boolean // that is true if the message source is temporary and might not exist anymore // after messages are processed. func MessagingSourceTemporary(val bool) attribute.KeyValue { return MessagingSourceTemporaryKey.Bool(val) } // MessagingSourceAnonymous returns an attribute KeyValue conforming to the // "messaging.source.anonymous" semantic conventions. It represents a boolean // that is true if the message source is anonymous (could be unnamed or have // auto-generated name). func MessagingSourceAnonymous(val bool) attribute.KeyValue { return MessagingSourceAnonymousKey.Bool(val) } // General attributes used in messaging systems. const ( // MessagingSystemKey is the attribute Key conforming to the // "messaging.system" semantic conventions. It represents a string // identifying the messaging system. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' MessagingSystemKey = attribute.Key("messaging.system") // MessagingOperationKey is the attribute Key conforming to the // "messaging.operation" semantic conventions. It represents a string // identifying the kind of messaging operation as defined in the [Operation // names](#operation-names) section above. // // Type: Enum // RequirementLevel: Required // Stability: stable // Note: If a custom value is used, it MUST be of low cardinality. MessagingOperationKey = attribute.Key("messaging.operation") // MessagingBatchMessageCountKey is the attribute Key conforming to the // "messaging.batch.message_count" semantic conventions. It represents the // number of messages sent, received, or processed in the scope of the // batching operation. // // Type: int // RequirementLevel: ConditionallyRequired (If the span describes an // operation on a batch of messages.) 
// Stability: stable // Examples: 0, 1, 2 // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on // spans that operate with a single message. When a messaging client // library supports both batch and single-message API for the same // operation, instrumentations SHOULD use `messaging.batch.message_count` // for batching APIs and SHOULD NOT use it for single-message APIs. MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") ) var ( // publish MessagingOperationPublish = MessagingOperationKey.String("publish") // receive MessagingOperationReceive = MessagingOperationKey.String("receive") // process MessagingOperationProcess = MessagingOperationKey.String("process") ) // MessagingSystem returns an attribute KeyValue conforming to the // "messaging.system" semantic conventions. It represents a string identifying // the messaging system. func MessagingSystem(val string) attribute.KeyValue { return MessagingSystemKey.String(val) } // MessagingBatchMessageCount returns an attribute KeyValue conforming to // the "messaging.batch.message_count" semantic conventions. It represents the // number of messages sent, received, or processed in the scope of the batching // operation. func MessagingBatchMessageCount(val int) attribute.KeyValue { return MessagingBatchMessageCountKey.Int(val) } // Semantic convention for a consumer of messages received from a messaging // system const ( // MessagingConsumerIDKey is the attribute Key conforming to the // "messaging.consumer.id" semantic conventions. It represents the // identifier for the consumer receiving a message. For Kafka, set it to // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if // both are present, or only `messaging.kafka.consumer.group`. For brokers, // such as RabbitMQ and Artemis, set it to the `client_id` of the client // consuming the message. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'mygroup - client-6' MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") ) // MessagingConsumerID returns an attribute KeyValue conforming to the // "messaging.consumer.id" semantic conventions. It represents the identifier // for the consumer receiving a message. For Kafka, set it to // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both // are present, or only `messaging.kafka.consumer.group`. For brokers, such as // RabbitMQ and Artemis, set it to the `client_id` of the client consuming the // message. func MessagingConsumerID(val string) attribute.KeyValue { return MessagingConsumerIDKey.String(val) } // Attributes for RabbitMQ const ( // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key // conforming to the "messaging.rabbitmq.destination.routing_key" semantic // conventions. It represents the rabbitMQ message routing key. // // Type: string // RequirementLevel: ConditionallyRequired (If not empty.) // Stability: stable // Examples: 'myKey' MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") ) // MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue // conforming to the "messaging.rabbitmq.destination.routing_key" semantic // conventions. It represents the rabbitMQ message routing key. 
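//
// Usage sketch (editor's illustration): a producer span for a publish
// operation could combine the general messaging attributes above, assuming a
// trace.Span named span and the alias semconv; the values are placeholders.
//
//	span.SetAttributes(
//		semconv.MessagingSystem("rabbitmq"),
//		semconv.MessagingOperationPublish,
//		semconv.MessagingDestinationName("MyQueue"),
//		semconv.MessagingMessageID("452a7c7c7c7048c2f887f61572b18fc2"),
//		semconv.MessagingMessagePayloadSizeBytes(2738),
//	)
//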
func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { return MessagingRabbitmqDestinationRoutingKeyKey.String(val) } // Attributes for Apache Kafka const ( // MessagingKafkaMessageKeyKey is the attribute Key conforming to the // "messaging.kafka.message.key" semantic conventions. It represents the // message key in Kafka. Message keys are used for grouping alike messages to ensure // they're processed on the same partition. They differ from // `messaging.message.id` in that they're not unique. If the key is `null`, // the attribute MUST NOT be set. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'myKey' // Note: If the key type is not string, its string representation has to // be supplied for the attribute. If the key has no unambiguous, canonical // string form, don't include its value. MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the // "messaging.kafka.consumer.group" semantic conventions. It represents the // name of the Kafka Consumer Group that is handling the message. Only // applies to consumers, not producers. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'my-group' MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") // MessagingKafkaClientIDKey is the attribute Key conforming to the // "messaging.kafka.client_id" semantic conventions. It represents the // client ID for the Consumer or Producer that is handling the message. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'client-5' MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to // the "messaging.kafka.destination.partition" semantic conventions. It // represents the partition the message is sent to. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2 MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the // "messaging.kafka.source.partition" semantic conventions. It represents // the partition the message is received from. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2 MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition") // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the // "messaging.kafka.message.offset" semantic conventions. It represents the // offset of a record in the corresponding Kafka partition. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 42 MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the // "messaging.kafka.message.tombstone" semantic conventions. It represents // a boolean that is true if the message is a tombstone. // // Type: boolean // RequirementLevel: ConditionallyRequired (If value is `true`. When // missing, the value is assumed to be `false`.) // Stability: stable MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") ) // MessagingKafkaMessageKey returns an attribute KeyValue conforming to the // "messaging.kafka.message.key" semantic conventions.
It represents the // message key in Kafka. Message keys are used for grouping alike messages to ensure they're // processed on the same partition. They differ from `messaging.message.id` in // that they're not unique. If the key is `null`, the attribute MUST NOT be // set. func MessagingKafkaMessageKey(val string) attribute.KeyValue { return MessagingKafkaMessageKeyKey.String(val) } // MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to // the "messaging.kafka.consumer.group" semantic conventions. It represents the // name of the Kafka Consumer Group that is handling the message. Only applies // to consumers, not producers. func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { return MessagingKafkaConsumerGroupKey.String(val) } // MessagingKafkaClientID returns an attribute KeyValue conforming to the // "messaging.kafka.client_id" semantic conventions. It represents the client // ID for the Consumer or Producer that is handling the message. func MessagingKafkaClientID(val string) attribute.KeyValue { return MessagingKafkaClientIDKey.String(val) } // MessagingKafkaDestinationPartition returns an attribute KeyValue // conforming to the "messaging.kafka.destination.partition" semantic // conventions. It represents the partition the message is sent to. func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { return MessagingKafkaDestinationPartitionKey.Int(val) } // MessagingKafkaSourcePartition returns an attribute KeyValue conforming to // the "messaging.kafka.source.partition" semantic conventions. It represents // the partition the message is received from. func MessagingKafkaSourcePartition(val int) attribute.KeyValue { return MessagingKafkaSourcePartitionKey.Int(val) } // MessagingKafkaMessageOffset returns an attribute KeyValue conforming to // the "messaging.kafka.message.offset" semantic conventions. It represents the // offset of a record in the corresponding Kafka partition. func MessagingKafkaMessageOffset(val int) attribute.KeyValue { return MessagingKafkaMessageOffsetKey.Int(val) } // MessagingKafkaMessageTombstone returns an attribute KeyValue conforming // to the "messaging.kafka.message.tombstone" semantic conventions. It // represents a boolean that is true if the message is a tombstone. func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { return MessagingKafkaMessageTombstoneKey.Bool(val) } // Attributes for Apache RocketMQ const ( // MessagingRocketmqNamespaceKey is the attribute Key conforming to the // "messaging.rocketmq.namespace" semantic conventions. It represents the // namespace of RocketMQ resources; resources in different namespaces are // individual. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myNamespace' MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") // MessagingRocketmqClientGroupKey is the attribute Key conforming to the // "messaging.rocketmq.client_group" semantic conventions. It represents // the name of the RocketMQ producer/consumer group that is handling the // message. The client type is identified by the SpanKind. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myConsumerGroup' MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") // MessagingRocketmqClientIDKey is the attribute Key conforming to the // "messaging.rocketmq.client_id" semantic conventions. It represents the // unique identifier for each client.
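//
// Usage sketch (editor's note): a Kafka consumer span could carry the
// Kafka-specific attributes above together with the general messaging ones,
// assuming a trace.Span named span and the alias semconv; the values are
// placeholders taken from the Examples in the doc comments.
//
//	span.SetAttributes(
//		semconv.MessagingSystem("kafka"),
//		semconv.MessagingKafkaMessageKey("myKey"),
//		semconv.MessagingKafkaConsumerGroup("my-group"),
//		semconv.MessagingKafkaClientID("client-5"),
//		semconv.MessagingKafkaSourcePartition(2),
//		semconv.MessagingKafkaMessageOffset(42),
//	)
//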
// // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myhost@8742@s8083jm' MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key // conforming to the "messaging.rocketmq.message.delivery_timestamp" // semantic conventions. It represents the timestamp in milliseconds that // the delay message is expected to be delivered to consumer. // // Type: int // RequirementLevel: ConditionallyRequired (If the message type is delay // and delay time level is not specified.) // Stability: stable // Examples: 1665987217045 MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key // conforming to the "messaging.rocketmq.message.delay_time_level" semantic // conventions. It represents the delay time level for delay message, which // determines the message delay time. // // Type: int // RequirementLevel: ConditionallyRequired (If the message type is delay // and delivery timestamp is not specified.) // Stability: stable // Examples: 3 MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the // "messaging.rocketmq.message.group" semantic conventions. It represents // the it is essential for FIFO message. Messages that belong to the same // message group are always processed one by one within the same consumer // group. // // Type: string // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) // Stability: stable // Examples: 'myMessageGroup' MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the // "messaging.rocketmq.message.type" semantic conventions. It represents // the type of message. // // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") // MessagingRocketmqMessageTagKey is the attribute Key conforming to the // "messaging.rocketmq.message.tag" semantic conventions. It represents the // secondary classifier of message besides topic. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'tagA' MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the // "messaging.rocketmq.message.keys" semantic conventions. It represents // the key(s) of message, another way to mark message besides message id. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'keyA', 'keyB' MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to // the "messaging.rocketmq.consumption_model" semantic conventions. It // represents the model of message consumption. This only applies to // consumer spans. 
// // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") ) var ( // Normal message MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") // FIFO message MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") // Delay message MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") // Transaction message MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") ) var ( // Clustering consumption model MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") // Broadcasting consumption model MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") ) // MessagingRocketmqNamespace returns an attribute KeyValue conforming to // the "messaging.rocketmq.namespace" semantic conventions. It represents the // namespace of RocketMQ resources, resources in different namespaces are // individual. func MessagingRocketmqNamespace(val string) attribute.KeyValue { return MessagingRocketmqNamespaceKey.String(val) } // MessagingRocketmqClientGroup returns an attribute KeyValue conforming to // the "messaging.rocketmq.client_group" semantic conventions. It represents // the name of the RocketMQ producer/consumer group that is handling the // message. The client type is identified by the SpanKind. func MessagingRocketmqClientGroup(val string) attribute.KeyValue { return MessagingRocketmqClientGroupKey.String(val) } // MessagingRocketmqClientID returns an attribute KeyValue conforming to the // "messaging.rocketmq.client_id" semantic conventions. It represents the // unique identifier for each client. func MessagingRocketmqClientID(val string) attribute.KeyValue { return MessagingRocketmqClientIDKey.String(val) } // MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue // conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic // conventions. It represents the timestamp in milliseconds that the delay // message is expected to be delivered to consumer. func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) } // MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue // conforming to the "messaging.rocketmq.message.delay_time_level" semantic // conventions. It represents the delay time level for delay message, which // determines the message delay time. func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) } // MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to // the "messaging.rocketmq.message.group" semantic conventions. It represents // the it is essential for FIFO message. Messages that belong to the same // message group are always processed one by one within the same consumer // group. func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { return MessagingRocketmqMessageGroupKey.String(val) } // MessagingRocketmqMessageTag returns an attribute KeyValue conforming to // the "messaging.rocketmq.message.tag" semantic conventions. It represents the // secondary classifier of message besides topic. 
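// Illustrative sketch (not part of the generated semantic conventions API):
// a hypothetical helper showing how the RocketMQ attribute constructors and
// enum values defined in this file might be combined into the attribute set
// recorded for a FIFO message. The function name and parameter list are
// assumptions for demonstration only.
func exampleRocketmqFIFOMessageAttributes(namespace, clientGroup, clientID, messageGroup string) []attribute.KeyValue {
	return []attribute.KeyValue{
		// Required RocketMQ attributes.
		MessagingRocketmqNamespace(namespace),
		MessagingRocketmqClientGroup(clientGroup),
		MessagingRocketmqClientID(clientID),
		// Conditionally required when the message type is FIFO.
		MessagingRocketmqMessageTypeFifo,
		MessagingRocketmqMessageGroup(messageGroup),
	}
}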
func MessagingRocketmqMessageTag(val string) attribute.KeyValue { return MessagingRocketmqMessageTagKey.String(val) } // MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to // the "messaging.rocketmq.message.keys" semantic conventions. It represents // the key(s) of message, another way to mark message besides message id. func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { return MessagingRocketmqMessageKeysKey.StringSlice(val) } // Semantic conventions for remote procedure calls. const ( // RPCSystemKey is the attribute Key conforming to the "rpc.system" // semantic conventions. It represents a string identifying the remoting // system. See below for a list of well-known identifiers. // // Type: Enum // RequirementLevel: Required // Stability: stable RPCSystemKey = attribute.Key("rpc.system") // RPCServiceKey is the attribute Key conforming to the "rpc.service" // semantic conventions. It represents the full (logical) name of the // service being called, including its package name, if applicable. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'myservice.EchoService' // Note: This is the logical name of the service from the RPC interface // perspective, which can be different from the name of any implementing // class. The `code.namespace` attribute may be used to store the latter // (despite the attribute name, it may include a class name; e.g., class // with method actually executing the call on the server side, RPC client // stub class on the client side). RPCServiceKey = attribute.Key("rpc.service") // RPCMethodKey is the attribute Key conforming to the "rpc.method" // semantic conventions. It represents the name of the (logical) method // being called, must be equal to the $method part in the span name. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'exampleMethod' // Note: This is the logical name of the method from the RPC interface // perspective, which can be different from the name of any implementing // method/function. The `code.function` attribute may be used to store the // latter (e.g., method actually executing the call on the server side, RPC // client stub method on the client side). RPCMethodKey = attribute.Key("rpc.method") ) var ( // gRPC RPCSystemGRPC = RPCSystemKey.String("grpc") // Java RMI RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") // .NET WCF RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") // Apache Dubbo RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") ) // RPCService returns an attribute KeyValue conforming to the "rpc.service" // semantic conventions. It represents the full (logical) name of the service // being called, including its package name, if applicable. func RPCService(val string) attribute.KeyValue { return RPCServiceKey.String(val) } // RPCMethod returns an attribute KeyValue conforming to the "rpc.method" // semantic conventions. It represents the name of the (logical) method being // called, must be equal to the $method part in the span name. func RPCMethod(val string) attribute.KeyValue { return RPCMethodKey.String(val) } // Tech-specific attributes for gRPC. const ( // RPCGRPCStatusCodeKey is the attribute Key conforming to the // "rpc.grpc.status_code" semantic conventions. It represents the [numeric // status // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of // the gRPC request. 
// // Type: Enum // RequirementLevel: Required // Stability: stable RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ) var ( // OK RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) // CANCELLED RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) // UNKNOWN RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) // INVALID_ARGUMENT RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) // DEADLINE_EXCEEDED RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) // NOT_FOUND RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) // ALREADY_EXISTS RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) // PERMISSION_DENIED RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) // RESOURCE_EXHAUSTED RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) // FAILED_PRECONDITION RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) // ABORTED RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) // OUT_OF_RANGE RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) // UNIMPLEMENTED RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) // INTERNAL RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) // UNAVAILABLE RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) // DATA_LOSS RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) // UNAUTHENTICATED RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) ) // Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). const ( // RPCJsonrpcVersionKey is the attribute Key conforming to the // "rpc.jsonrpc.version" semantic conventions. It represents the protocol // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 // does not specify this, the value can be omitted. // // Type: string // RequirementLevel: ConditionallyRequired (If other than the default // version (`1.0`)) // Stability: stable // Examples: '2.0', '1.0' RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") // RPCJsonrpcRequestIDKey is the attribute Key conforming to the // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` // property of request or response. Since protocol allows id to be int, // string, `null` or missing (for notifications), value is expected to be // cast to string for simplicity. Use empty string in case of `null` value. // Omit entirely if this is a notification. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '10', 'request-7', '' RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the // "rpc.jsonrpc.error_code" semantic conventions. It represents the // `error.code` property of response if it is an error response. // // Type: int // RequirementLevel: ConditionallyRequired (If response is not successful.) // Stability: stable // Examples: -32700, 100 RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the // "rpc.jsonrpc.error_message" semantic conventions. It represents the // `error.message` property of response if it is an error response. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Parse error', 'User already exists' RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") ) // RPCJsonrpcVersion returns an attribute KeyValue conforming to the // "rpc.jsonrpc.version" semantic conventions. 
It represents the protocol // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 // does not specify this, the value can be omitted. func RPCJsonrpcVersion(val string) attribute.KeyValue { return RPCJsonrpcVersionKey.String(val) } // RPCJsonrpcRequestID returns an attribute KeyValue conforming to the // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` // property of request or response. Since protocol allows id to be int, string, // `null` or missing (for notifications), value is expected to be cast to // string for simplicity. Use empty string in case of `null` value. Omit // entirely if this is a notification. func RPCJsonrpcRequestID(val string) attribute.KeyValue { return RPCJsonrpcRequestIDKey.String(val) } // RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the // "rpc.jsonrpc.error_code" semantic conventions. It represents the // `error.code` property of response if it is an error response. func RPCJsonrpcErrorCode(val int) attribute.KeyValue { return RPCJsonrpcErrorCodeKey.Int(val) } // RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the // "rpc.jsonrpc.error_message" semantic conventions. It represents the // `error.message` property of response if it is an error response. func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { return RPCJsonrpcErrorMessageKey.String(val) } opentelemetry-go-1.21.0/semconv/v1.19.0/000077500000000000000000000000001452547353200174725ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.19.0/attribute_group.go000066400000000000000000001433111452547353200232430ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.19.0" import "go.opentelemetry.io/otel/attribute" // Describes HTTP attributes. const ( // HTTPMethodKey is the attribute Key conforming to the "http.method" // semantic conventions. It represents the hTTP request method. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'GET', 'POST', 'HEAD' HTTPMethodKey = attribute.Key("http.method") // HTTPStatusCodeKey is the attribute Key conforming to the // "http.status_code" semantic conventions. It represents the [HTTP // response status code](https://tools.ietf.org/html/rfc7231#section-6). // // Type: int // RequirementLevel: ConditionallyRequired (If and only if one was // received/sent.) // Stability: stable // Examples: 200 HTTPStatusCodeKey = attribute.Key("http.status_code") // HTTPFlavorKey is the attribute Key conforming to the "http.flavor" // semantic conventions. It represents the kind of HTTP protocol used. 
// // Type: Enum // RequirementLevel: Optional // Stability: stable HTTPFlavorKey = attribute.Key("http.flavor") ) var ( // HTTP/1.0 HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") // HTTP/1.1 HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") // HTTP/2 HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") // HTTP/3 HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") // SPDY protocol HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") // QUIC protocol HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") ) // HTTPMethod returns an attribute KeyValue conforming to the "http.method" // semantic conventions. It represents the hTTP request method. func HTTPMethod(val string) attribute.KeyValue { return HTTPMethodKey.String(val) } // HTTPStatusCode returns an attribute KeyValue conforming to the // "http.status_code" semantic conventions. It represents the [HTTP response // status code](https://tools.ietf.org/html/rfc7231#section-6). func HTTPStatusCode(val int) attribute.KeyValue { return HTTPStatusCodeKey.Int(val) } // HTTP Server spans attributes const ( // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" // semantic conventions. It represents the URI scheme identifying the used // protocol. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'http', 'https' HTTPSchemeKey = attribute.Key("http.scheme") // HTTPRouteKey is the attribute Key conforming to the "http.route" // semantic conventions. It represents the matched route (path template in // the format used by the respective server framework). See note below // // Type: string // RequirementLevel: ConditionallyRequired (If and only if it's available) // Stability: stable // Examples: '/users/:userID?', '{controller}/{action}/{id?}' // Note: MUST NOT be populated when this is not supported by the HTTP // server framework as the route attribute should have low-cardinality and // the URI path can NOT substitute it. // SHOULD include the [application // root](/specification/trace/semantic_conventions/http.md#http-server-definitions) // if there is one. HTTPRouteKey = attribute.Key("http.route") ) // HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" // semantic conventions. It represents the URI scheme identifying the used // protocol. func HTTPScheme(val string) attribute.KeyValue { return HTTPSchemeKey.String(val) } // HTTPRoute returns an attribute KeyValue conforming to the "http.route" // semantic conventions. It represents the matched route (path template in the // format used by the respective server framework). See note below func HTTPRoute(val string) attribute.KeyValue { return HTTPRouteKey.String(val) } // Attributes for Events represented using Log Records. const ( // EventNameKey is the attribute Key conforming to the "event.name" // semantic conventions. It represents the name identifies the event. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'click', 'exception' EventNameKey = attribute.Key("event.name") // EventDomainKey is the attribute Key conforming to the "event.domain" // semantic conventions. It represents the domain identifies the business // context for the events. // // Type: Enum // RequirementLevel: Required // Stability: stable // Note: Events across different domains may have same `event.name`, yet be // unrelated events. 
EventDomainKey = attribute.Key("event.domain") ) var ( // Events from browser apps EventDomainBrowser = EventDomainKey.String("browser") // Events from mobile apps EventDomainDevice = EventDomainKey.String("device") // Events from Kubernetes EventDomainK8S = EventDomainKey.String("k8s") ) // EventName returns an attribute KeyValue conforming to the "event.name" // semantic conventions. It represents the name identifies the event. func EventName(val string) attribute.KeyValue { return EventNameKey.String(val) } // These attributes may be used for any network related operation. const ( // NetTransportKey is the attribute Key conforming to the "net.transport" // semantic conventions. It represents the transport protocol used. See // note below. // // Type: Enum // RequirementLevel: Optional // Stability: stable NetTransportKey = attribute.Key("net.transport") // NetAppProtocolNameKey is the attribute Key conforming to the // "net.app.protocol.name" semantic conventions. It represents the // application layer protocol used. The value SHOULD be normalized to // lowercase. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'amqp', 'http', 'mqtt' NetAppProtocolNameKey = attribute.Key("net.app.protocol.name") // NetAppProtocolVersionKey is the attribute Key conforming to the // "net.app.protocol.version" semantic conventions. It represents the // version of the application layer protocol used. See note below. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '3.1.1' // Note: `net.app.protocol.version` refers to the version of the protocol // used and might be different from the protocol client's version. If the // HTTP client used has a version of `0.27.2`, but sends HTTP version // `1.1`, this attribute should be set to `1.1`. NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version") // NetSockPeerNameKey is the attribute Key conforming to the // "net.sock.peer.name" semantic conventions. It represents the remote // socket peer name. // // Type: string // RequirementLevel: Recommended (If available and different from // `net.peer.name` and if `net.sock.peer.addr` is set.) // Stability: stable // Examples: 'proxy.example.com' NetSockPeerNameKey = attribute.Key("net.sock.peer.name") // NetSockPeerAddrKey is the attribute Key conforming to the // "net.sock.peer.addr" semantic conventions. It represents the remote // socket peer address: IPv4 or IPv6 for internet protocols, path for local // communication, // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '127.0.0.1', '/tmp/mysql.sock' NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") // NetSockPeerPortKey is the attribute Key conforming to the // "net.sock.peer.port" semantic conventions. It represents the remote // socket peer port. // // Type: int // RequirementLevel: Recommended (If defined for the address family and if // different than `net.peer.port` and if `net.sock.peer.addr` is set.) // Stability: stable // Examples: 16456 NetSockPeerPortKey = attribute.Key("net.sock.peer.port") // NetSockFamilyKey is the attribute Key conforming to the // "net.sock.family" semantic conventions. It represents the protocol // [address // family](https://man7.org/linux/man-pages/man7/address_families.7.html) // which is used for communication. 
// // Type: Enum // RequirementLevel: ConditionallyRequired (If different than `inet` and if // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support // instrumentations that follow previous versions of this document.) // Stability: stable // Examples: 'inet6', 'bluetooth' NetSockFamilyKey = attribute.Key("net.sock.family") // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" // semantic conventions. It represents the logical remote hostname, see // note below. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'example.com' // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an // extra DNS lookup. NetPeerNameKey = attribute.Key("net.peer.name") // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" // semantic conventions. It represents the logical remote port number // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 80, 8080, 443 NetPeerPortKey = attribute.Key("net.peer.port") // NetHostNameKey is the attribute Key conforming to the "net.host.name" // semantic conventions. It represents the logical local hostname or // similar, see note below. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'localhost' NetHostNameKey = attribute.Key("net.host.name") // NetHostPortKey is the attribute Key conforming to the "net.host.port" // semantic conventions. It represents the logical local port number, // preferably the one that the peer used to connect // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 8080 NetHostPortKey = attribute.Key("net.host.port") // NetSockHostAddrKey is the attribute Key conforming to the // "net.sock.host.addr" semantic conventions. It represents the local // socket address. Useful in case of a multi-IP host. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '192.168.0.1' NetSockHostAddrKey = attribute.Key("net.sock.host.addr") // NetSockHostPortKey is the attribute Key conforming to the // "net.sock.host.port" semantic conventions. It represents the local // socket port number. // // Type: int // RequirementLevel: ConditionallyRequired (If defined for the address // family and if different than `net.host.port` and if `net.sock.host.addr` // is set. In other cases, it is still recommended to set this.) // Stability: stable // Examples: 35555 NetSockHostPortKey = attribute.Key("net.sock.host.port") // NetHostConnectionTypeKey is the attribute Key conforming to the // "net.host.connection.type" semantic conventions. It represents the // internet connection type currently being used by the host. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'wifi' NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") // NetHostConnectionSubtypeKey is the attribute Key conforming to the // "net.host.connection.subtype" semantic conventions. It represents the // this describes more details regarding the connection.type. It may be the // type of cell technology connection, but it could be used for describing // details about a wifi connection. 
// // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'LTE' NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") // NetHostCarrierNameKey is the attribute Key conforming to the // "net.host.carrier.name" semantic conventions. It represents the name of // the mobile carrier. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'sprint' NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") // NetHostCarrierMccKey is the attribute Key conforming to the // "net.host.carrier.mcc" semantic conventions. It represents the mobile // carrier country code. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '310' NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") // NetHostCarrierMncKey is the attribute Key conforming to the // "net.host.carrier.mnc" semantic conventions. It represents the mobile // carrier network code. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '001' NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") // NetHostCarrierIccKey is the attribute Key conforming to the // "net.host.carrier.icc" semantic conventions. It represents the ISO // 3166-1 alpha-2 2-character country code associated with the mobile // carrier network. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'DE' NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") ) var ( // ip_tcp NetTransportTCP = NetTransportKey.String("ip_tcp") // ip_udp NetTransportUDP = NetTransportKey.String("ip_udp") // Named or anonymous pipe. See note below NetTransportPipe = NetTransportKey.String("pipe") // In-process communication NetTransportInProc = NetTransportKey.String("inproc") // Something else (non IP-based) NetTransportOther = NetTransportKey.String("other") ) var ( // IPv4 address NetSockFamilyInet = NetSockFamilyKey.String("inet") // IPv6 address NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") // Unix domain socket path NetSockFamilyUnix = NetSockFamilyKey.String("unix") ) var ( // wifi NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") // wired NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") // cell NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") // unavailable NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") // unknown NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") ) var ( // GPRS NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") // EDGE NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") // UMTS NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") // CDMA NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") // EVDO Rel. 0 NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") // EVDO Rev. A NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") // CDMA2000 1XRTT NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") // HSDPA NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") // HSUPA NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") // HSPA NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") // IDEN NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") // EVDO Rev. 
B NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") // LTE NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") // EHRPD NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") // HSPAP NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") // GSM NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") // TD-SCDMA NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") // IWLAN NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") // 5G NR (New Radio) NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") // 5G NRNSA (New Radio Non-Standalone) NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") // LTE CA NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") ) // NetAppProtocolName returns an attribute KeyValue conforming to the // "net.app.protocol.name" semantic conventions. It represents the application // layer protocol used. The value SHOULD be normalized to lowercase. func NetAppProtocolName(val string) attribute.KeyValue { return NetAppProtocolNameKey.String(val) } // NetAppProtocolVersion returns an attribute KeyValue conforming to the // "net.app.protocol.version" semantic conventions. It represents the version // of the application layer protocol used. See note below. func NetAppProtocolVersion(val string) attribute.KeyValue { return NetAppProtocolVersionKey.String(val) } // NetSockPeerName returns an attribute KeyValue conforming to the // "net.sock.peer.name" semantic conventions. It represents the remote socket // peer name. func NetSockPeerName(val string) attribute.KeyValue { return NetSockPeerNameKey.String(val) } // NetSockPeerAddr returns an attribute KeyValue conforming to the // "net.sock.peer.addr" semantic conventions. It represents the remote socket // peer address: IPv4 or IPv6 for internet protocols, path for local // communication, // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). func NetSockPeerAddr(val string) attribute.KeyValue { return NetSockPeerAddrKey.String(val) } // NetSockPeerPort returns an attribute KeyValue conforming to the // "net.sock.peer.port" semantic conventions. It represents the remote socket // peer port. func NetSockPeerPort(val int) attribute.KeyValue { return NetSockPeerPortKey.Int(val) } // NetPeerName returns an attribute KeyValue conforming to the // "net.peer.name" semantic conventions. It represents the logical remote // hostname, see note below. func NetPeerName(val string) attribute.KeyValue { return NetPeerNameKey.String(val) } // NetPeerPort returns an attribute KeyValue conforming to the // "net.peer.port" semantic conventions. It represents the logical remote port // number func NetPeerPort(val int) attribute.KeyValue { return NetPeerPortKey.Int(val) } // NetHostName returns an attribute KeyValue conforming to the // "net.host.name" semantic conventions. It represents the logical local // hostname or similar, see note below. func NetHostName(val string) attribute.KeyValue { return NetHostNameKey.String(val) } // NetHostPort returns an attribute KeyValue conforming to the // "net.host.port" semantic conventions. It represents the logical local port // number, preferably the one that the peer used to connect func NetHostPort(val int) attribute.KeyValue { return NetHostPortKey.Int(val) } // NetSockHostAddr returns an attribute KeyValue conforming to the // "net.sock.host.addr" semantic conventions. 
It represents the local socket // address. Useful in case of a multi-IP host. func NetSockHostAddr(val string) attribute.KeyValue { return NetSockHostAddrKey.String(val) } // NetSockHostPort returns an attribute KeyValue conforming to the // "net.sock.host.port" semantic conventions. It represents the local socket // port number. func NetSockHostPort(val int) attribute.KeyValue { return NetSockHostPortKey.Int(val) } // NetHostCarrierName returns an attribute KeyValue conforming to the // "net.host.carrier.name" semantic conventions. It represents the name of the // mobile carrier. func NetHostCarrierName(val string) attribute.KeyValue { return NetHostCarrierNameKey.String(val) } // NetHostCarrierMcc returns an attribute KeyValue conforming to the // "net.host.carrier.mcc" semantic conventions. It represents the mobile // carrier country code. func NetHostCarrierMcc(val string) attribute.KeyValue { return NetHostCarrierMccKey.String(val) } // NetHostCarrierMnc returns an attribute KeyValue conforming to the // "net.host.carrier.mnc" semantic conventions. It represents the mobile // carrier network code. func NetHostCarrierMnc(val string) attribute.KeyValue { return NetHostCarrierMncKey.String(val) } // NetHostCarrierIcc returns an attribute KeyValue conforming to the // "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 // alpha-2 2-character country code associated with the mobile carrier network. func NetHostCarrierIcc(val string) attribute.KeyValue { return NetHostCarrierIccKey.String(val) } // Semantic conventions for HTTP client and server Spans. const ( // HTTPRequestContentLengthKey is the attribute Key conforming to the // "http.request_content_length" semantic conventions. It represents the // size of the request payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as // the // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) // header. For requests using transport encoding, this should be the // compressed size. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 3495 HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") // HTTPResponseContentLengthKey is the attribute Key conforming to the // "http.response_content_length" semantic conventions. It represents the // size of the response payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as // the // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) // header. For requests using transport encoding, this should be the // compressed size. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 3495 HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") ) // HTTPRequestContentLength returns an attribute KeyValue conforming to the // "http.request_content_length" semantic conventions. It represents the size // of the request payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) // header. For requests using transport encoding, this should be the compressed // size. 
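// Illustrative sketch (not part of the generated semantic conventions API):
// a hypothetical helper showing how the HTTP and network attribute
// constructors in this file might be combined for an HTTP client span. The
// function name and parameter list are assumptions for demonstration only.
func exampleHTTPClientAttributes(method, host string, port, status, requestBytes int) []attribute.KeyValue {
	return []attribute.KeyValue{
		HTTPMethod(method),                     // "http.method", required
		HTTPFlavorHTTP11,                       // "http.flavor" enum value
		NetPeerName(host),                      // "net.peer.name", logical remote hostname
		NetPeerPort(port),                      // "net.peer.port", logical remote port
		HTTPStatusCode(status),                 // "http.status_code", if a response was received
		HTTPRequestContentLength(requestBytes), // request payload size in bytes
	}
}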
func HTTPRequestContentLength(val int) attribute.KeyValue { return HTTPRequestContentLengthKey.Int(val) } // HTTPResponseContentLength returns an attribute KeyValue conforming to the // "http.response_content_length" semantic conventions. It represents the size // of the response payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) // header. For requests using transport encoding, this should be the compressed // size. func HTTPResponseContentLength(val int) attribute.KeyValue { return HTTPResponseContentLengthKey.Int(val) } // Semantic convention describing per-message attributes populated on messaging // spans or links. const ( // MessagingMessageIDKey is the attribute Key conforming to the // "messaging.message.id" semantic conventions. It represents a value used // by the messaging system as an identifier for the message, represented as // a string. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '452a7c7c7c7048c2f887f61572b18fc2' MessagingMessageIDKey = attribute.Key("messaging.message.id") // MessagingMessageConversationIDKey is the attribute Key conforming to the // "messaging.message.conversation_id" semantic conventions. It represents // the [conversation ID](#conversations) identifying the conversation to // which the message belongs, represented as a string. Sometimes called // "Correlation ID". // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MyConversationID' MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to // the "messaging.message.payload_size_bytes" semantic conventions. It // represents the (uncompressed) size of the message payload in bytes. Also // use this attribute if it is unknown whether the compressed or // uncompressed payload size is reported. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2738 MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key // conforming to the "messaging.message.payload_compressed_size_bytes" // semantic conventions. It represents the compressed size of the message // payload in bytes. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2048 MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") ) // MessagingMessageID returns an attribute KeyValue conforming to the // "messaging.message.id" semantic conventions. It represents a value used by // the messaging system as an identifier for the message, represented as a // string. func MessagingMessageID(val string) attribute.KeyValue { return MessagingMessageIDKey.String(val) } // MessagingMessageConversationID returns an attribute KeyValue conforming // to the "messaging.message.conversation_id" semantic conventions. It // represents the [conversation ID](#conversations) identifying the // conversation to which the message belongs, represented as a string. // Sometimes called "Correlation ID". 
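// Illustrative sketch (not part of the generated semantic conventions API):
// a hypothetical helper showing how the per-message and destination attribute
// constructors in this file could be combined when recording a publish
// operation. The function name and parameters are assumptions for
// demonstration only.
func exampleMessagePublishAttributes(destination, msgID, conversationID string, payloadBytes int) []attribute.KeyValue {
	return []attribute.KeyValue{
		MessagingDestinationName(destination),          // "messaging.destination.name"
		MessagingMessageID(msgID),                      // "messaging.message.id"
		MessagingMessageConversationID(conversationID), // "messaging.message.conversation_id"
		MessagingMessagePayloadSizeBytes(payloadBytes), // uncompressed payload size in bytes
	}
}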
func MessagingMessageConversationID(val string) attribute.KeyValue { return MessagingMessageConversationIDKey.String(val) } // MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming // to the "messaging.message.payload_size_bytes" semantic conventions. It // represents the (uncompressed) size of the message payload in bytes. Also use // this attribute if it is unknown whether the compressed or uncompressed // payload size is reported. func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { return MessagingMessagePayloadSizeBytesKey.Int(val) } // MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue // conforming to the "messaging.message.payload_compressed_size_bytes" semantic // conventions. It represents the compressed size of the message payload in // bytes. func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) } // Semantic convention for attributes that describe messaging destination on // broker const ( // MessagingDestinationNameKey is the attribute Key conforming to the // "messaging.destination.name" semantic conventions. It represents the // message destination name // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MyQueue', 'MyTopic' // Note: Destination name SHOULD uniquely identify a specific queue, topic // or other entity within the broker. If // the broker does not have such notion, the destination name SHOULD // uniquely identify the broker. MessagingDestinationNameKey = attribute.Key("messaging.destination.name") // MessagingDestinationKindKey is the attribute Key conforming to the // "messaging.destination.kind" semantic conventions. It represents the // kind of message destination // // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingDestinationKindKey = attribute.Key("messaging.destination.kind") // MessagingDestinationTemplateKey is the attribute Key conforming to the // "messaging.destination.template" semantic conventions. It represents the // low cardinality representation of the messaging destination name // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '/customers/{customerID}' // Note: Destination names could be constructed from templates. An example // would be a destination name involving a user name or product id. // Although the destination name in this case is of high cardinality, the // underlying template is of low cardinality and can be effectively used // for grouping and aggregation. MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") // MessagingDestinationTemporaryKey is the attribute Key conforming to the // "messaging.destination.temporary" semantic conventions. It represents a // boolean that is true if the message destination is temporary and might // not exist anymore after messages are processed. // // Type: boolean // RequirementLevel: Optional // Stability: stable MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") // MessagingDestinationAnonymousKey is the attribute Key conforming to the // "messaging.destination.anonymous" semantic conventions. It represents a // boolean that is true if the message destination is anonymous (could be // unnamed or have auto-generated name). 
// // Type: boolean // RequirementLevel: Optional // Stability: stable MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") ) var ( // A message sent to a queue MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") // A message sent to a topic MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") ) // MessagingDestinationName returns an attribute KeyValue conforming to the // "messaging.destination.name" semantic conventions. It represents the message // destination name func MessagingDestinationName(val string) attribute.KeyValue { return MessagingDestinationNameKey.String(val) } // MessagingDestinationTemplate returns an attribute KeyValue conforming to // the "messaging.destination.template" semantic conventions. It represents the // low cardinality representation of the messaging destination name func MessagingDestinationTemplate(val string) attribute.KeyValue { return MessagingDestinationTemplateKey.String(val) } // MessagingDestinationTemporary returns an attribute KeyValue conforming to // the "messaging.destination.temporary" semantic conventions. It represents a // boolean that is true if the message destination is temporary and might not // exist anymore after messages are processed. func MessagingDestinationTemporary(val bool) attribute.KeyValue { return MessagingDestinationTemporaryKey.Bool(val) } // MessagingDestinationAnonymous returns an attribute KeyValue conforming to // the "messaging.destination.anonymous" semantic conventions. It represents a // boolean that is true if the message destination is anonymous (could be // unnamed or have auto-generated name). func MessagingDestinationAnonymous(val bool) attribute.KeyValue { return MessagingDestinationAnonymousKey.Bool(val) } // Semantic convention for attributes that describe messaging source on broker const ( // MessagingSourceNameKey is the attribute Key conforming to the // "messaging.source.name" semantic conventions. It represents the message // source name // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MyQueue', 'MyTopic' // Note: Source name SHOULD uniquely identify a specific queue, topic, or // other entity within the broker. If // the broker does not have such notion, the source name SHOULD uniquely // identify the broker. MessagingSourceNameKey = attribute.Key("messaging.source.name") // MessagingSourceKindKey is the attribute Key conforming to the // "messaging.source.kind" semantic conventions. It represents the kind of // message source // // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingSourceKindKey = attribute.Key("messaging.source.kind") // MessagingSourceTemplateKey is the attribute Key conforming to the // "messaging.source.template" semantic conventions. It represents the low // cardinality representation of the messaging source name // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '/customers/{customerID}' // Note: Source names could be constructed from templates. An example would // be a source name involving a user name or product id. Although the // source name in this case is of high cardinality, the underlying template // is of low cardinality and can be effectively used for grouping and // aggregation. MessagingSourceTemplateKey = attribute.Key("messaging.source.template") // MessagingSourceTemporaryKey is the attribute Key conforming to the // "messaging.source.temporary" semantic conventions. 
It represents a // boolean that is true if the message source is temporary and might not // exist anymore after messages are processed. // // Type: boolean // RequirementLevel: Optional // Stability: stable MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") // MessagingSourceAnonymousKey is the attribute Key conforming to the // "messaging.source.anonymous" semantic conventions. It represents a // boolean that is true if the message source is anonymous (could be // unnamed or have auto-generated name). // // Type: boolean // RequirementLevel: Optional // Stability: stable MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous") ) var ( // A message received from a queue MessagingSourceKindQueue = MessagingSourceKindKey.String("queue") // A message received from a topic MessagingSourceKindTopic = MessagingSourceKindKey.String("topic") ) // MessagingSourceName returns an attribute KeyValue conforming to the // "messaging.source.name" semantic conventions. It represents the message // source name func MessagingSourceName(val string) attribute.KeyValue { return MessagingSourceNameKey.String(val) } // MessagingSourceTemplate returns an attribute KeyValue conforming to the // "messaging.source.template" semantic conventions. It represents the low // cardinality representation of the messaging source name func MessagingSourceTemplate(val string) attribute.KeyValue { return MessagingSourceTemplateKey.String(val) } // MessagingSourceTemporary returns an attribute KeyValue conforming to the // "messaging.source.temporary" semantic conventions. It represents a boolean // that is true if the message source is temporary and might not exist anymore // after messages are processed. func MessagingSourceTemporary(val bool) attribute.KeyValue { return MessagingSourceTemporaryKey.Bool(val) } // MessagingSourceAnonymous returns an attribute KeyValue conforming to the // "messaging.source.anonymous" semantic conventions. It represents a boolean // that is true if the message source is anonymous (could be unnamed or have // auto-generated name). func MessagingSourceAnonymous(val bool) attribute.KeyValue { return MessagingSourceAnonymousKey.Bool(val) } // Attributes for RabbitMQ const ( // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key // conforming to the "messaging.rabbitmq.destination.routing_key" semantic // conventions. It represents the rabbitMQ message routing key. // // Type: string // RequirementLevel: ConditionallyRequired (If not empty.) // Stability: stable // Examples: 'myKey' MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") ) // MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue // conforming to the "messaging.rabbitmq.destination.routing_key" semantic // conventions. It represents the rabbitMQ message routing key. func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { return MessagingRabbitmqDestinationRoutingKeyKey.String(val) } // Attributes for Apache Kafka const ( // MessagingKafkaMessageKeyKey is the attribute Key conforming to the // "messaging.kafka.message.key" semantic conventions. It represents the // message keys in Kafka are used for grouping alike messages to ensure // they're processed on the same partition. They differ from // `messaging.message.id` in that they're not unique. If the key is `null`, // the attribute MUST NOT be set. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'myKey' // Note: If the key type is not string, it's string representation has to // be supplied for the attribute. If the key has no unambiguous, canonical // string form, don't include its value. MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the // "messaging.kafka.consumer.group" semantic conventions. It represents the // name of the Kafka Consumer Group that is handling the message. Only // applies to consumers, not producers. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'my-group' MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") // MessagingKafkaClientIDKey is the attribute Key conforming to the // "messaging.kafka.client_id" semantic conventions. It represents the // client ID for the Consumer or Producer that is handling the message. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'client-5' MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to // the "messaging.kafka.destination.partition" semantic conventions. It // represents the partition the message is sent to. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2 MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the // "messaging.kafka.source.partition" semantic conventions. It represents // the partition the message is received from. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2 MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition") // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the // "messaging.kafka.message.offset" semantic conventions. It represents the // offset of a record in the corresponding Kafka partition. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 42 MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the // "messaging.kafka.message.tombstone" semantic conventions. It represents // a boolean that is true if the message is a tombstone. // // Type: boolean // RequirementLevel: ConditionallyRequired (If value is `true`. When // missing, the value is assumed to be `false`.) // Stability: stable MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") ) // MessagingKafkaMessageKey returns an attribute KeyValue conforming to the // "messaging.kafka.message.key" semantic conventions. It represents the // message keys in Kafka are used for grouping alike messages to ensure they're // processed on the same partition. They differ from `messaging.message.id` in // that they're not unique. If the key is `null`, the attribute MUST NOT be // set. func MessagingKafkaMessageKey(val string) attribute.KeyValue { return MessagingKafkaMessageKeyKey.String(val) } // MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to // the "messaging.kafka.consumer.group" semantic conventions. It represents the // name of the Kafka Consumer Group that is handling the message. Only applies // to consumers, not producers. 
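// Illustrative sketch (not part of the generated semantic conventions API):
// a hypothetical helper showing how the Kafka attribute constructors in this
// file are typically combined for a consumer (receive) span. The function
// name and parameters are assumptions for demonstration only.
func exampleKafkaConsumerAttributes(topic, group, clientID, key string, partition, offset int) []attribute.KeyValue {
	return []attribute.KeyValue{
		MessagingSourceName(topic),               // "messaging.source.name"
		MessagingKafkaConsumerGroup(group),       // consumer group handling the message
		MessagingKafkaClientID(clientID),         // "messaging.kafka.client_id"
		MessagingKafkaMessageKey(key),            // omit entirely if the Kafka key is null
		MessagingKafkaSourcePartition(partition), // partition the message was received from
		MessagingKafkaMessageOffset(offset),      // record offset within the partition
	}
}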
func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { return MessagingKafkaConsumerGroupKey.String(val) } // MessagingKafkaClientID returns an attribute KeyValue conforming to the // "messaging.kafka.client_id" semantic conventions. It represents the client // ID for the Consumer or Producer that is handling the message. func MessagingKafkaClientID(val string) attribute.KeyValue { return MessagingKafkaClientIDKey.String(val) } // MessagingKafkaDestinationPartition returns an attribute KeyValue // conforming to the "messaging.kafka.destination.partition" semantic // conventions. It represents the partition the message is sent to. func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { return MessagingKafkaDestinationPartitionKey.Int(val) } // MessagingKafkaSourcePartition returns an attribute KeyValue conforming to // the "messaging.kafka.source.partition" semantic conventions. It represents // the partition the message is received from. func MessagingKafkaSourcePartition(val int) attribute.KeyValue { return MessagingKafkaSourcePartitionKey.Int(val) } // MessagingKafkaMessageOffset returns an attribute KeyValue conforming to // the "messaging.kafka.message.offset" semantic conventions. It represents the // offset of a record in the corresponding Kafka partition. func MessagingKafkaMessageOffset(val int) attribute.KeyValue { return MessagingKafkaMessageOffsetKey.Int(val) } // MessagingKafkaMessageTombstone returns an attribute KeyValue conforming // to the "messaging.kafka.message.tombstone" semantic conventions. It // represents a boolean that is true if the message is a tombstone. func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { return MessagingKafkaMessageTombstoneKey.Bool(val) } // Attributes for Apache RocketMQ const ( // MessagingRocketmqNamespaceKey is the attribute Key conforming to the // "messaging.rocketmq.namespace" semantic conventions. It represents the // namespace of RocketMQ resources, resources in different namespaces are // individual. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myNamespace' MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") // MessagingRocketmqClientGroupKey is the attribute Key conforming to the // "messaging.rocketmq.client_group" semantic conventions. It represents // the name of the RocketMQ producer/consumer group that is handling the // message. The client type is identified by the SpanKind. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myConsumerGroup' MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") // MessagingRocketmqClientIDKey is the attribute Key conforming to the // "messaging.rocketmq.client_id" semantic conventions. It represents the // unique identifier for each client. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myhost@8742@s8083jm' MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key // conforming to the "messaging.rocketmq.message.delivery_timestamp" // semantic conventions. It represents the timestamp in milliseconds that // the delay message is expected to be delivered to consumer. // // Type: int // RequirementLevel: ConditionallyRequired (If the message type is delay // and delay time level is not specified.) 
// Stability: stable // Examples: 1665987217045 MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key // conforming to the "messaging.rocketmq.message.delay_time_level" semantic // conventions. It represents the delay time level for a delay message, which // determines the message delay time. // // Type: int // RequirementLevel: ConditionallyRequired (If the message type is delay // and delivery timestamp is not specified.) // Stability: stable // Examples: 3 MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the // "messaging.rocketmq.message.group" semantic conventions. It represents // the message group, which is essential for FIFO messages. Messages that belong to the same // message group are always processed one by one within the same consumer // group. // // Type: string // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) // Stability: stable // Examples: 'myMessageGroup' MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the // "messaging.rocketmq.message.type" semantic conventions. It represents // the type of message. // // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") // MessagingRocketmqMessageTagKey is the attribute Key conforming to the // "messaging.rocketmq.message.tag" semantic conventions. It represents the // secondary classifier of message besides topic. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'tagA' MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the // "messaging.rocketmq.message.keys" semantic conventions. It represents // the key(s) of message, another way to mark message besides message id. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'keyA', 'keyB' MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to // the "messaging.rocketmq.consumption_model" semantic conventions. It // represents the model of message consumption. This only applies to // consumer spans.
// // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") ) var ( // Normal message MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") // FIFO message MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") // Delay message MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") // Transaction message MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") ) var ( // Clustering consumption model MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") // Broadcasting consumption model MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") ) // MessagingRocketmqNamespace returns an attribute KeyValue conforming to // the "messaging.rocketmq.namespace" semantic conventions. It represents the // namespace of RocketMQ resources; resources in different namespaces are // individual. func MessagingRocketmqNamespace(val string) attribute.KeyValue { return MessagingRocketmqNamespaceKey.String(val) } // MessagingRocketmqClientGroup returns an attribute KeyValue conforming to // the "messaging.rocketmq.client_group" semantic conventions. It represents // the name of the RocketMQ producer/consumer group that is handling the // message. The client type is identified by the SpanKind. func MessagingRocketmqClientGroup(val string) attribute.KeyValue { return MessagingRocketmqClientGroupKey.String(val) } // MessagingRocketmqClientID returns an attribute KeyValue conforming to the // "messaging.rocketmq.client_id" semantic conventions. It represents the // unique identifier for each client. func MessagingRocketmqClientID(val string) attribute.KeyValue { return MessagingRocketmqClientIDKey.String(val) } // MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue // conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic // conventions. It represents the timestamp in milliseconds that the delay // message is expected to be delivered to the consumer. func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) } // MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue // conforming to the "messaging.rocketmq.message.delay_time_level" semantic // conventions. It represents the delay time level for a delay message, which // determines the message delay time. func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) } // MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to // the "messaging.rocketmq.message.group" semantic conventions. It represents // the message group, which is essential for FIFO messages. Messages that belong to the same // message group are always processed one by one within the same consumer // group. func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { return MessagingRocketmqMessageGroupKey.String(val) } // MessagingRocketmqMessageTag returns an attribute KeyValue conforming to // the "messaging.rocketmq.message.tag" semantic conventions. It represents the // secondary classifier of message besides topic.
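//
// The indented snippet below is an illustrative sketch rather than part of
// the generated API: it shows how the RocketMQ attribute constructors can be
// combined on a single span. The span value is assumed to be a trace.Span
// from the caller's scope; the values are the documented examples.
//
//	span.SetAttributes(
//		MessagingRocketmqNamespace("myNamespace"),
//		MessagingRocketmqClientGroup("myConsumerGroup"),
//		MessagingRocketmqMessageTag("tagA"),
//	)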
func MessagingRocketmqMessageTag(val string) attribute.KeyValue { return MessagingRocketmqMessageTagKey.String(val) } // MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to // the "messaging.rocketmq.message.keys" semantic conventions. It represents // the key(s) of message, another way to mark message besides message id. func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { return MessagingRocketmqMessageKeysKey.StringSlice(val) } // Describes user-agent attributes. const ( // UserAgentOriginalKey is the attribute Key conforming to the // "user_agent.original" semantic conventions. It represents the value of // the [HTTP // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) // header sent by the client. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' UserAgentOriginalKey = attribute.Key("user_agent.original") ) // UserAgentOriginal returns an attribute KeyValue conforming to the // "user_agent.original" semantic conventions. It represents the value of the // [HTTP // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) // header sent by the client. func UserAgentOriginal(val string) attribute.KeyValue { return UserAgentOriginalKey.String(val) } opentelemetry-go-1.21.0/semconv/v1.19.0/doc.go000066400000000000000000000016641452547353200205750ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package semconv implements OpenTelemetry semantic conventions. // // OpenTelemetry semantic conventions are agreed standardized naming // patterns for OpenTelemetry things. This package represents the conventions // as of the v1.19.0 version of the OpenTelemetry specification. package semconv // import "go.opentelemetry.io/otel/semconv/v1.19.0" opentelemetry-go-1.21.0/semconv/v1.19.0/event.go000066400000000000000000000173141452547353200211500ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.19.0" import "go.opentelemetry.io/otel/attribute" // This semantic convention defines the attributes used to represent a feature // flag evaluation as an event. const ( // FeatureFlagKeyKey is the attribute Key conforming to the // "feature_flag.key" semantic conventions. 
It represents the unique // identifier of the feature flag. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'logo-color' FeatureFlagKeyKey = attribute.Key("feature_flag.key") // FeatureFlagProviderNameKey is the attribute Key conforming to the // "feature_flag.provider_name" semantic conventions. It represents the // name of the service provider that performs the flag evaluation. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'Flag Manager' FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") // FeatureFlagVariantKey is the attribute Key conforming to the // "feature_flag.variant" semantic conventions. It SHOULD be // a semantic identifier for a value. If one is unavailable, a stringified // version of the value can be used. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'red', 'true', 'on' // Note: A semantic identifier, commonly referred to as a variant, provides // a means // for referring to a value without including the value itself. This can // provide additional context for understanding the meaning behind a value. // For example, the variant `red` may be used for the value `#c05543`. // // A stringified version of the value can be used in situations where a // semantic identifier is unavailable. String representation of the value // should be determined by the implementer. FeatureFlagVariantKey = attribute.Key("feature_flag.variant") ) // FeatureFlagKey returns an attribute KeyValue conforming to the // "feature_flag.key" semantic conventions. It represents the unique identifier // of the feature flag. func FeatureFlagKey(val string) attribute.KeyValue { return FeatureFlagKeyKey.String(val) } // FeatureFlagProviderName returns an attribute KeyValue conforming to the // "feature_flag.provider_name" semantic conventions. It represents the name of // the service provider that performs the flag evaluation. func FeatureFlagProviderName(val string) attribute.KeyValue { return FeatureFlagProviderNameKey.String(val) } // FeatureFlagVariant returns an attribute KeyValue conforming to the // "feature_flag.variant" semantic conventions. It SHOULD be a // semantic identifier for a value. If one is unavailable, a stringified // version of the value can be used. func FeatureFlagVariant(val string) attribute.KeyValue { return FeatureFlagVariantKey.String(val) } // RPC received/sent message. const ( // MessageTypeKey is the attribute Key conforming to the "message.type" // semantic conventions. It represents whether this is a received or // sent message. // // Type: Enum // RequirementLevel: Optional // Stability: stable MessageTypeKey = attribute.Key("message.type") // MessageIDKey is the attribute Key conforming to the "message.id" // semantic conventions. It MUST be calculated as two // different counters starting from `1`, one for sent messages and one for // received messages. // // Type: int // RequirementLevel: Optional // Stability: stable // Note: This way we guarantee that the values will be consistent between // different implementations. MessageIDKey = attribute.Key("message.id") // MessageCompressedSizeKey is the attribute Key conforming to the // "message.compressed_size" semantic conventions. It represents the // compressed size of the message in bytes.
// // Type: int // RequirementLevel: Optional // Stability: stable MessageCompressedSizeKey = attribute.Key("message.compressed_size") // MessageUncompressedSizeKey is the attribute Key conforming to the // "message.uncompressed_size" semantic conventions. It represents the // uncompressed size of the message in bytes. // // Type: int // RequirementLevel: Optional // Stability: stable MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") ) var ( // sent MessageTypeSent = MessageTypeKey.String("SENT") // received MessageTypeReceived = MessageTypeKey.String("RECEIVED") ) // MessageID returns an attribute KeyValue conforming to the "message.id" // semantic conventions. It MUST be calculated as two different // counters starting from `1`, one for sent messages and one for received // messages. func MessageID(val int) attribute.KeyValue { return MessageIDKey.Int(val) } // MessageCompressedSize returns an attribute KeyValue conforming to the // "message.compressed_size" semantic conventions. It represents the compressed // size of the message in bytes. func MessageCompressedSize(val int) attribute.KeyValue { return MessageCompressedSizeKey.Int(val) } // MessageUncompressedSize returns an attribute KeyValue conforming to the // "message.uncompressed_size" semantic conventions. It represents the // uncompressed size of the message in bytes. func MessageUncompressedSize(val int) attribute.KeyValue { return MessageUncompressedSizeKey.Int(val) } // The attributes used to report a single exception associated with a span. const ( // ExceptionEscapedKey is the attribute Key conforming to the // "exception.escaped" semantic conventions. It SHOULD be // set to true if the exception event is recorded at a point where it is // known that the exception is escaping the scope of the span. // // Type: boolean // RequirementLevel: Optional // Stability: stable // Note: An exception is considered to have escaped (or left) the scope of // a span, // if that span is ended while the exception is still logically "in // flight". // This may be actually "in flight" in some languages (e.g. if the // exception // is passed to a Context manager's `__exit__` method in Python) but will // usually be caught at the point of recording the exception in most // languages. // // It is usually not possible to determine at the point where an exception // is thrown // whether it will escape the scope of a span. // However, it is trivial to know that an exception // will escape, if one checks for an active exception just before ending // the span, // as done in the [example above](#recording-an-exception). // // It follows that an exception may still escape the scope of the span // even if the `exception.escaped` attribute was not set or set to false, // since the event might have been recorded at a time where it was not // clear whether the exception will escape. ExceptionEscapedKey = attribute.Key("exception.escaped") ) // ExceptionEscaped returns an attribute KeyValue conforming to the // "exception.escaped" semantic conventions. It SHOULD be set to // true if the exception event is recorded at a point where it is known that // the exception is escaping the scope of the span.
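//
// The indented snippet below is an illustrative sketch, not part of the
// generated API: it shows the attribute being recorded together with an
// exception event. The span and err values are assumed to come from the
// caller's scope, and trace refers to go.opentelemetry.io/otel/trace.
//
//	span.RecordError(err, trace.WithAttributes(ExceptionEscaped(true)))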
func ExceptionEscaped(val bool) attribute.KeyValue { return ExceptionEscapedKey.Bool(val) } opentelemetry-go-1.21.0/semconv/v1.19.0/exception.go000066400000000000000000000014301452547353200220150ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.19.0" const ( // ExceptionEventName is the name of the Span event representing an exception. ExceptionEventName = "exception" ) opentelemetry-go-1.21.0/semconv/v1.19.0/http.go000066400000000000000000000014401452547353200207770ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.19.0" // HTTP scheme attributes. var ( HTTPSchemeHTTP = HTTPSchemeKey.String("http") HTTPSchemeHTTPS = HTTPSchemeKey.String("https") ) opentelemetry-go-1.21.0/semconv/v1.19.0/httpconv/000077500000000000000000000000001452547353200213375ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.19.0/httpconv/http.go000066400000000000000000000146771452547353200226640ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package httpconv provides OpenTelemetry HTTP semantic conventions for // tracing telemetry. 
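//
// The example below is an illustrative sketch of client-side use and is not
// part of the generated package: tracer, ctx, and req are assumed to come
// from the surrounding application, and trace refers to
// go.opentelemetry.io/otel/trace.
//
//	ctx, span := tracer.Start(ctx, "HTTP GET",
//		trace.WithAttributes(httpconv.ClientRequest(req)...))
//	defer span.End()
//	resp, err := http.DefaultClient.Do(req)
//	if err == nil {
//		span.SetAttributes(httpconv.ClientResponse(resp)...)
//		span.SetStatus(httpconv.ClientStatus(resp.StatusCode))
//	}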
package httpconv // import "go.opentelemetry.io/otel/semconv/v1.19.0/httpconv" import ( "net/http" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/semconv/internal/v3" semconv "go.opentelemetry.io/otel/semconv/v1.19.0" ) var ( nc = &internal.NetConv{ NetHostNameKey: semconv.NetHostNameKey, NetHostPortKey: semconv.NetHostPortKey, NetPeerNameKey: semconv.NetPeerNameKey, NetPeerPortKey: semconv.NetPeerPortKey, NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, NetSockPeerPortKey: semconv.NetSockPeerPortKey, NetTransportOther: semconv.NetTransportOther, NetTransportTCP: semconv.NetTransportTCP, NetTransportUDP: semconv.NetTransportUDP, NetTransportInProc: semconv.NetTransportInProc, } hc = &internal.HTTPConv{ NetConv: nc, EnduserIDKey: semconv.EnduserIDKey, HTTPClientIPKey: semconv.HTTPClientIPKey, HTTPFlavorKey: semconv.HTTPFlavorKey, HTTPMethodKey: semconv.HTTPMethodKey, HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey, HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey, HTTPRouteKey: semconv.HTTPRouteKey, HTTPSchemeHTTP: semconv.HTTPSchemeHTTP, HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS, HTTPStatusCodeKey: semconv.HTTPStatusCodeKey, HTTPTargetKey: semconv.HTTPTargetKey, HTTPURLKey: semconv.HTTPURLKey, UserAgentOriginalKey: semconv.UserAgentOriginalKey, } ) // ClientResponse returns trace attributes for an HTTP response received by a // client from a server. It will return the following attributes if the related // values are defined in resp: "http.status_code", // "http.response_content_length". // // This does not add all OpenTelemetry required attributes for an HTTP event, // it assumes ClientRequest was used to create the span with a complete set of // attributes. A complete set of attributes can be generated using the // request contained in resp. For example: // // append(ClientResponse(resp), ClientRequest(resp.Request)...) func ClientResponse(resp *http.Response) []attribute.KeyValue { return hc.ClientResponse(resp) } // ClientRequest returns trace attributes for an HTTP request made by a client. // The following attributes are always returned: "http.url", "http.flavor", // "http.method", "net.peer.name". The following attributes are returned if the // related values are defined in req: "net.peer.port", "user_agent.original", // "http.request_content_length", "enduser.id". func ClientRequest(req *http.Request) []attribute.KeyValue { return hc.ClientRequest(req) } // ClientStatus returns a span status code and message for an HTTP status code // value received by a client. func ClientStatus(code int) (codes.Code, string) { return hc.ClientStatus(code) } // ServerRequest returns trace attributes for an HTTP request received by a // server. // // The server must be the primary server name if it is known. For example, this // would be the ServerName directive // (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache // server, and the server_name directive // (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an // nginx server. More generically, the primary server name would be the host // header value that matches the default virtual host of an HTTP server. It // should include the host identifier and, if a port is used to route to the // server, that port identifier should be included as an appropriate port // suffix. // // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead.
// // The following attributes are always returned: "http.method", "http.scheme", // "http.flavor", "http.target", "net.host.name". The following attributes are // returned if the related values are defined in req: "net.host.port", // "net.sock.peer.addr", "net.sock.peer.port", "user_agent.original", "enduser.id", // "http.client_ip". func ServerRequest(server string, req *http.Request) []attribute.KeyValue { return hc.ServerRequest(server, req) } // ServerStatus returns a span status code and message for an HTTP status code // value returned by a server. Status codes in the 400-499 range are not // returned as errors. func ServerStatus(code int) (codes.Code, string) { return hc.ServerStatus(code) } // RequestHeader returns the contents of h as attributes. // // Instrumentation should require an explicit configuration of which headers to // capture and then prune what they pass here. Including all headers can be a // security risk - explicit configuration helps avoid leaking sensitive // information. // // The User-Agent header is already captured in the user_agent.original attribute // from ClientRequest and ServerRequest. Instrumentation may provide an option // to capture that header here even though it is not recommended. Otherwise, // instrumentation should filter that out of what is passed. func RequestHeader(h http.Header) []attribute.KeyValue { return hc.RequestHeader(h) } // ResponseHeader returns the contents of h as attributes. // // Instrumentation should require an explicit configuration of which headers to // capture and then prune what they pass here. Including all headers can be a // security risk - explicit configuration helps avoid leaking sensitive // information. // // The User-Agent header is already captured in the user_agent.original attribute // from ClientRequest and ServerRequest. Instrumentation may provide an option // to capture that header here even though it is not recommended. Otherwise, // instrumentation should filter that out of what is passed. func ResponseHeader(h http.Header) []attribute.KeyValue { return hc.ResponseHeader(h) } opentelemetry-go-1.21.0/semconv/v1.19.0/netconv/000077500000000000000000000000001452547353200211465ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.19.0/netconv/net.go000066400000000000000000000053211452547353200222640ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package netconv provides OpenTelemetry network semantic conventions for // tracing telemetry.
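//
// The example below is an illustrative sketch and is not part of the
// generated package: span, address, and conn are assumed to come from the
// caller (for example, from a net.Dial call that has already succeeded).
//
//	span.SetAttributes(netconv.Client(address, conn)...)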
package netconv // import "go.opentelemetry.io/otel/semconv/v1.19.0/netconv" import ( "net" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/semconv/internal/v3" semconv "go.opentelemetry.io/otel/semconv/v1.19.0" ) var nc = &internal.NetConv{ NetHostNameKey: semconv.NetHostNameKey, NetHostPortKey: semconv.NetHostPortKey, NetPeerNameKey: semconv.NetPeerNameKey, NetPeerPortKey: semconv.NetPeerPortKey, NetSockFamilyKey: semconv.NetSockFamilyKey, NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, NetSockPeerPortKey: semconv.NetSockPeerPortKey, NetSockHostAddrKey: semconv.NetSockHostAddrKey, NetSockHostPortKey: semconv.NetSockHostPortKey, NetTransportOther: semconv.NetTransportOther, NetTransportTCP: semconv.NetTransportTCP, NetTransportUDP: semconv.NetTransportUDP, NetTransportInProc: semconv.NetTransportInProc, } // Transport returns a trace attribute describing the transport protocol of the // passed network. See the net.Dial for information about acceptable network // values. func Transport(network string) attribute.KeyValue { return nc.Transport(network) } // Client returns trace attributes for a client network connection to address. // See net.Dial for information about acceptable address values, address should // be the same as the one used to create conn. If conn is nil, only network // peer attributes will be returned that describe address. Otherwise, the // socket level information about conn will also be included. func Client(address string, conn net.Conn) []attribute.KeyValue { return nc.Client(address, conn) } // Server returns trace attributes for a network listener listening at address. // See net.Listen for information about acceptable address values, address // should be the same as the one used to create ln. If ln is nil, only network // host attributes will be returned that describe address. Otherwise, the // socket level information about ln will also be included. func Server(address string, ln net.Listener) []attribute.KeyValue { return nc.Server(address, ln) } opentelemetry-go-1.21.0/semconv/v1.19.0/resource.go000066400000000000000000002353711452547353200216630ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.19.0" import "go.opentelemetry.io/otel/attribute" // The web browser in which the application represented by the resource is // running. The `browser.*` attributes MUST be used only for resources that // represent applications running in a web browser (regardless of whether // running on a mobile or desktop device). const ( // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" // semantic conventions. 
It represents the array of brand name and version // separated by a space // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (`navigator.userAgentData.brands`). BrowserBrandsKey = attribute.Key("browser.brands") // BrowserPlatformKey is the attribute Key conforming to the // "browser.platform" semantic conventions. It represents the platform on // which the browser is running // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Windows', 'macOS', 'Android' // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (`navigator.userAgentData.platform`). If unavailable, the legacy // `navigator.platform` API SHOULD NOT be used instead and this attribute // SHOULD be left unset in order for the values to be consistent. // The list of possible values is defined in the [W3C User-Agent Client // Hints // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). // Note that some (but not all) of these values can overlap with values in // the [`os.type` and `os.name` attributes](./os.md). However, for // consistency, the values in the `browser.platform` attribute should // capture the exact value that the user agent provides. BrowserPlatformKey = attribute.Key("browser.platform") // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" // semantic conventions. It represents a boolean that is true if the // browser is running on a mobile device // // Type: boolean // RequirementLevel: Optional // Stability: stable // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (`navigator.userAgentData.mobile`). If unavailable, this attribute // SHOULD be left unset. BrowserMobileKey = attribute.Key("browser.mobile") // BrowserLanguageKey is the attribute Key conforming to the // "browser.language" semantic conventions. It represents the preferred // language of the user using the browser // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'en', 'en-US', 'fr', 'fr-FR' // Note: This value is intended to be taken from the Navigator API // `navigator.language`. BrowserLanguageKey = attribute.Key("browser.language") ) // BrowserBrands returns an attribute KeyValue conforming to the // "browser.brands" semantic conventions. It represents the array of brand name // and version separated by a space func BrowserBrands(val ...string) attribute.KeyValue { return BrowserBrandsKey.StringSlice(val) } // BrowserPlatform returns an attribute KeyValue conforming to the // "browser.platform" semantic conventions. It represents the platform on which // the browser is running func BrowserPlatform(val string) attribute.KeyValue { return BrowserPlatformKey.String(val) } // BrowserMobile returns an attribute KeyValue conforming to the // "browser.mobile" semantic conventions. It represents a boolean that is true // if the browser is running on a mobile device func BrowserMobile(val bool) attribute.KeyValue { return BrowserMobileKey.Bool(val) } // BrowserLanguage returns an attribute KeyValue conforming to the // "browser.language" semantic conventions. 
It represents the preferred // language of the user using the browser func BrowserLanguage(val string) attribute.KeyValue { return BrowserLanguageKey.String(val) } // A cloud environment (e.g. GCP, Azure, AWS) const ( // CloudProviderKey is the attribute Key conforming to the "cloud.provider" // semantic conventions. It represents the name of the cloud provider. // // Type: Enum // RequirementLevel: Optional // Stability: stable CloudProviderKey = attribute.Key("cloud.provider") // CloudAccountIDKey is the attribute Key conforming to the // "cloud.account.id" semantic conventions. It represents the cloud account // ID the resource is assigned to. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '111111111111', 'opentelemetry' CloudAccountIDKey = attribute.Key("cloud.account.id") // CloudRegionKey is the attribute Key conforming to the "cloud.region" // semantic conventions. It represents the geographical region the resource // is running. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-central1', 'us-east-1' // Note: Refer to your provider's docs to see the available regions, for // example [Alibaba Cloud // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), // [Azure // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), // [Google Cloud regions](https://cloud.google.com/about/locations), or // [Tencent Cloud // regions](https://www.tencentcloud.com/document/product/213/6091). CloudRegionKey = attribute.Key("cloud.region") // CloudResourceIDKey is the attribute Key conforming to the // "cloud.resource_id" semantic conventions. It represents the cloud // provider-specific native identifier of the monitored cloud resource // (e.g. an // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // on AWS, a [fully qualified resource // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) // on Azure, a [full resource // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) // on GCP) // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' // Note: On some cloud providers, it may not be possible to determine the // full ID at startup, // so it may be necessary to set `cloud.resource_id` as a span attribute // instead. // // The exact value to use for `cloud.resource_id` depends on the cloud // provider. // The following well-known definitions MUST be used if you set this // attribute and they apply: // // * **AWS Lambda:** The function // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). // Take care not to use the "invoked ARN" directly but replace any // [alias // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) // with the resolved function version, as the same runtime instance may // be invokable with // multiple different aliases. 
// * **GCP:** The [URI of the // resource](https://cloud.google.com/iam/docs/full-resource-names) // * **Azure:** The [Fully Qualified Resource // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) // of the invoked function, // *not* the function app, having the form // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. // This means that a span attribute MUST be used, as an Azure function // app can host multiple functions that would usually share // a TracerProvider. CloudResourceIDKey = attribute.Key("cloud.resource_id") // CloudAvailabilityZoneKey is the attribute Key conforming to the // "cloud.availability_zone" semantic conventions. It represents the cloud // regions often have multiple, isolated locations known as zones to // increase availability. Availability zone represents the zone where the // resource is running. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-east-1c' // Note: Availability zones are called "zones" on Alibaba Cloud and Google // Cloud. CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" // semantic conventions. It represents the cloud platform in use. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: The prefix of the service SHOULD match the one specified in // `cloud.provider`. CloudPlatformKey = attribute.Key("cloud.platform") ) var ( // Alibaba Cloud CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") // Amazon Web Services CloudProviderAWS = CloudProviderKey.String("aws") // Microsoft Azure CloudProviderAzure = CloudProviderKey.String("azure") // Google Cloud Platform CloudProviderGCP = CloudProviderKey.String("gcp") // Heroku Platform as a Service CloudProviderHeroku = CloudProviderKey.String("heroku") // IBM Cloud CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") // Tencent Cloud CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") ) var ( // Alibaba Cloud Elastic Compute Service CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") // Alibaba Cloud Function Compute CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") // Red Hat OpenShift on Alibaba Cloud CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") // AWS Elastic Compute Cloud CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") // AWS Elastic Container Service CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") // AWS Elastic Kubernetes Service CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") // AWS Lambda CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") // AWS Elastic Beanstalk CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") // AWS App Runner CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") // Red Hat OpenShift on AWS (ROSA) CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") // Azure Virtual Machines CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") // Azure Container Instances CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") // Azure Kubernetes Service CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") // Azure Functions CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") // Azure App Service CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") // Azure 
Red Hat OpenShift CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") // Google Cloud Compute Engine (GCE) CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") // Google Cloud Run CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") // Google Cloud Kubernetes Engine (GKE) CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") // Google Cloud Functions (GCF) CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") // Google Cloud App Engine (GAE) CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") // Red Hat OpenShift on Google Cloud CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") // Red Hat OpenShift on IBM Cloud CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") // Tencent Cloud Cloud Virtual Machine (CVM) CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") // Tencent Cloud Elastic Kubernetes Service (EKS) CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") // Tencent Cloud Serverless Cloud Function (SCF) CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") ) // CloudAccountID returns an attribute KeyValue conforming to the // "cloud.account.id" semantic conventions. It represents the cloud account ID // the resource is assigned to. func CloudAccountID(val string) attribute.KeyValue { return CloudAccountIDKey.String(val) } // CloudRegion returns an attribute KeyValue conforming to the // "cloud.region" semantic conventions. It represents the geographical region // the resource is running. func CloudRegion(val string) attribute.KeyValue { return CloudRegionKey.String(val) } // CloudResourceID returns an attribute KeyValue conforming to the // "cloud.resource_id" semantic conventions. It represents the cloud // provider-specific native identifier of the monitored cloud resource (e.g. an // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // on AWS, a [fully qualified resource // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) // on Azure, a [full resource // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) // on GCP) func CloudResourceID(val string) attribute.KeyValue { return CloudResourceIDKey.String(val) } // CloudAvailabilityZone returns an attribute KeyValue conforming to the // "cloud.availability_zone" semantic conventions. It represents the cloud // regions often have multiple, isolated locations known as zones to increase // availability. Availability zone represents the zone where the resource is // running. func CloudAvailabilityZone(val string) attribute.KeyValue { return CloudAvailabilityZoneKey.String(val) } // Resources used by AWS Elastic Container Service (ECS). const ( // AWSECSContainerARNKey is the attribute Key conforming to the // "aws.ecs.container.arn" semantic conventions. It represents the Amazon // Resource Name (ARN) of an [ECS container // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") // AWSECSClusterARNKey is the attribute Key conforming to the // "aws.ecs.cluster.arn" semantic conventions. 
It represents the ARN of an // [ECS // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") // AWSECSLaunchtypeKey is the attribute Key conforming to the // "aws.ecs.launchtype" semantic conventions. It represents the [launch // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) // for an ECS task. // // Type: Enum // RequirementLevel: Optional // Stability: stable AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") // AWSECSTaskARNKey is the attribute Key conforming to the // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an // [ECS task // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") // AWSECSTaskFamilyKey is the attribute Key conforming to the // "aws.ecs.task.family" semantic conventions. It represents the task // definition family this task definition is a member of. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-family' AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") // AWSECSTaskRevisionKey is the attribute Key conforming to the // "aws.ecs.task.revision" semantic conventions. It represents the revision // for this task definition. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '8', '26' AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") ) var ( // ec2 AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") // fargate AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") ) // AWSECSContainerARN returns an attribute KeyValue conforming to the // "aws.ecs.container.arn" semantic conventions. It represents the Amazon // Resource Name (ARN) of an [ECS container // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). func AWSECSContainerARN(val string) attribute.KeyValue { return AWSECSContainerARNKey.String(val) } // AWSECSClusterARN returns an attribute KeyValue conforming to the // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). func AWSECSClusterARN(val string) attribute.KeyValue { return AWSECSClusterARNKey.String(val) } // AWSECSTaskARN returns an attribute KeyValue conforming to the // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS // task // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). func AWSECSTaskARN(val string) attribute.KeyValue { return AWSECSTaskARNKey.String(val) } // AWSECSTaskFamily returns an attribute KeyValue conforming to the // "aws.ecs.task.family" semantic conventions. It represents the task // definition family this task definition is a member of. func AWSECSTaskFamily(val string) attribute.KeyValue { return AWSECSTaskFamilyKey.String(val) } // AWSECSTaskRevision returns an attribute KeyValue conforming to the // "aws.ecs.task.revision" semantic conventions. It represents the revision for // this task definition. 
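//
// The indented snippet below is an illustrative sketch, not part of the
// generated API: it shows the ECS attributes being attached to a resource
// using go.opentelemetry.io/otel/sdk/resource; the values are the documented
// examples.
//
//	res := resource.NewWithAttributes(
//		SchemaURL,
//		AWSECSTaskFamily("opentelemetry-family"),
//		AWSECSTaskRevision("8"),
//	)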
func AWSECSTaskRevision(val string) attribute.KeyValue { return AWSECSTaskRevisionKey.String(val) } // Resources used by AWS Elastic Kubernetes Service (EKS). const ( // AWSEKSClusterARNKey is the attribute Key conforming to the // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an // EKS cluster. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") ) // AWSEKSClusterARN returns an attribute KeyValue conforming to the // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS // cluster. func AWSEKSClusterARN(val string) attribute.KeyValue { return AWSEKSClusterARNKey.String(val) } // Resources specific to Amazon Web Services. const ( // AWSLogGroupNamesKey is the attribute Key conforming to the // "aws.log.group.names" semantic conventions. It represents the name(s) of // the AWS log group(s) an application is writing to. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '/aws/lambda/my-function', 'opentelemetry-service' // Note: Multiple log groups must be supported for cases like // multi-container applications, where a single application has sidecar // containers, and each write to their own log group. AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") // AWSLogGroupARNsKey is the attribute Key conforming to the // "aws.log.group.arns" semantic conventions. It represents the Amazon // Resource Name(s) (ARN) of the AWS log group(s). // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' // Note: See the [log group ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") // AWSLogStreamNamesKey is the attribute Key conforming to the // "aws.log.stream.names" semantic conventions. It represents the name(s) // of the AWS log stream(s) an application is writing to. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") // AWSLogStreamARNsKey is the attribute Key conforming to the // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of // the AWS log stream(s). // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' // Note: See the [log stream ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). // One log group can contain several log streams, so these ARNs necessarily // identify both a log group and a log stream. AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") ) // AWSLogGroupNames returns an attribute KeyValue conforming to the // "aws.log.group.names" semantic conventions. It represents the name(s) of the // AWS log group(s) an application is writing to. func AWSLogGroupNames(val ...string) attribute.KeyValue { return AWSLogGroupNamesKey.StringSlice(val) } // AWSLogGroupARNs returns an attribute KeyValue conforming to the // "aws.log.group.arns" semantic conventions. 
It represents the Amazon Resource // Name(s) (ARN) of the AWS log group(s). func AWSLogGroupARNs(val ...string) attribute.KeyValue { return AWSLogGroupARNsKey.StringSlice(val) } // AWSLogStreamNames returns an attribute KeyValue conforming to the // "aws.log.stream.names" semantic conventions. It represents the name(s) of // the AWS log stream(s) an application is writing to. func AWSLogStreamNames(val ...string) attribute.KeyValue { return AWSLogStreamNamesKey.StringSlice(val) } // AWSLogStreamARNs returns an attribute KeyValue conforming to the // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the // AWS log stream(s). func AWSLogStreamARNs(val ...string) attribute.KeyValue { return AWSLogStreamARNsKey.StringSlice(val) } // Heroku dyno metadata const ( // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the // "heroku.release.creation_timestamp" semantic conventions. It represents // the time and date the release was created // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2022-10-23T18:00:42Z' HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") // HerokuReleaseCommitKey is the attribute Key conforming to the // "heroku.release.commit" semantic conventions. It represents the commit // hash for the current release // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" // semantic conventions. It represents the unique identifier for the // application // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' HerokuAppIDKey = attribute.Key("heroku.app.id") ) // HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming // to the "heroku.release.creation_timestamp" semantic conventions. It // represents the time and date the release was created func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { return HerokuReleaseCreationTimestampKey.String(val) } // HerokuReleaseCommit returns an attribute KeyValue conforming to the // "heroku.release.commit" semantic conventions. It represents the commit hash // for the current release func HerokuReleaseCommit(val string) attribute.KeyValue { return HerokuReleaseCommitKey.String(val) } // HerokuAppID returns an attribute KeyValue conforming to the // "heroku.app.id" semantic conventions. It represents the unique identifier // for the application func HerokuAppID(val string) attribute.KeyValue { return HerokuAppIDKey.String(val) } // A container instance. const ( // ContainerNameKey is the attribute Key conforming to the "container.name" // semantic conventions. It represents the container name used by container // runtime. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-autoconf' ContainerNameKey = attribute.Key("container.name") // ContainerIDKey is the attribute Key conforming to the "container.id" // semantic conventions. It represents the container ID. Usually a UUID, as // for example used to [identify Docker // containers](https://docs.docker.com/engine/reference/run/#container-identification). // The UUID might be abbreviated. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'a3bf90e006b2' ContainerIDKey = attribute.Key("container.id") // ContainerRuntimeKey is the attribute Key conforming to the // "container.runtime" semantic conventions. It represents the container // runtime managing this container. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'docker', 'containerd', 'rkt' ContainerRuntimeKey = attribute.Key("container.runtime") // ContainerImageNameKey is the attribute Key conforming to the // "container.image.name" semantic conventions. It represents the name of // the image the container was built on. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'gcr.io/opentelemetry/operator' ContainerImageNameKey = attribute.Key("container.image.name") // ContainerImageTagKey is the attribute Key conforming to the // "container.image.tag" semantic conventions. It represents the container // image tag. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0.1' ContainerImageTagKey = attribute.Key("container.image.tag") ) // ContainerName returns an attribute KeyValue conforming to the // "container.name" semantic conventions. It represents the container name used // by container runtime. func ContainerName(val string) attribute.KeyValue { return ContainerNameKey.String(val) } // ContainerID returns an attribute KeyValue conforming to the // "container.id" semantic conventions. It represents the container ID. Usually // a UUID, as for example used to [identify Docker // containers](https://docs.docker.com/engine/reference/run/#container-identification). // The UUID might be abbreviated. func ContainerID(val string) attribute.KeyValue { return ContainerIDKey.String(val) } // ContainerRuntime returns an attribute KeyValue conforming to the // "container.runtime" semantic conventions. It represents the container // runtime managing this container. func ContainerRuntime(val string) attribute.KeyValue { return ContainerRuntimeKey.String(val) } // ContainerImageName returns an attribute KeyValue conforming to the // "container.image.name" semantic conventions. It represents the name of the // image the container was built on. func ContainerImageName(val string) attribute.KeyValue { return ContainerImageNameKey.String(val) } // ContainerImageTag returns an attribute KeyValue conforming to the // "container.image.tag" semantic conventions. It represents the container // image tag. func ContainerImageTag(val string) attribute.KeyValue { return ContainerImageTagKey.String(val) } // The software deployment. const ( // DeploymentEnvironmentKey is the attribute Key conforming to the // "deployment.environment" semantic conventions. It represents the name of // the [deployment // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka // deployment tier). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'staging', 'production' DeploymentEnvironmentKey = attribute.Key("deployment.environment") ) // DeploymentEnvironment returns an attribute KeyValue conforming to the // "deployment.environment" semantic conventions. It represents the name of the // [deployment // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka // deployment tier). func DeploymentEnvironment(val string) attribute.KeyValue { return DeploymentEnvironmentKey.String(val) } // The device on which the process represented by this resource is running. 
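//
// The indented snippet below is an illustrative sketch, not part of the
// generated API: it shows the device attribute constructors defined later in
// this file being attached to a resource via
// go.opentelemetry.io/otel/sdk/resource; the values are the documented
// examples.
//
//	res := resource.NewWithAttributes(
//		SchemaURL,
//		DeviceManufacturer("Samsung"),
//		DeviceModelIdentifier("SM-G920F"),
//	)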
const ( // DeviceIDKey is the attribute Key conforming to the "device.id" semantic // conventions. It represents a unique identifier representing the device // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' // Note: The device identifier MUST only be defined using the values // outlined below. This value is not an advertising identifier and MUST NOT // be used as such. On iOS (Swift or Objective-C), this value MUST be equal // to the [vendor // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). // On Android (Java or Kotlin), this value MUST be equal to the Firebase // Installation ID or a globally unique UUID which is persisted across // sessions in your application. More information can be found // [here](https://developer.android.com/training/articles/user-data-ids) on // best practices and exact implementation details. Caution should be taken // when storing personal data or anything which can identify a user. GDPR // and data protection laws may apply, ensure you do your own due // diligence. DeviceIDKey = attribute.Key("device.id") // DeviceModelIdentifierKey is the attribute Key conforming to the // "device.model.identifier" semantic conventions. It represents the model // identifier for the device // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iPhone3,4', 'SM-G920F' // Note: It's recommended this value represents a machine readable version // of the model identifier rather than the market or consumer-friendly name // of the device. DeviceModelIdentifierKey = attribute.Key("device.model.identifier") // DeviceModelNameKey is the attribute Key conforming to the // "device.model.name" semantic conventions. It represents the marketing // name for the device model // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' // Note: It's recommended this value represents a human readable version of // the device model rather than a machine readable alternative. DeviceModelNameKey = attribute.Key("device.model.name") // DeviceManufacturerKey is the attribute Key conforming to the // "device.manufacturer" semantic conventions. It represents the name of // the device manufacturer // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Apple', 'Samsung' // Note: The Android OS provides this field via // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). // iOS apps SHOULD hardcode the value `Apple`. DeviceManufacturerKey = attribute.Key("device.manufacturer") ) // DeviceID returns an attribute KeyValue conforming to the "device.id" // semantic conventions. It represents a unique identifier representing the // device func DeviceID(val string) attribute.KeyValue { return DeviceIDKey.String(val) } // DeviceModelIdentifier returns an attribute KeyValue conforming to the // "device.model.identifier" semantic conventions. It represents the model // identifier for the device func DeviceModelIdentifier(val string) attribute.KeyValue { return DeviceModelIdentifierKey.String(val) } // DeviceModelName returns an attribute KeyValue conforming to the // "device.model.name" semantic conventions. 
It represents the marketing name // for the device model func DeviceModelName(val string) attribute.KeyValue { return DeviceModelNameKey.String(val) } // DeviceManufacturer returns an attribute KeyValue conforming to the // "device.manufacturer" semantic conventions. It represents the name of the // device manufacturer func DeviceManufacturer(val string) attribute.KeyValue { return DeviceManufacturerKey.String(val) } // A serverless instance. const ( // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic // conventions. It represents the name of the single function that this // runtime instance executes. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'my-function', 'myazurefunctionapp/some-function-name' // Note: This is the name of the function as configured/deployed on the // FaaS // platform and is usually different from the name of the callback // function (which may be stored in the // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) // span attributes). // // For some cloud providers, the above definition is ambiguous. The // following // definition of function name MUST be used for this attribute // (and consequently the span name) for the listed cloud // providers/products: // // * **Azure:** The full name `/`, i.e., function app name // followed by a forward slash followed by the function name (this form // can also be seen in the resource JSON for the function). // This means that a span attribute MUST be used, as an Azure function // app can host multiple functions that would usually share // a TracerProvider (see also the `cloud.resource_id` attribute). FaaSNameKey = attribute.Key("faas.name") // FaaSVersionKey is the attribute Key conforming to the "faas.version" // semantic conventions. It represents the immutable version of the // function being executed. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '26', 'pinkfroid-00002' // Note: Depending on the cloud provider and platform, use: // // * **AWS Lambda:** The [function // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) // (an integer represented as a decimal string). // * **Google Cloud Run:** The // [revision](https://cloud.google.com/run/docs/managing/revisions) // (i.e., the function name plus the revision suffix). // * **Google Cloud Functions:** The value of the // [`K_REVISION` environment // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). // * **Azure Functions:** Not applicable. Do not set this attribute. FaaSVersionKey = attribute.Key("faas.version") // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" // semantic conventions. It represents the execution environment ID as a // string, that will be potentially reused for other invocations to the // same function/function version. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' // Note: * **AWS Lambda:** Use the (full) log stream name. FaaSInstanceKey = attribute.Key("faas.instance") // FaaSMaxMemoryKey is the attribute Key conforming to the // "faas.max_memory" semantic conventions. It represents the amount of // memory available to the serverless function converted to Bytes. 
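// Usage sketch (editorial note, not generated code): attaching the device.*
// constructors above to a resource describing a mobile client. Assumes the
// go.opentelemetry.io/otel/sdk/resource package and this package imported as
// semconv; the identifier and model values are hypothetical.
//
//     res := resource.NewWithAttributes(
//         semconv.SchemaURL,
//         semconv.DeviceID("2ab2916d-a51f-4ac8-80ee-45ac31a28092"),
//         semconv.DeviceModelIdentifier("SM-G920F"),
//         semconv.DeviceModelName("Samsung Galaxy S6"),
//         semconv.DeviceManufacturer("Samsung"),
//     )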
// // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 134217728 // Note: It's recommended to set this attribute since e.g. too little // memory can easily stop a Java AWS Lambda function from working // correctly. On AWS Lambda, the environment variable // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must // be multiplied by 1,048,576). FaaSMaxMemoryKey = attribute.Key("faas.max_memory") ) // FaaSName returns an attribute KeyValue conforming to the "faas.name" // semantic conventions. It represents the name of the single function that // this runtime instance executes. func FaaSName(val string) attribute.KeyValue { return FaaSNameKey.String(val) } // FaaSVersion returns an attribute KeyValue conforming to the // "faas.version" semantic conventions. It represents the immutable version of // the function being executed. func FaaSVersion(val string) attribute.KeyValue { return FaaSVersionKey.String(val) } // FaaSInstance returns an attribute KeyValue conforming to the // "faas.instance" semantic conventions. It represents the execution // environment ID as a string, that will be potentially reused for other // invocations to the same function/function version. func FaaSInstance(val string) attribute.KeyValue { return FaaSInstanceKey.String(val) } // FaaSMaxMemory returns an attribute KeyValue conforming to the // "faas.max_memory" semantic conventions. It represents the amount of memory // available to the serverless function converted to Bytes. func FaaSMaxMemory(val int) attribute.KeyValue { return FaaSMaxMemoryKey.Int(val) } // A host is defined as a general computing instance. const ( // HostIDKey is the attribute Key conforming to the "host.id" semantic // conventions. It represents the unique host ID. For Cloud, this must be // the instance_id assigned by the cloud provider. For non-containerized // systems, this should be the `machine-id`. See the table below for the // sources to use to determine the `machine-id` based on operating system. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'fdbf79e8af94cb7f9e8df36789187052' HostIDKey = attribute.Key("host.id") // HostNameKey is the attribute Key conforming to the "host.name" semantic // conventions. It represents the name of the host. On Unix systems, it may // contain what the hostname command returns, or the fully qualified // hostname, or another name specified by the user. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-test' HostNameKey = attribute.Key("host.name") // HostTypeKey is the attribute Key conforming to the "host.type" semantic // conventions. It represents the type of host. For Cloud, this must be the // machine type. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'n1-standard-1' HostTypeKey = attribute.Key("host.type") // HostArchKey is the attribute Key conforming to the "host.arch" semantic // conventions. It represents the CPU architecture the host system is // running on. // // Type: Enum // RequirementLevel: Optional // Stability: stable HostArchKey = attribute.Key("host.arch") // HostImageNameKey is the attribute Key conforming to the // "host.image.name" semantic conventions. It represents the name of the VM // image or OS install the host was instantiated from. 
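// Usage sketch (editorial note, not generated code): describing a serverless
// function instance with the faas.* constructors above. Assumes the
// go.opentelemetry.io/otel/sdk/resource package and this package imported as
// semconv; the function name, version, instance, and memory values are
// hypothetical.
//
//     res := resource.NewWithAttributes(
//         semconv.SchemaURL,
//         semconv.FaaSName("my-function"),
//         semconv.FaaSVersion("26"),
//         semconv.FaaSInstance("2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de"),
//         semconv.FaaSMaxMemory(134217728), // 128 MiB expressed in bytes
//     )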
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' HostImageNameKey = attribute.Key("host.image.name") // HostImageIDKey is the attribute Key conforming to the "host.image.id" // semantic conventions. It represents the vM image ID. For Cloud, this // value is from the provider. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'ami-07b06b442921831e5' HostImageIDKey = attribute.Key("host.image.id") // HostImageVersionKey is the attribute Key conforming to the // "host.image.version" semantic conventions. It represents the version // string of the VM image as defined in [Version // Attributes](README.md#version-attributes). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0.1' HostImageVersionKey = attribute.Key("host.image.version") ) var ( // AMD64 HostArchAMD64 = HostArchKey.String("amd64") // ARM32 HostArchARM32 = HostArchKey.String("arm32") // ARM64 HostArchARM64 = HostArchKey.String("arm64") // Itanium HostArchIA64 = HostArchKey.String("ia64") // 32-bit PowerPC HostArchPPC32 = HostArchKey.String("ppc32") // 64-bit PowerPC HostArchPPC64 = HostArchKey.String("ppc64") // IBM z/Architecture HostArchS390x = HostArchKey.String("s390x") // 32-bit x86 HostArchX86 = HostArchKey.String("x86") ) // HostID returns an attribute KeyValue conforming to the "host.id" semantic // conventions. It represents the unique host ID. For Cloud, this must be the // instance_id assigned by the cloud provider. For non-containerized systems, // this should be the `machine-id`. See the table below for the sources to use // to determine the `machine-id` based on operating system. func HostID(val string) attribute.KeyValue { return HostIDKey.String(val) } // HostName returns an attribute KeyValue conforming to the "host.name" // semantic conventions. It represents the name of the host. On Unix systems, // it may contain what the hostname command returns, or the fully qualified // hostname, or another name specified by the user. func HostName(val string) attribute.KeyValue { return HostNameKey.String(val) } // HostType returns an attribute KeyValue conforming to the "host.type" // semantic conventions. It represents the type of host. For Cloud, this must // be the machine type. func HostType(val string) attribute.KeyValue { return HostTypeKey.String(val) } // HostImageName returns an attribute KeyValue conforming to the // "host.image.name" semantic conventions. It represents the name of the VM // image or OS install the host was instantiated from. func HostImageName(val string) attribute.KeyValue { return HostImageNameKey.String(val) } // HostImageID returns an attribute KeyValue conforming to the // "host.image.id" semantic conventions. It represents the vM image ID. For // Cloud, this value is from the provider. func HostImageID(val string) attribute.KeyValue { return HostImageIDKey.String(val) } // HostImageVersion returns an attribute KeyValue conforming to the // "host.image.version" semantic conventions. It represents the version string // of the VM image as defined in [Version // Attributes](README.md#version-attributes). func HostImageVersion(val string) attribute.KeyValue { return HostImageVersionKey.String(val) } // A Kubernetes Cluster. const ( // K8SClusterNameKey is the attribute Key conforming to the // "k8s.cluster.name" semantic conventions. It represents the name of the // cluster. 
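// Usage sketch (editorial note, not generated code): describing the host with
// the host.* constructors and one of the HostArch enum values above. Assumes
// the go.opentelemetry.io/otel/sdk/resource package and this package imported
// as semconv; the concrete values are hypothetical.
//
//     res := resource.NewWithAttributes(
//         semconv.SchemaURL,
//         semconv.HostID("fdbf79e8af94cb7f9e8df36789187052"),
//         semconv.HostName("opentelemetry-test"),
//         semconv.HostType("n1-standard-1"),
//         semconv.HostArchAMD64, // enum value; already an attribute.KeyValue
//         semconv.HostImageName("infra-ami-eks-worker-node-7d4ec78312"),
//         semconv.HostImageVersion("0.1"),
//     )
//
// Alternatively, resource.New(ctx, resource.WithHost()) lets the SDK detect
// host.name automatically.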
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-cluster' K8SClusterNameKey = attribute.Key("k8s.cluster.name") ) // K8SClusterName returns an attribute KeyValue conforming to the // "k8s.cluster.name" semantic conventions. It represents the name of the // cluster. func K8SClusterName(val string) attribute.KeyValue { return K8SClusterNameKey.String(val) } // A Kubernetes Node object. const ( // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" // semantic conventions. It represents the name of the Node. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'node-1' K8SNodeNameKey = attribute.Key("k8s.node.name") // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" // semantic conventions. It represents the UID of the Node. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' K8SNodeUIDKey = attribute.Key("k8s.node.uid") ) // K8SNodeName returns an attribute KeyValue conforming to the // "k8s.node.name" semantic conventions. It represents the name of the Node. func K8SNodeName(val string) attribute.KeyValue { return K8SNodeNameKey.String(val) } // K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" // semantic conventions. It represents the UID of the Node. func K8SNodeUID(val string) attribute.KeyValue { return K8SNodeUIDKey.String(val) } // A Kubernetes Namespace. const ( // K8SNamespaceNameKey is the attribute Key conforming to the // "k8s.namespace.name" semantic conventions. It represents the name of the // namespace that the pod is running in. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'default' K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") ) // K8SNamespaceName returns an attribute KeyValue conforming to the // "k8s.namespace.name" semantic conventions. It represents the name of the // namespace that the pod is running in. func K8SNamespaceName(val string) attribute.KeyValue { return K8SNamespaceNameKey.String(val) } // A Kubernetes Pod object. const ( // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" // semantic conventions. It represents the UID of the Pod. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SPodUIDKey = attribute.Key("k8s.pod.uid") // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" // semantic conventions. It represents the name of the Pod. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-pod-autoconf' K8SPodNameKey = attribute.Key("k8s.pod.name") ) // K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" // semantic conventions. It represents the UID of the Pod. func K8SPodUID(val string) attribute.KeyValue { return K8SPodUIDKey.String(val) } // K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" // semantic conventions. It represents the name of the Pod. func K8SPodName(val string) attribute.KeyValue { return K8SPodNameKey.String(val) } // A container in a // [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). const ( // K8SContainerNameKey is the attribute Key conforming to the // "k8s.container.name" semantic conventions. It represents the name of the // Container from Pod specification, must be unique within a Pod. 
Container // runtime usually uses different globally unique name (`container.name`). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'redis' K8SContainerNameKey = attribute.Key("k8s.container.name") // K8SContainerRestartCountKey is the attribute Key conforming to the // "k8s.container.restart_count" semantic conventions. It represents the // number of times the container was restarted. This attribute can be used // to identify a particular container (running or stopped) within a // container spec. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 0, 2 K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") ) // K8SContainerName returns an attribute KeyValue conforming to the // "k8s.container.name" semantic conventions. It represents the name of the // Container from Pod specification, must be unique within a Pod. Container // runtime usually uses different globally unique name (`container.name`). func K8SContainerName(val string) attribute.KeyValue { return K8SContainerNameKey.String(val) } // K8SContainerRestartCount returns an attribute KeyValue conforming to the // "k8s.container.restart_count" semantic conventions. It represents the number // of times the container was restarted. This attribute can be used to identify // a particular container (running or stopped) within a container spec. func K8SContainerRestartCount(val int) attribute.KeyValue { return K8SContainerRestartCountKey.Int(val) } // A Kubernetes ReplicaSet object. const ( // K8SReplicaSetUIDKey is the attribute Key conforming to the // "k8s.replicaset.uid" semantic conventions. It represents the UID of the // ReplicaSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") // K8SReplicaSetNameKey is the attribute Key conforming to the // "k8s.replicaset.name" semantic conventions. It represents the name of // the ReplicaSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") ) // K8SReplicaSetUID returns an attribute KeyValue conforming to the // "k8s.replicaset.uid" semantic conventions. It represents the UID of the // ReplicaSet. func K8SReplicaSetUID(val string) attribute.KeyValue { return K8SReplicaSetUIDKey.String(val) } // K8SReplicaSetName returns an attribute KeyValue conforming to the // "k8s.replicaset.name" semantic conventions. It represents the name of the // ReplicaSet. func K8SReplicaSetName(val string) attribute.KeyValue { return K8SReplicaSetNameKey.String(val) } // A Kubernetes Deployment object. const ( // K8SDeploymentUIDKey is the attribute Key conforming to the // "k8s.deployment.uid" semantic conventions. It represents the UID of the // Deployment. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") // K8SDeploymentNameKey is the attribute Key conforming to the // "k8s.deployment.name" semantic conventions. It represents the name of // the Deployment. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") ) // K8SDeploymentUID returns an attribute KeyValue conforming to the // "k8s.deployment.uid" semantic conventions. 
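// Usage sketch (editorial note, not generated code): identifying a Kubernetes
// pod and one of its containers with the k8s.* constructors above. Assumes the
// go.opentelemetry.io/otel/sdk/resource package and this package imported as
// semconv; names and UIDs are hypothetical. In practice these values are often
// injected through the Kubernetes Downward API and read from environment
// variables.
//
//     res := resource.NewWithAttributes(
//         semconv.SchemaURL,
//         semconv.K8SClusterName("opentelemetry-cluster"),
//         semconv.K8SNamespaceName("default"),
//         semconv.K8SPodName("opentelemetry-pod-autoconf"),
//         semconv.K8SPodUID("275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"),
//         semconv.K8SContainerName("redis"),
//         semconv.K8SContainerRestartCount(2),
//     )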
It represents the UID of the // Deployment. func K8SDeploymentUID(val string) attribute.KeyValue { return K8SDeploymentUIDKey.String(val) } // K8SDeploymentName returns an attribute KeyValue conforming to the // "k8s.deployment.name" semantic conventions. It represents the name of the // Deployment. func K8SDeploymentName(val string) attribute.KeyValue { return K8SDeploymentNameKey.String(val) } // A Kubernetes StatefulSet object. const ( // K8SStatefulSetUIDKey is the attribute Key conforming to the // "k8s.statefulset.uid" semantic conventions. It represents the UID of the // StatefulSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") // K8SStatefulSetNameKey is the attribute Key conforming to the // "k8s.statefulset.name" semantic conventions. It represents the name of // the StatefulSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") ) // K8SStatefulSetUID returns an attribute KeyValue conforming to the // "k8s.statefulset.uid" semantic conventions. It represents the UID of the // StatefulSet. func K8SStatefulSetUID(val string) attribute.KeyValue { return K8SStatefulSetUIDKey.String(val) } // K8SStatefulSetName returns an attribute KeyValue conforming to the // "k8s.statefulset.name" semantic conventions. It represents the name of the // StatefulSet. func K8SStatefulSetName(val string) attribute.KeyValue { return K8SStatefulSetNameKey.String(val) } // A Kubernetes DaemonSet object. const ( // K8SDaemonSetUIDKey is the attribute Key conforming to the // "k8s.daemonset.uid" semantic conventions. It represents the UID of the // DaemonSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") // K8SDaemonSetNameKey is the attribute Key conforming to the // "k8s.daemonset.name" semantic conventions. It represents the name of the // DaemonSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") ) // K8SDaemonSetUID returns an attribute KeyValue conforming to the // "k8s.daemonset.uid" semantic conventions. It represents the UID of the // DaemonSet. func K8SDaemonSetUID(val string) attribute.KeyValue { return K8SDaemonSetUIDKey.String(val) } // K8SDaemonSetName returns an attribute KeyValue conforming to the // "k8s.daemonset.name" semantic conventions. It represents the name of the // DaemonSet. func K8SDaemonSetName(val string) attribute.KeyValue { return K8SDaemonSetNameKey.String(val) } // A Kubernetes Job object. const ( // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" // semantic conventions. It represents the UID of the Job. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SJobUIDKey = attribute.Key("k8s.job.uid") // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" // semantic conventions. It represents the name of the Job. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SJobNameKey = attribute.Key("k8s.job.name") ) // K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" // semantic conventions. It represents the UID of the Job. 
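// Usage sketch (editorial note, not generated code): combining cluster-level
// attributes with workload-owner attributes such as the Deployment constructors
// above. Assumes the go.opentelemetry.io/otel/sdk/resource package and this
// package imported as semconv; names and UIDs are hypothetical.
//
//     common := resource.NewWithAttributes(
//         semconv.SchemaURL,
//         semconv.K8SClusterName("opentelemetry-cluster"),
//         semconv.K8SNodeName("node-1"),
//     )
//     owner := resource.NewWithAttributes(
//         semconv.SchemaURL,
//         semconv.K8SDeploymentName("opentelemetry"),
//         semconv.K8SDeploymentUID("275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"),
//     )
//     // Attributes from the second argument win on key conflicts; err reports
//     // a schema URL conflict between the two resources (none here, since both
//     // use semconv.SchemaURL).
//     res, err := resource.Merge(common, owner)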
func K8SJobUID(val string) attribute.KeyValue { return K8SJobUIDKey.String(val) } // K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" // semantic conventions. It represents the name of the Job. func K8SJobName(val string) attribute.KeyValue { return K8SJobNameKey.String(val) } // A Kubernetes CronJob object. const ( // K8SCronJobUIDKey is the attribute Key conforming to the // "k8s.cronjob.uid" semantic conventions. It represents the UID of the // CronJob. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") // K8SCronJobNameKey is the attribute Key conforming to the // "k8s.cronjob.name" semantic conventions. It represents the name of the // CronJob. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") ) // K8SCronJobUID returns an attribute KeyValue conforming to the // "k8s.cronjob.uid" semantic conventions. It represents the UID of the // CronJob. func K8SCronJobUID(val string) attribute.KeyValue { return K8SCronJobUIDKey.String(val) } // K8SCronJobName returns an attribute KeyValue conforming to the // "k8s.cronjob.name" semantic conventions. It represents the name of the // CronJob. func K8SCronJobName(val string) attribute.KeyValue { return K8SCronJobNameKey.String(val) } // The operating system (OS) on which the process represented by this resource // is running. const ( // OSTypeKey is the attribute Key conforming to the "os.type" semantic // conventions. It represents the operating system type. // // Type: Enum // RequirementLevel: Required // Stability: stable OSTypeKey = attribute.Key("os.type") // OSDescriptionKey is the attribute Key conforming to the "os.description" // semantic conventions. It represents the human readable (not intended to // be parsed) OS version information, like e.g. reported by `ver` or // `lsb_release -a` commands. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 // LTS' OSDescriptionKey = attribute.Key("os.description") // OSNameKey is the attribute Key conforming to the "os.name" semantic // conventions. It represents the human readable operating system name. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iOS', 'Android', 'Ubuntu' OSNameKey = attribute.Key("os.name") // OSVersionKey is the attribute Key conforming to the "os.version" // semantic conventions. It represents the version string of the operating // system as defined in [Version // Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '14.2.1', '18.04.1' OSVersionKey = attribute.Key("os.version") ) var ( // Microsoft Windows OSTypeWindows = OSTypeKey.String("windows") // Linux OSTypeLinux = OSTypeKey.String("linux") // Apple Darwin OSTypeDarwin = OSTypeKey.String("darwin") // FreeBSD OSTypeFreeBSD = OSTypeKey.String("freebsd") // NetBSD OSTypeNetBSD = OSTypeKey.String("netbsd") // OpenBSD OSTypeOpenBSD = OSTypeKey.String("openbsd") // DragonFly BSD OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") // HP-UX (Hewlett Packard Unix) OSTypeHPUX = OSTypeKey.String("hpux") // AIX (Advanced Interactive eXecutive) OSTypeAIX = OSTypeKey.String("aix") // SunOS, Oracle Solaris OSTypeSolaris = OSTypeKey.String("solaris") // IBM z/OS OSTypeZOS = OSTypeKey.String("z_os") ) // OSDescription returns an attribute KeyValue conforming to the // "os.description" semantic conventions. It represents the human readable (not // intended to be parsed) OS version information, like e.g. reported by `ver` // or `lsb_release -a` commands. func OSDescription(val string) attribute.KeyValue { return OSDescriptionKey.String(val) } // OSName returns an attribute KeyValue conforming to the "os.name" semantic // conventions. It represents the human readable operating system name. func OSName(val string) attribute.KeyValue { return OSNameKey.String(val) } // OSVersion returns an attribute KeyValue conforming to the "os.version" // semantic conventions. It represents the version string of the operating // system as defined in [Version // Attributes](../../resource/semantic_conventions/README.md#version-attributes). func OSVersion(val string) attribute.KeyValue { return OSVersionKey.String(val) } // An operating system process. const ( // ProcessPIDKey is the attribute Key conforming to the "process.pid" // semantic conventions. It represents the process identifier (PID). // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 1234 ProcessPIDKey = attribute.Key("process.pid") // ProcessParentPIDKey is the attribute Key conforming to the // "process.parent_pid" semantic conventions. It represents the parent // Process identifier (PID). // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 111 ProcessParentPIDKey = attribute.Key("process.parent_pid") // ProcessExecutableNameKey is the attribute Key conforming to the // "process.executable.name" semantic conventions. It represents the name // of the process executable. On Linux based systems, can be set to the // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name // of `GetProcessImageFileNameW`. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes // below.) // Stability: stable // Examples: 'otelcol' ProcessExecutableNameKey = attribute.Key("process.executable.name") // ProcessExecutablePathKey is the attribute Key conforming to the // "process.executable.path" semantic conventions. It represents the full // path to the process executable. On Linux based systems, can be set to // the target of `proc/[pid]/exe`. On Windows, can be set to the result of // `GetProcessImageFileNameW`. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes // below.) // Stability: stable // Examples: '/usr/bin/cmd/otelcol' ProcessExecutablePathKey = attribute.Key("process.executable.path") // ProcessCommandKey is the attribute Key conforming to the // "process.command" semantic conventions. 
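// Usage sketch (editorial note, not generated code): describing the operating
// system with the os.* constructors and one of the OSType enum values above.
// Assumes the go.opentelemetry.io/otel/sdk/resource package and this package
// imported as semconv; the values are hypothetical.
//
//     res := resource.NewWithAttributes(
//         semconv.SchemaURL,
//         semconv.OSTypeLinux, // enum value for os.type
//         semconv.OSName("Ubuntu"),
//         semconv.OSVersion("18.04.1"),
//         semconv.OSDescription("Ubuntu 18.04.1 LTS"),
//     )
//
// The SDK's resource.WithOS option can populate os.type and os.description
// automatically on supported platforms.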
It represents the command used // to launch the process (i.e. the command name). On Linux based systems, // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can // be set to the first parameter extracted from `GetCommandLineW`. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes // below.) // Stability: stable // Examples: 'cmd/otelcol' ProcessCommandKey = attribute.Key("process.command") // ProcessCommandLineKey is the attribute Key conforming to the // "process.command_line" semantic conventions. It represents the full // command used to launch the process as a single string representing the // full command. On Windows, can be set to the result of `GetCommandLineW`. // Do not set this if you have to assemble it just for monitoring; use // `process.command_args` instead. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes // below.) // Stability: stable // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' ProcessCommandLineKey = attribute.Key("process.command_line") // ProcessCommandArgsKey is the attribute Key conforming to the // "process.command_args" semantic conventions. It represents the all the // command arguments (including the command/executable itself) as received // by the process. On Linux-based systems (and some other Unixoid systems // supporting procfs), can be set according to the list of null-delimited // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, // this would be the full argv vector passed to `main`. // // Type: string[] // RequirementLevel: ConditionallyRequired (See alternative attributes // below.) // Stability: stable // Examples: 'cmd/otecol', '--config=config.yaml' ProcessCommandArgsKey = attribute.Key("process.command_args") // ProcessOwnerKey is the attribute Key conforming to the "process.owner" // semantic conventions. It represents the username of the user that owns // the process. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'root' ProcessOwnerKey = attribute.Key("process.owner") ) // ProcessPID returns an attribute KeyValue conforming to the "process.pid" // semantic conventions. It represents the process identifier (PID). func ProcessPID(val int) attribute.KeyValue { return ProcessPIDKey.Int(val) } // ProcessParentPID returns an attribute KeyValue conforming to the // "process.parent_pid" semantic conventions. It represents the parent Process // identifier (PID). func ProcessParentPID(val int) attribute.KeyValue { return ProcessParentPIDKey.Int(val) } // ProcessExecutableName returns an attribute KeyValue conforming to the // "process.executable.name" semantic conventions. It represents the name of // the process executable. On Linux based systems, can be set to the `Name` in // `proc/[pid]/status`. On Windows, can be set to the base name of // `GetProcessImageFileNameW`. func ProcessExecutableName(val string) attribute.KeyValue { return ProcessExecutableNameKey.String(val) } // ProcessExecutablePath returns an attribute KeyValue conforming to the // "process.executable.path" semantic conventions. It represents the full path // to the process executable. On Linux based systems, can be set to the target // of `proc/[pid]/exe`. On Windows, can be set to the result of // `GetProcessImageFileNameW`. 
func ProcessExecutablePath(val string) attribute.KeyValue { return ProcessExecutablePathKey.String(val) } // ProcessCommand returns an attribute KeyValue conforming to the // "process.command" semantic conventions. It represents the command used to // launch the process (i.e. the command name). On Linux based systems, can be // set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to // the first parameter extracted from `GetCommandLineW`. func ProcessCommand(val string) attribute.KeyValue { return ProcessCommandKey.String(val) } // ProcessCommandLine returns an attribute KeyValue conforming to the // "process.command_line" semantic conventions. It represents the full command // used to launch the process as a single string representing the full command. // On Windows, can be set to the result of `GetCommandLineW`. Do not set this // if you have to assemble it just for monitoring; use `process.command_args` // instead. func ProcessCommandLine(val string) attribute.KeyValue { return ProcessCommandLineKey.String(val) } // ProcessCommandArgs returns an attribute KeyValue conforming to the // "process.command_args" semantic conventions. It represents the all the // command arguments (including the command/executable itself) as received by // the process. On Linux-based systems (and some other Unixoid systems // supporting procfs), can be set according to the list of null-delimited // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, // this would be the full argv vector passed to `main`. func ProcessCommandArgs(val ...string) attribute.KeyValue { return ProcessCommandArgsKey.StringSlice(val) } // ProcessOwner returns an attribute KeyValue conforming to the // "process.owner" semantic conventions. It represents the username of the user // that owns the process. func ProcessOwner(val string) attribute.KeyValue { return ProcessOwnerKey.String(val) } // The single (language) runtime instance which is monitored. const ( // ProcessRuntimeNameKey is the attribute Key conforming to the // "process.runtime.name" semantic conventions. It represents the name of // the runtime of this process. For compiled native binaries, this SHOULD // be the name of the compiler. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'OpenJDK Runtime Environment' ProcessRuntimeNameKey = attribute.Key("process.runtime.name") // ProcessRuntimeVersionKey is the attribute Key conforming to the // "process.runtime.version" semantic conventions. It represents the // version of the runtime of this process, as returned by the runtime // without modification. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '14.0.2' ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") // ProcessRuntimeDescriptionKey is the attribute Key conforming to the // "process.runtime.description" semantic conventions. It represents an // additional description about the runtime of the process, for example a // specific vendor customization of the runtime environment. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") ) // ProcessRuntimeName returns an attribute KeyValue conforming to the // "process.runtime.name" semantic conventions. It represents the name of the // runtime of this process. For compiled native binaries, this SHOULD be the // name of the compiler. 
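// Usage sketch (editorial note, not generated code): filling the process.*
// attributes above from the standard library at startup. Assumes the standard
// library os and path/filepath packages, the go.opentelemetry.io/otel/sdk/resource
// package, and this package imported as semconv; error handling is elided for
// brevity.
//
//     exe, _ := os.Executable()
//     res := resource.NewWithAttributes(
//         semconv.SchemaURL,
//         semconv.ProcessPID(os.Getpid()),
//         semconv.ProcessExecutablePath(exe),
//         semconv.ProcessExecutableName(filepath.Base(exe)),
//         semconv.ProcessCommandArgs(os.Args...),
//     )
//
// resource.WithProcess enables the SDK's built-in detectors for most of these
// values.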
func ProcessRuntimeName(val string) attribute.KeyValue { return ProcessRuntimeNameKey.String(val) } // ProcessRuntimeVersion returns an attribute KeyValue conforming to the // "process.runtime.version" semantic conventions. It represents the version of // the runtime of this process, as returned by the runtime without // modification. func ProcessRuntimeVersion(val string) attribute.KeyValue { return ProcessRuntimeVersionKey.String(val) } // ProcessRuntimeDescription returns an attribute KeyValue conforming to the // "process.runtime.description" semantic conventions. It represents an // additional description about the runtime of the process, for example a // specific vendor customization of the runtime environment. func ProcessRuntimeDescription(val string) attribute.KeyValue { return ProcessRuntimeDescriptionKey.String(val) } // A service instance. const ( // ServiceNameKey is the attribute Key conforming to the "service.name" // semantic conventions. It represents the logical name of the service. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'shoppingcart' // Note: MUST be the same for all instances of horizontally scaled // services. If the value was not specified, SDKs MUST fallback to // `unknown_service:` concatenated with // [`process.executable.name`](process.md#process), e.g. // `unknown_service:bash`. If `process.executable.name` is not available, // the value MUST be set to `unknown_service`. ServiceNameKey = attribute.Key("service.name") // ServiceNamespaceKey is the attribute Key conforming to the // "service.namespace" semantic conventions. It represents a namespace for // `service.name`. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Shop' // Note: A string value having a meaning that helps to distinguish a group // of services, for example the team name that owns a group of services. // `service.name` is expected to be unique within the same namespace. If // `service.namespace` is not specified in the Resource then `service.name` // is expected to be unique for all services that have no explicit // namespace defined (so the empty/unspecified namespace is simply one more // valid namespace). Zero-length namespace string is assumed equal to // unspecified namespace. ServiceNamespaceKey = attribute.Key("service.namespace") // ServiceInstanceIDKey is the attribute Key conforming to the // "service.instance.id" semantic conventions. It represents the string ID // of the service instance. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '627cc493-f310-47de-96bd-71410b7dec09' // Note: MUST be unique for each instance of the same // `service.namespace,service.name` pair (in other words // `service.namespace,service.name,service.instance.id` triplet MUST be // globally unique). The ID helps to distinguish instances of the same // service that exist at the same time (e.g. instances of a horizontally // scaled service). It is preferable for the ID to be persistent and stay // the same for the lifetime of the service instance, however it is // acceptable that the ID is ephemeral and changes during important // lifetime events for the service (e.g. service restarts). If the service // has no inherent unique ID that can be used as the value of this // attribute it is recommended to generate a random Version 1 or Version 4 // RFC 4122 UUID (services aiming for reproducible UUIDs may also use // Version 5, see RFC 4122 for more recommendations). 
ServiceInstanceIDKey = attribute.Key("service.instance.id") // ServiceVersionKey is the attribute Key conforming to the // "service.version" semantic conventions. It represents the version string // of the service API or implementation. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2.0.0' ServiceVersionKey = attribute.Key("service.version") ) // ServiceName returns an attribute KeyValue conforming to the // "service.name" semantic conventions. It represents the logical name of the // service. func ServiceName(val string) attribute.KeyValue { return ServiceNameKey.String(val) } // ServiceNamespace returns an attribute KeyValue conforming to the // "service.namespace" semantic conventions. It represents a namespace for // `service.name`. func ServiceNamespace(val string) attribute.KeyValue { return ServiceNamespaceKey.String(val) } // ServiceInstanceID returns an attribute KeyValue conforming to the // "service.instance.id" semantic conventions. It represents the string ID of // the service instance. func ServiceInstanceID(val string) attribute.KeyValue { return ServiceInstanceIDKey.String(val) } // ServiceVersion returns an attribute KeyValue conforming to the // "service.version" semantic conventions. It represents the version string of // the service API or implementation. func ServiceVersion(val string) attribute.KeyValue { return ServiceVersionKey.String(val) } // The telemetry SDK used to capture data recorded by the instrumentation // libraries. const ( // TelemetrySDKNameKey is the attribute Key conforming to the // "telemetry.sdk.name" semantic conventions. It represents the name of the // telemetry SDK as defined above. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") // TelemetrySDKLanguageKey is the attribute Key conforming to the // "telemetry.sdk.language" semantic conventions. It represents the // language of the telemetry SDK. // // Type: Enum // RequirementLevel: Optional // Stability: stable TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") // TelemetrySDKVersionKey is the attribute Key conforming to the // "telemetry.sdk.version" semantic conventions. It represents the version // string of the telemetry SDK. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.2.3' TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") // TelemetryAutoVersionKey is the attribute Key conforming to the // "telemetry.auto.version" semantic conventions. It represents the version // string of the auto instrumentation agent, if used. 
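// Usage sketch (editorial note, not generated code): the common pattern of
// describing a service with the service.* constructors above and installing the
// resulting resource on a TracerProvider. Assumes go.opentelemetry.io/otel,
// go.opentelemetry.io/otel/sdk/resource, and go.opentelemetry.io/otel/sdk/trace;
// the service values are hypothetical.
//
//     import (
//         "go.opentelemetry.io/otel"
//         "go.opentelemetry.io/otel/sdk/resource"
//         sdktrace "go.opentelemetry.io/otel/sdk/trace"
//         semconv "go.opentelemetry.io/otel/semconv/v1.19.0"
//     )
//
//     res := resource.NewWithAttributes(
//         semconv.SchemaURL,
//         semconv.ServiceName("shoppingcart"),
//         semconv.ServiceNamespace("Shop"),
//         semconv.ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
//         semconv.ServiceVersion("2.0.0"),
//     )
//     tp := sdktrace.NewTracerProvider(sdktrace.WithResource(res))
//     otel.SetTracerProvider(tp)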
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.2.3' TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") ) var ( // cpp TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") // dotnet TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") // erlang TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") // go TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") // java TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") // nodejs TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") // php TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") // python TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") // ruby TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") // webjs TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") // swift TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") ) // TelemetrySDKName returns an attribute KeyValue conforming to the // "telemetry.sdk.name" semantic conventions. It represents the name of the // telemetry SDK as defined above. func TelemetrySDKName(val string) attribute.KeyValue { return TelemetrySDKNameKey.String(val) } // TelemetrySDKVersion returns an attribute KeyValue conforming to the // "telemetry.sdk.version" semantic conventions. It represents the version // string of the telemetry SDK. func TelemetrySDKVersion(val string) attribute.KeyValue { return TelemetrySDKVersionKey.String(val) } // TelemetryAutoVersion returns an attribute KeyValue conforming to the // "telemetry.auto.version" semantic conventions. It represents the version // string of the auto instrumentation agent, if used. func TelemetryAutoVersion(val string) attribute.KeyValue { return TelemetryAutoVersionKey.String(val) } // Resource describing the packaged software running the application code. Web // engines are typically executed using process.runtime. const ( // WebEngineNameKey is the attribute Key conforming to the "webengine.name" // semantic conventions. It represents the name of the web engine. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'WildFly' WebEngineNameKey = attribute.Key("webengine.name") // WebEngineVersionKey is the attribute Key conforming to the // "webengine.version" semantic conventions. It represents the version of // the web engine. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '21.0.0' WebEngineVersionKey = attribute.Key("webengine.version") // WebEngineDescriptionKey is the attribute Key conforming to the // "webengine.description" semantic conventions. It represents the // additional description of the web engine (e.g. detailed version and // edition information). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - // 2.2.2.Final' WebEngineDescriptionKey = attribute.Key("webengine.description") ) // WebEngineName returns an attribute KeyValue conforming to the // "webengine.name" semantic conventions. It represents the name of the web // engine. func WebEngineName(val string) attribute.KeyValue { return WebEngineNameKey.String(val) } // WebEngineVersion returns an attribute KeyValue conforming to the // "webengine.version" semantic conventions. It represents the version of the // web engine. 
func WebEngineVersion(val string) attribute.KeyValue { return WebEngineVersionKey.String(val) } // WebEngineDescription returns an attribute KeyValue conforming to the // "webengine.description" semantic conventions. It represents the additional // description of the web engine (e.g. detailed version and edition // information). func WebEngineDescription(val string) attribute.KeyValue { return WebEngineDescriptionKey.String(val) } // Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's // concepts. const ( // OTelScopeNameKey is the attribute Key conforming to the // "otel.scope.name" semantic conventions. It represents the name of the // instrumentation scope - (`InstrumentationScope.Name` in OTLP). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'io.opentelemetry.contrib.mongodb' OTelScopeNameKey = attribute.Key("otel.scope.name") // OTelScopeVersionKey is the attribute Key conforming to the // "otel.scope.version" semantic conventions. It represents the version of // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.0.0' OTelScopeVersionKey = attribute.Key("otel.scope.version") ) // OTelScopeName returns an attribute KeyValue conforming to the // "otel.scope.name" semantic conventions. It represents the name of the // instrumentation scope - (`InstrumentationScope.Name` in OTLP). func OTelScopeName(val string) attribute.KeyValue { return OTelScopeNameKey.String(val) } // OTelScopeVersion returns an attribute KeyValue conforming to the // "otel.scope.version" semantic conventions. It represents the version of the // instrumentation scope - (`InstrumentationScope.Version` in OTLP). func OTelScopeVersion(val string) attribute.KeyValue { return OTelScopeVersionKey.String(val) } // Span attributes used by non-OTLP exporters to represent OpenTelemetry // Scope's concepts. const ( // OTelLibraryNameKey is the attribute Key conforming to the // "otel.library.name" semantic conventions. It represents the deprecated, // use the `otel.scope.name` attribute. // // Type: string // RequirementLevel: Optional // Stability: deprecated // Examples: 'io.opentelemetry.contrib.mongodb' OTelLibraryNameKey = attribute.Key("otel.library.name") // OTelLibraryVersionKey is the attribute Key conforming to the // "otel.library.version" semantic conventions. It represents the // deprecated, use the `otel.scope.version` attribute. // // Type: string // RequirementLevel: Optional // Stability: deprecated // Examples: '1.0.0' OTelLibraryVersionKey = attribute.Key("otel.library.version") ) // OTelLibraryName returns an attribute KeyValue conforming to the // "otel.library.name" semantic conventions. It represents the deprecated, use // the `otel.scope.name` attribute. func OTelLibraryName(val string) attribute.KeyValue { return OTelLibraryNameKey.String(val) } // OTelLibraryVersion returns an attribute KeyValue conforming to the // "otel.library.version" semantic conventions. It represents the deprecated, // use the `otel.scope.version` attribute. func OTelLibraryVersion(val string) attribute.KeyValue { return OTelLibraryVersionKey.String(val) } opentelemetry-go-1.21.0/semconv/v1.19.0/schema.go000066400000000000000000000017141452547353200212640ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.19.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ const SchemaURL = "https://opentelemetry.io/schemas/1.19.0" opentelemetry-go-1.21.0/semconv/v1.19.0/trace.go000066400000000000000000002572601452547353200211330ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.19.0" import "go.opentelemetry.io/otel/attribute" // The shared attributes used to report a single exception associated with a // span or log. const ( // ExceptionTypeKey is the attribute Key conforming to the "exception.type" // semantic conventions. It represents the type of the exception (its // fully-qualified class name, if applicable). The dynamic type of the // exception should be preferred over the static type in languages that // support it. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'java.net.ConnectException', 'OSError' ExceptionTypeKey = attribute.Key("exception.type") // ExceptionMessageKey is the attribute Key conforming to the // "exception.message" semantic conventions. It represents the exception // message. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Division by zero', "Can't convert 'int' object to str // implicitly" ExceptionMessageKey = attribute.Key("exception.message") // ExceptionStacktraceKey is the attribute Key conforming to the // "exception.stacktrace" semantic conventions. It represents a stacktrace // as a string in the natural representation for the language runtime. The // representation is to be determined and documented by each language SIG. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test // exception\\n at ' // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' ExceptionStacktraceKey = attribute.Key("exception.stacktrace") ) // ExceptionType returns an attribute KeyValue conforming to the // "exception.type" semantic conventions. It represents the type of the // exception (its fully-qualified class name, if applicable). 
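// Usage sketch (editorial note, not generated code): SchemaURL is meant to
// travel with telemetry that uses this package's conventions. It can be set on
// a resource and declared by instrumentation when obtaining a tracer. Assumes
// go.opentelemetry.io/otel, go.opentelemetry.io/otel/trace, and
// go.opentelemetry.io/otel/sdk/resource; the scope and service names are
// hypothetical.
//
//     res := resource.NewWithAttributes(semconv.SchemaURL, semconv.ServiceName("checkout"))
//     tracer := otel.Tracer(
//         "example.com/instrumentation/checkout",
//         trace.WithSchemaURL(semconv.SchemaURL),
//     )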
The dynamic type // of the exception should be preferred over the static type in languages that // support it. func ExceptionType(val string) attribute.KeyValue { return ExceptionTypeKey.String(val) } // ExceptionMessage returns an attribute KeyValue conforming to the // "exception.message" semantic conventions. It represents the exception // message. func ExceptionMessage(val string) attribute.KeyValue { return ExceptionMessageKey.String(val) } // ExceptionStacktrace returns an attribute KeyValue conforming to the // "exception.stacktrace" semantic conventions. It represents a stacktrace as a // string in the natural representation for the language runtime. The // representation is to be determined and documented by each language SIG. func ExceptionStacktrace(val string) attribute.KeyValue { return ExceptionStacktraceKey.String(val) } // Span attributes used by AWS Lambda (in addition to general `faas` // attributes). const ( // AWSLambdaInvokedARNKey is the attribute Key conforming to the // "aws.lambda.invoked_arn" semantic conventions. It represents the full // invoked ARN as provided on the `Context` passed to the function // (`Lambda-Runtime-Invoked-Function-ARN` header on the // `/runtime/invocation/next` applicable). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' // Note: This may be different from `cloud.resource_id` if an alias is // involved. AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") ) // AWSLambdaInvokedARN returns an attribute KeyValue conforming to the // "aws.lambda.invoked_arn" semantic conventions. It represents the full // invoked ARN as provided on the `Context` passed to the function // (`Lambda-Runtime-Invoked-Function-ARN` header on the // `/runtime/invocation/next` applicable). func AWSLambdaInvokedARN(val string) attribute.KeyValue { return AWSLambdaInvokedARNKey.String(val) } // Attributes for CloudEvents. CloudEvents is a specification on how to define // event data in a standard way. These attributes can be attached to spans when // performing operations with CloudEvents, regardless of the protocol being // used. const ( // CloudeventsEventIDKey is the attribute Key conforming to the // "cloudevents.event_id" semantic conventions. It represents the // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) // uniquely identifies the event. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") // CloudeventsEventSourceKey is the attribute Key conforming to the // "cloudevents.event_source" semantic conventions. It represents the // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) // identifies the context in which an event happened. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'https://github.com/cloudevents', // '/cloudevents/spec/pull/123', 'my-service' CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") // CloudeventsEventSpecVersionKey is the attribute Key conforming to the // "cloudevents.event_spec_version" semantic conventions. It represents the // [version of the CloudEvents // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) // which the event uses. 
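// Usage sketch (editorial note, not generated code): recording an exception on
// a span with the exception.* constructors above. Assumes an active span from
// go.opentelemetry.io/otel/trace and this package imported as semconv; in most
// cases span.RecordError(err) is the simpler way to produce an equivalent
// event.
//
//     span.AddEvent("exception", trace.WithAttributes(
//         semconv.ExceptionType("OSError"),
//         semconv.ExceptionMessage("Division by zero"),
//         semconv.ExceptionStacktrace("stack trace rendered as a string"),
//     ))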
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.0' CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") // CloudeventsEventTypeKey is the attribute Key conforming to the // "cloudevents.event_type" semantic conventions. It represents the // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) // contains a value describing the type of event related to the originating // occurrence. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'com.github.pull_request.opened', // 'com.example.object.deleted.v2' CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") // CloudeventsEventSubjectKey is the attribute Key conforming to the // "cloudevents.event_subject" semantic conventions. It represents the // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) // of the event in the context of the event producer (identified by // source). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'mynewfile.jpg' CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") ) // CloudeventsEventID returns an attribute KeyValue conforming to the // "cloudevents.event_id" semantic conventions. It represents the // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) // uniquely identifies the event. func CloudeventsEventID(val string) attribute.KeyValue { return CloudeventsEventIDKey.String(val) } // CloudeventsEventSource returns an attribute KeyValue conforming to the // "cloudevents.event_source" semantic conventions. It represents the // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) // identifies the context in which an event happened. func CloudeventsEventSource(val string) attribute.KeyValue { return CloudeventsEventSourceKey.String(val) } // CloudeventsEventSpecVersion returns an attribute KeyValue conforming to // the "cloudevents.event_spec_version" semantic conventions. It represents the // [version of the CloudEvents // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) // which the event uses. func CloudeventsEventSpecVersion(val string) attribute.KeyValue { return CloudeventsEventSpecVersionKey.String(val) } // CloudeventsEventType returns an attribute KeyValue conforming to the // "cloudevents.event_type" semantic conventions. It represents the // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) // contains a value describing the type of event related to the originating // occurrence. func CloudeventsEventType(val string) attribute.KeyValue { return CloudeventsEventTypeKey.String(val) } // CloudeventsEventSubject returns an attribute KeyValue conforming to the // "cloudevents.event_subject" semantic conventions. It represents the // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) // of the event in the context of the event producer (identified by source). func CloudeventsEventSubject(val string) attribute.KeyValue { return CloudeventsEventSubjectKey.String(val) } // Semantic conventions for the OpenTracing Shim const ( // OpentracingRefTypeKey is the attribute Key conforming to the // "opentracing.ref_type" semantic conventions. 
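// Usage sketch (editorial note, not generated code): annotating a span that
// processes a CloudEvent with the cloudevents.* constructors above. Assumes an
// active span from go.opentelemetry.io/otel/trace and this package imported as
// semconv; the event values are hypothetical.
//
//     span.SetAttributes(
//         semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
//         semconv.CloudeventsEventSource("https://github.com/cloudevents"),
//         semconv.CloudeventsEventSpecVersion("1.0"),
//         semconv.CloudeventsEventType("com.example.object.deleted.v2"),
//     )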
It represents the // parent-child Reference type // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: The causal relationship between a child Span and a parent Span. OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") ) var ( // The parent Span depends on the child Span in some capacity OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") // The parent Span does not depend in any way on the result of the child Span OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") ) // The attributes used to perform database client calls. const ( // DBSystemKey is the attribute Key conforming to the "db.system" semantic // conventions. It represents an identifier for the database management // system (DBMS) product being used. See below for a list of well-known // identifiers. // // Type: Enum // RequirementLevel: Required // Stability: stable DBSystemKey = attribute.Key("db.system") // DBConnectionStringKey is the attribute Key conforming to the // "db.connection_string" semantic conventions. It represents the // connection string used to connect to the database. It is recommended to // remove embedded credentials. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' DBConnectionStringKey = attribute.Key("db.connection_string") // DBUserKey is the attribute Key conforming to the "db.user" semantic // conventions. It represents the username for accessing the database. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'readonly_user', 'reporting_user' DBUserKey = attribute.Key("db.user") // DBJDBCDriverClassnameKey is the attribute Key conforming to the // "db.jdbc.driver_classname" semantic conventions. It represents the // fully-qualified class name of the [Java Database Connectivity // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) // driver used to connect. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'org.postgresql.Driver', // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") // DBNameKey is the attribute Key conforming to the "db.name" semantic // conventions. It represents the this attribute is used to report the name // of the database being accessed. For commands that switch the database, // this should be set to the target database (even if the command fails). // // Type: string // RequirementLevel: ConditionallyRequired (If applicable.) // Stability: stable // Examples: 'customers', 'main' // Note: In some SQL databases, the database name to be used is called // "schema name". In case there are multiple layers that could be // considered for database name (e.g. Oracle instance name and schema // name), the database name to be used is the more specific layer (e.g. // Oracle schema name). DBNameKey = attribute.Key("db.name") // DBStatementKey is the attribute Key conforming to the "db.statement" // semantic conventions. It represents the database statement being // executed. // // Type: string // RequirementLevel: ConditionallyRequired (If applicable and not // explicitly disabled via instrumentation configuration.) // Stability: stable // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' // Note: The value may be sanitized to exclude sensitive information. 
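// Usage sketch for the OpenTracing shim enum above (illustrative; span is an
// assumed trace.Span). A shim recording a child-of relationship could set:
//
//	span.SetAttributes(semconv.OpentracingRefTypeChildOf)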
DBStatementKey = attribute.Key("db.statement") // DBOperationKey is the attribute Key conforming to the "db.operation" // semantic conventions. It represents the name of the operation being // executed, e.g. the [MongoDB command // name](https://docs.mongodb.com/manual/reference/command/#database-operations) // such as `findAndModify`, or the SQL keyword. // // Type: string // RequirementLevel: ConditionallyRequired (If `db.statement` is not // applicable.) // Stability: stable // Examples: 'findAndModify', 'HMSET', 'SELECT' // Note: When setting this to an SQL keyword, it is not recommended to // attempt any client-side parsing of `db.statement` just to get this // property, but it should be set if the operation name is provided by the // library being instrumented. If the SQL statement has an ambiguous // operation, or performs more than one operation, this value may be // omitted. DBOperationKey = attribute.Key("db.operation") ) var ( // Some other SQL database. Fallback only. See notes DBSystemOtherSQL = DBSystemKey.String("other_sql") // Microsoft SQL Server DBSystemMSSQL = DBSystemKey.String("mssql") // Microsoft SQL Server Compact DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") // MySQL DBSystemMySQL = DBSystemKey.String("mysql") // Oracle Database DBSystemOracle = DBSystemKey.String("oracle") // IBM DB2 DBSystemDB2 = DBSystemKey.String("db2") // PostgreSQL DBSystemPostgreSQL = DBSystemKey.String("postgresql") // Amazon Redshift DBSystemRedshift = DBSystemKey.String("redshift") // Apache Hive DBSystemHive = DBSystemKey.String("hive") // Cloudscape DBSystemCloudscape = DBSystemKey.String("cloudscape") // HyperSQL DataBase DBSystemHSQLDB = DBSystemKey.String("hsqldb") // Progress Database DBSystemProgress = DBSystemKey.String("progress") // SAP MaxDB DBSystemMaxDB = DBSystemKey.String("maxdb") // SAP HANA DBSystemHanaDB = DBSystemKey.String("hanadb") // Ingres DBSystemIngres = DBSystemKey.String("ingres") // FirstSQL DBSystemFirstSQL = DBSystemKey.String("firstsql") // EnterpriseDB DBSystemEDB = DBSystemKey.String("edb") // InterSystems Caché DBSystemCache = DBSystemKey.String("cache") // Adabas (Adaptable Database System) DBSystemAdabas = DBSystemKey.String("adabas") // Firebird DBSystemFirebird = DBSystemKey.String("firebird") // Apache Derby DBSystemDerby = DBSystemKey.String("derby") // FileMaker DBSystemFilemaker = DBSystemKey.String("filemaker") // Informix DBSystemInformix = DBSystemKey.String("informix") // InstantDB DBSystemInstantDB = DBSystemKey.String("instantdb") // InterBase DBSystemInterbase = DBSystemKey.String("interbase") // MariaDB DBSystemMariaDB = DBSystemKey.String("mariadb") // Netezza DBSystemNetezza = DBSystemKey.String("netezza") // Pervasive PSQL DBSystemPervasive = DBSystemKey.String("pervasive") // PointBase DBSystemPointbase = DBSystemKey.String("pointbase") // SQLite DBSystemSqlite = DBSystemKey.String("sqlite") // Sybase DBSystemSybase = DBSystemKey.String("sybase") // Teradata DBSystemTeradata = DBSystemKey.String("teradata") // Vertica DBSystemVertica = DBSystemKey.String("vertica") // H2 DBSystemH2 = DBSystemKey.String("h2") // ColdFusion IMQ DBSystemColdfusion = DBSystemKey.String("coldfusion") // Apache Cassandra DBSystemCassandra = DBSystemKey.String("cassandra") // Apache HBase DBSystemHBase = DBSystemKey.String("hbase") // MongoDB DBSystemMongoDB = DBSystemKey.String("mongodb") // Redis DBSystemRedis = DBSystemKey.String("redis") // Couchbase DBSystemCouchbase = DBSystemKey.String("couchbase") // CouchDB DBSystemCouchDB = 
DBSystemKey.String("couchdb") // Microsoft Azure Cosmos DB DBSystemCosmosDB = DBSystemKey.String("cosmosdb") // Amazon DynamoDB DBSystemDynamoDB = DBSystemKey.String("dynamodb") // Neo4j DBSystemNeo4j = DBSystemKey.String("neo4j") // Apache Geode DBSystemGeode = DBSystemKey.String("geode") // Elasticsearch DBSystemElasticsearch = DBSystemKey.String("elasticsearch") // Memcached DBSystemMemcached = DBSystemKey.String("memcached") // CockroachDB DBSystemCockroachdb = DBSystemKey.String("cockroachdb") // OpenSearch DBSystemOpensearch = DBSystemKey.String("opensearch") // ClickHouse DBSystemClickhouse = DBSystemKey.String("clickhouse") // Cloud Spanner DBSystemSpanner = DBSystemKey.String("spanner") ) // DBConnectionString returns an attribute KeyValue conforming to the // "db.connection_string" semantic conventions. It represents the connection // string used to connect to the database. It is recommended to remove embedded // credentials. func DBConnectionString(val string) attribute.KeyValue { return DBConnectionStringKey.String(val) } // DBUser returns an attribute KeyValue conforming to the "db.user" semantic // conventions. It represents the username for accessing the database. func DBUser(val string) attribute.KeyValue { return DBUserKey.String(val) } // DBJDBCDriverClassname returns an attribute KeyValue conforming to the // "db.jdbc.driver_classname" semantic conventions. It represents the // fully-qualified class name of the [Java Database Connectivity // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver // used to connect. func DBJDBCDriverClassname(val string) attribute.KeyValue { return DBJDBCDriverClassnameKey.String(val) } // DBName returns an attribute KeyValue conforming to the "db.name" semantic // conventions. It represents the this attribute is used to report the name of // the database being accessed. For commands that switch the database, this // should be set to the target database (even if the command fails). func DBName(val string) attribute.KeyValue { return DBNameKey.String(val) } // DBStatement returns an attribute KeyValue conforming to the // "db.statement" semantic conventions. It represents the database statement // being executed. func DBStatement(val string) attribute.KeyValue { return DBStatementKey.String(val) } // DBOperation returns an attribute KeyValue conforming to the // "db.operation" semantic conventions. It represents the name of the operation // being executed, e.g. the [MongoDB command // name](https://docs.mongodb.com/manual/reference/command/#database-operations) // such as `findAndModify`, or the SQL keyword. func DBOperation(val string) attribute.KeyValue { return DBOperationKey.String(val) } // Connection-level attributes for Microsoft SQL Server const ( // DBMSSQLInstanceNameKey is the attribute Key conforming to the // "db.mssql.instance_name" semantic conventions. It represents the // Microsoft SQL Server [instance // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) // connecting to. This name is used to determine the port of a named // instance. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MSSQLSERVER' // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no // longer required (but still recommended if non-standard). DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") ) // DBMSSQLInstanceName returns an attribute KeyValue conforming to the // "db.mssql.instance_name" semantic conventions. 
It represents the Microsoft // SQL Server [instance // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) // connecting to. This name is used to determine the port of a named instance. func DBMSSQLInstanceName(val string) attribute.KeyValue { return DBMSSQLInstanceNameKey.String(val) } // Call-level attributes for Cassandra const ( // DBCassandraPageSizeKey is the attribute Key conforming to the // "db.cassandra.page_size" semantic conventions. It represents the fetch // size used for paging, i.e. how many rows will be returned at once. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 5000 DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") // DBCassandraConsistencyLevelKey is the attribute Key conforming to the // "db.cassandra.consistency_level" semantic conventions. It represents the // consistency level of the query. Based on consistency values from // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). // // Type: Enum // RequirementLevel: Optional // Stability: stable DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") // DBCassandraTableKey is the attribute Key conforming to the // "db.cassandra.table" semantic conventions. It represents the name of the // primary table that the operation is acting upon, including the keyspace // name (if applicable). // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'mytable' // Note: This mirrors the db.sql.table attribute but references cassandra // rather than sql. It is not recommended to attempt any client-side // parsing of `db.statement` just to get this property, but it should be // set if it is provided by the library being instrumented. If the // operation is acting upon an anonymous table, or more than one table, // this value MUST NOT be set. DBCassandraTableKey = attribute.Key("db.cassandra.table") // DBCassandraIdempotenceKey is the attribute Key conforming to the // "db.cassandra.idempotence" semantic conventions. It represents the // whether or not the query is idempotent. // // Type: boolean // RequirementLevel: Optional // Stability: stable DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming // to the "db.cassandra.speculative_execution_count" semantic conventions. // It represents the number of times a query was speculatively executed. // Not set or `0` if the query was not executed speculatively. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 0, 2 DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") // DBCassandraCoordinatorIDKey is the attribute Key conforming to the // "db.cassandra.coordinator.id" semantic conventions. It represents the ID // of the coordinating node for a query. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") // DBCassandraCoordinatorDCKey is the attribute Key conforming to the // "db.cassandra.coordinator.dc" semantic conventions. It represents the // data center of the coordinating node for a query. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-west-2' DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") ) var ( // all DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") // each_quorum DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") // quorum DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") // local_quorum DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") // one DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") // two DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") // three DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") // local_one DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") // any DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") // serial DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") // local_serial DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") ) // DBCassandraPageSize returns an attribute KeyValue conforming to the // "db.cassandra.page_size" semantic conventions. It represents the fetch size // used for paging, i.e. how many rows will be returned at once. func DBCassandraPageSize(val int) attribute.KeyValue { return DBCassandraPageSizeKey.Int(val) } // DBCassandraTable returns an attribute KeyValue conforming to the // "db.cassandra.table" semantic conventions. It represents the name of the // primary table that the operation is acting upon, including the keyspace name // (if applicable). func DBCassandraTable(val string) attribute.KeyValue { return DBCassandraTableKey.String(val) } // DBCassandraIdempotence returns an attribute KeyValue conforming to the // "db.cassandra.idempotence" semantic conventions. It represents the whether // or not the query is idempotent. func DBCassandraIdempotence(val bool) attribute.KeyValue { return DBCassandraIdempotenceKey.Bool(val) } // DBCassandraSpeculativeExecutionCount returns an attribute KeyValue // conforming to the "db.cassandra.speculative_execution_count" semantic // conventions. It represents the number of times a query was speculatively // executed. Not set or `0` if the query was not executed speculatively. func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { return DBCassandraSpeculativeExecutionCountKey.Int(val) } // DBCassandraCoordinatorID returns an attribute KeyValue conforming to the // "db.cassandra.coordinator.id" semantic conventions. It represents the ID of // the coordinating node for a query. func DBCassandraCoordinatorID(val string) attribute.KeyValue { return DBCassandraCoordinatorIDKey.String(val) } // DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the // "db.cassandra.coordinator.dc" semantic conventions. It represents the data // center of the coordinating node for a query. func DBCassandraCoordinatorDC(val string) attribute.KeyValue { return DBCassandraCoordinatorDCKey.String(val) } // Call-level attributes for Redis const ( // DBRedisDBIndexKey is the attribute Key conforming to the // "db.redis.database_index" semantic conventions. It represents the index // of the database being accessed as used in the [`SELECT` // command](https://redis.io/commands/select), provided as an integer. 
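// Usage sketch for the Cassandra call-level attributes above (illustrative;
// span is an assumed trace.Span and the values follow the documented
// Examples):
//
//	span.SetAttributes(
//		semconv.DBSystemCassandra,
//		semconv.DBCassandraTable("mytable"),
//		semconv.DBCassandraPageSize(5000),
//		semconv.DBCassandraConsistencyLevelLocalQuorum,
//		semconv.DBCassandraCoordinatorID("be13faa2-8574-4d71-926d-27f16cf8a7af"),
//		semconv.DBCassandraCoordinatorDC("us-west-2"),
//	)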
To // be used instead of the generic `db.name` attribute. // // Type: int // RequirementLevel: ConditionallyRequired (If other than the default // database (`0`).) // Stability: stable // Examples: 0, 1, 15 DBRedisDBIndexKey = attribute.Key("db.redis.database_index") ) // DBRedisDBIndex returns an attribute KeyValue conforming to the // "db.redis.database_index" semantic conventions. It represents the index of // the database being accessed as used in the [`SELECT` // command](https://redis.io/commands/select), provided as an integer. To be // used instead of the generic `db.name` attribute. func DBRedisDBIndex(val int) attribute.KeyValue { return DBRedisDBIndexKey.Int(val) } // Call-level attributes for MongoDB const ( // DBMongoDBCollectionKey is the attribute Key conforming to the // "db.mongodb.collection" semantic conventions. It represents the // collection being accessed within the database stated in `db.name`. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'customers', 'products' DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") ) // DBMongoDBCollection returns an attribute KeyValue conforming to the // "db.mongodb.collection" semantic conventions. It represents the collection // being accessed within the database stated in `db.name`. func DBMongoDBCollection(val string) attribute.KeyValue { return DBMongoDBCollectionKey.String(val) } // Call-level attributes for SQL databases const ( // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" // semantic conventions. It represents the name of the primary table that // the operation is acting upon, including the database name (if // applicable). // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'public.users', 'customers' // Note: It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting // upon an anonymous table, or more than one table, this value MUST NOT be // set. DBSQLTableKey = attribute.Key("db.sql.table") ) // DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" // semantic conventions. It represents the name of the primary table that the // operation is acting upon, including the database name (if applicable). func DBSQLTable(val string) attribute.KeyValue { return DBSQLTableKey.String(val) } // Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's // concepts. const ( // OTelStatusCodeKey is the attribute Key conforming to the // "otel.status_code" semantic conventions. It represents the name of the // code, either "OK" or "ERROR". MUST NOT be set if the status code is // UNSET. // // Type: Enum // RequirementLevel: Optional // Stability: stable OTelStatusCodeKey = attribute.Key("otel.status_code") // OTelStatusDescriptionKey is the attribute Key conforming to the // "otel.status_description" semantic conventions. It represents the // description of the Status if it has a value, otherwise not set. 
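// Usage sketch for the system-specific call-level attributes above
// (illustrative; span is an assumed trace.Span). A Redis call against a
// non-default database, or a SQL table access, might be recorded as:
//
//	span.SetAttributes(
//		semconv.DBSystemRedis,
//		semconv.DBRedisDBIndex(1),
//	)
//	// or, for a SQL database:
//	span.SetAttributes(
//		semconv.DBSystemPostgreSQL,
//		semconv.DBSQLTable("public.users"),
//	)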
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'resource not found' OTelStatusDescriptionKey = attribute.Key("otel.status_description") ) var ( // The operation has been validated by an Application developer or Operator to have completed successfully OTelStatusCodeOk = OTelStatusCodeKey.String("OK") // The operation contains an error OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") ) // OTelStatusDescription returns an attribute KeyValue conforming to the // "otel.status_description" semantic conventions. It represents the // description of the Status if it has a value, otherwise not set. func OTelStatusDescription(val string) attribute.KeyValue { return OTelStatusDescriptionKey.String(val) } // This semantic convention describes an instance of a function that runs // without provisioning or managing of servers (also known as serverless // functions or Function as a Service (FaaS)) with spans. const ( // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" // semantic conventions. It represents the type of the trigger which caused // this function invocation. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: For the server/consumer span on the incoming side, // `faas.trigger` MUST be set. // // Clients invoking FaaS instances usually cannot set `faas.trigger`, // since they would typically need to look in the payload to determine // the event type. If clients set it, it should be the same as the // trigger that corresponding incoming would have (i.e., this has // nothing to do with the underlying transport used to make the API // call to invoke the lambda, which is often HTTP). FaaSTriggerKey = attribute.Key("faas.trigger") // FaaSInvocationIDKey is the attribute Key conforming to the // "faas.invocation_id" semantic conventions. It represents the invocation // ID of the current function invocation. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' FaaSInvocationIDKey = attribute.Key("faas.invocation_id") ) var ( // A response to some data source operation such as a database or filesystem read/write FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") // To provide an answer to an inbound HTTP request FaaSTriggerHTTP = FaaSTriggerKey.String("http") // A function is set to be executed when messages are sent to a messaging system FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") // A function is scheduled to be executed regularly FaaSTriggerTimer = FaaSTriggerKey.String("timer") // If none of the others apply FaaSTriggerOther = FaaSTriggerKey.String("other") ) // FaaSInvocationID returns an attribute KeyValue conforming to the // "faas.invocation_id" semantic conventions. It represents the invocation ID // of the current function invocation. func FaaSInvocationID(val string) attribute.KeyValue { return FaaSInvocationIDKey.String(val) } // Semantic Convention for FaaS triggered as a response to some data source // operation such as a database or filesystem read/write. const ( // FaaSDocumentCollectionKey is the attribute Key conforming to the // "faas.document.collection" semantic conventions. It represents the name // of the source on which the triggering operation was performed. For // example, in Cloud Storage or S3 corresponds to the bucket name, and in // Cosmos DB to the database name. 
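// Usage sketch for the FaaS trigger attributes above on an incoming
// server/consumer span (illustrative; span is an assumed trace.Span and the
// invocation ID follows the documented Example):
//
//	span.SetAttributes(
//		semconv.FaaSTriggerHTTP,
//		semconv.FaaSInvocationID("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
//	)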
// // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myBucketName', 'myDBName' FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") // FaaSDocumentOperationKey is the attribute Key conforming to the // "faas.document.operation" semantic conventions. It represents the // describes the type of the operation that was performed on the data. // // Type: Enum // RequirementLevel: Required // Stability: stable FaaSDocumentOperationKey = attribute.Key("faas.document.operation") // FaaSDocumentTimeKey is the attribute Key conforming to the // "faas.document.time" semantic conventions. It represents a string // containing the time when the data was accessed in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSDocumentTimeKey = attribute.Key("faas.document.time") // FaaSDocumentNameKey is the attribute Key conforming to the // "faas.document.name" semantic conventions. It represents the document // name/table subjected to the operation. For example, in Cloud Storage or // S3 is the name of the file, and in Cosmos DB the table name. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'myFile.txt', 'myTableName' FaaSDocumentNameKey = attribute.Key("faas.document.name") ) var ( // When a new object is created FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") // When an object is modified FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") // When an object is deleted FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") ) // FaaSDocumentCollection returns an attribute KeyValue conforming to the // "faas.document.collection" semantic conventions. It represents the name of // the source on which the triggering operation was performed. For example, in // Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the // database name. func FaaSDocumentCollection(val string) attribute.KeyValue { return FaaSDocumentCollectionKey.String(val) } // FaaSDocumentTime returns an attribute KeyValue conforming to the // "faas.document.time" semantic conventions. It represents a string containing // the time when the data was accessed in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). func FaaSDocumentTime(val string) attribute.KeyValue { return FaaSDocumentTimeKey.String(val) } // FaaSDocumentName returns an attribute KeyValue conforming to the // "faas.document.name" semantic conventions. It represents the document // name/table subjected to the operation. For example, in Cloud Storage or S3 // is the name of the file, and in Cosmos DB the table name. func FaaSDocumentName(val string) attribute.KeyValue { return FaaSDocumentNameKey.String(val) } // Semantic Convention for FaaS scheduled to be executed regularly. const ( // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic // conventions. It represents a string containing the function invocation // time in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). 
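// Usage sketch for the datasource-triggered FaaS attributes above
// (illustrative; span is an assumed trace.Span, values follow the Examples):
//
//	span.SetAttributes(
//		semconv.FaaSTriggerDatasource,
//		semconv.FaaSDocumentCollection("myBucketName"),
//		semconv.FaaSDocumentOperationInsert,
//		semconv.FaaSDocumentName("myFile.txt"),
//		semconv.FaaSDocumentTime("2020-01-23T13:47:06Z"),
//	)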
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSTimeKey = attribute.Key("faas.time") // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic // conventions. It represents a string containing the schedule period as // [Cron // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0/5 * * * ? *' FaaSCronKey = attribute.Key("faas.cron") ) // FaaSTime returns an attribute KeyValue conforming to the "faas.time" // semantic conventions. It represents a string containing the function // invocation time in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). func FaaSTime(val string) attribute.KeyValue { return FaaSTimeKey.String(val) } // FaaSCron returns an attribute KeyValue conforming to the "faas.cron" // semantic conventions. It represents a string containing the schedule period // as [Cron // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). func FaaSCron(val string) attribute.KeyValue { return FaaSCronKey.String(val) } // Contains additional attributes for incoming FaaS spans. const ( // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" // semantic conventions. It represents a boolean that is true if the // serverless function is executed for the first time (aka cold-start). // // Type: boolean // RequirementLevel: Optional // Stability: stable FaaSColdstartKey = attribute.Key("faas.coldstart") ) // FaaSColdstart returns an attribute KeyValue conforming to the // "faas.coldstart" semantic conventions. It represents a boolean that is true // if the serverless function is executed for the first time (aka cold-start). func FaaSColdstart(val bool) attribute.KeyValue { return FaaSColdstartKey.Bool(val) } // Contains additional attributes for outgoing FaaS spans. const ( // FaaSInvokedNameKey is the attribute Key conforming to the // "faas.invoked_name" semantic conventions. It represents the name of the // invoked function. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'my-function' // Note: SHOULD be equal to the `faas.name` resource attribute of the // invoked function. FaaSInvokedNameKey = attribute.Key("faas.invoked_name") // FaaSInvokedProviderKey is the attribute Key conforming to the // "faas.invoked_provider" semantic conventions. It represents the cloud // provider of the invoked function. // // Type: Enum // RequirementLevel: Required // Stability: stable // Note: SHOULD be equal to the `cloud.provider` resource attribute of the // invoked function. FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") // FaaSInvokedRegionKey is the attribute Key conforming to the // "faas.invoked_region" semantic conventions. It represents the cloud // region of the invoked function. // // Type: string // RequirementLevel: ConditionallyRequired (For some cloud providers, like // AWS or GCP, the region in which a function is hosted is essential to // uniquely identify the function and also part of its endpoint. Since it's // part of the endpoint being called, the region is always known to // clients. In these cases, `faas.invoked_region` MUST be set accordingly. // If the region is unknown to the client or not required for identifying // the invoked function, setting `faas.invoked_region` is optional.) 
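// Usage sketch for the timer-triggered FaaS attributes above, including the
// cold-start flag (illustrative; span is an assumed trace.Span and the values
// follow the Examples):
//
//	span.SetAttributes(
//		semconv.FaaSTriggerTimer,
//		semconv.FaaSTime("2020-01-23T13:47:06Z"),
//		semconv.FaaSCron("0/5 * * * ? *"),
//		semconv.FaaSColdstart(true),
//	)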
// Stability: stable // Examples: 'eu-central-1' // Note: SHOULD be equal to the `cloud.region` resource attribute of the // invoked function. FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") ) var ( // Alibaba Cloud FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") // Amazon Web Services FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") // Microsoft Azure FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") // Google Cloud Platform FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") // Tencent Cloud FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") ) // FaaSInvokedName returns an attribute KeyValue conforming to the // "faas.invoked_name" semantic conventions. It represents the name of the // invoked function. func FaaSInvokedName(val string) attribute.KeyValue { return FaaSInvokedNameKey.String(val) } // FaaSInvokedRegion returns an attribute KeyValue conforming to the // "faas.invoked_region" semantic conventions. It represents the cloud region // of the invoked function. func FaaSInvokedRegion(val string) attribute.KeyValue { return FaaSInvokedRegionKey.String(val) } // Operations that access some remote service. const ( // PeerServiceKey is the attribute Key conforming to the "peer.service" // semantic conventions. It represents the // [`service.name`](../../resource/semantic_conventions/README.md#service) // of the remote service. SHOULD be equal to the actual `service.name` // resource attribute of the remote service if any. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'AuthTokenCache' PeerServiceKey = attribute.Key("peer.service") ) // PeerService returns an attribute KeyValue conforming to the // "peer.service" semantic conventions. It represents the // [`service.name`](../../resource/semantic_conventions/README.md#service) of // the remote service. SHOULD be equal to the actual `service.name` resource // attribute of the remote service if any. func PeerService(val string) attribute.KeyValue { return PeerServiceKey.String(val) } // These attributes may be used for any operation with an authenticated and/or // authorized enduser. const ( // EnduserIDKey is the attribute Key conforming to the "enduser.id" // semantic conventions. It represents the username or client_id extracted // from the access token or // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header // in the inbound request from outside the system. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'username' EnduserIDKey = attribute.Key("enduser.id") // EnduserRoleKey is the attribute Key conforming to the "enduser.role" // semantic conventions. It represents the actual/assumed role the client // is making the request under extracted from token or application security // context. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'admin' EnduserRoleKey = attribute.Key("enduser.role") // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" // semantic conventions. It represents the scopes or granted authorities // the client currently possesses extracted from token or application // security context. The value would come from the scope associated with an // [OAuth 2.0 Access // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute // value in a [SAML 2.0 // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). 
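// Usage sketch for the outgoing FaaS invocation attributes above, together
// with peer.service (illustrative; span is an assumed client-side trace.Span
// and the values follow the Examples):
//
//	span.SetAttributes(
//		semconv.FaaSInvokedName("my-function"),
//		semconv.FaaSInvokedProviderAWS,
//		semconv.FaaSInvokedRegion("eu-central-1"),
//		semconv.PeerService("AuthTokenCache"),
//	)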
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'read:message, write:files' EnduserScopeKey = attribute.Key("enduser.scope") ) // EnduserID returns an attribute KeyValue conforming to the "enduser.id" // semantic conventions. It represents the username or client_id extracted from // the access token or // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in // the inbound request from outside the system. func EnduserID(val string) attribute.KeyValue { return EnduserIDKey.String(val) } // EnduserRole returns an attribute KeyValue conforming to the // "enduser.role" semantic conventions. It represents the actual/assumed role // the client is making the request under extracted from token or application // security context. func EnduserRole(val string) attribute.KeyValue { return EnduserRoleKey.String(val) } // EnduserScope returns an attribute KeyValue conforming to the // "enduser.scope" semantic conventions. It represents the scopes or granted // authorities the client currently possesses extracted from token or // application security context. The value would come from the scope associated // with an [OAuth 2.0 Access // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute // value in a [SAML 2.0 // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). func EnduserScope(val string) attribute.KeyValue { return EnduserScopeKey.String(val) } // These attributes may be used for any operation to store information about a // thread that started a span. const ( // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic // conventions. It represents the current "managed" thread ID (as opposed // to OS thread ID). // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 42 ThreadIDKey = attribute.Key("thread.id") // ThreadNameKey is the attribute Key conforming to the "thread.name" // semantic conventions. It represents the current thread name. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'main' ThreadNameKey = attribute.Key("thread.name") ) // ThreadID returns an attribute KeyValue conforming to the "thread.id" // semantic conventions. It represents the current "managed" thread ID (as // opposed to OS thread ID). func ThreadID(val int) attribute.KeyValue { return ThreadIDKey.Int(val) } // ThreadName returns an attribute KeyValue conforming to the "thread.name" // semantic conventions. It represents the current thread name. func ThreadName(val string) attribute.KeyValue { return ThreadNameKey.String(val) } // These attributes allow to report this unit of code and therefore to provide // more context about the span. const ( // CodeFunctionKey is the attribute Key conforming to the "code.function" // semantic conventions. It represents the method or function name, or // equivalent (usually rightmost part of the code unit's name). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'serveRequest' CodeFunctionKey = attribute.Key("code.function") // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" // semantic conventions. It represents the "namespace" within which // `code.function` is defined. Usually the qualified class or module name, // such that `code.namespace` + some separator + `code.function` form a // unique identifier for the code unit. 
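// Usage sketch for the end-user and thread attributes above (illustrative;
// span is an assumed trace.Span and the values follow the Examples):
//
//	span.SetAttributes(
//		semconv.EnduserID("username"),
//		semconv.EnduserRole("admin"),
//		semconv.EnduserScope("read:message, write:files"),
//		semconv.ThreadID(42),
//		semconv.ThreadName("main"),
//	)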
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'com.example.MyHTTPService' CodeNamespaceKey = attribute.Key("code.namespace") // CodeFilepathKey is the attribute Key conforming to the "code.filepath" // semantic conventions. It represents the source code file name that // identifies the code unit as uniquely as possible (preferably an absolute // file path). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '/usr/local/MyApplication/content_root/app/index.php' CodeFilepathKey = attribute.Key("code.filepath") // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" // semantic conventions. It represents the line number in `code.filepath` // best representing the operation. It SHOULD point within the code unit // named in `code.function`. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 42 CodeLineNumberKey = attribute.Key("code.lineno") // CodeColumnKey is the attribute Key conforming to the "code.column" // semantic conventions. It represents the column number in `code.filepath` // best representing the operation. It SHOULD point within the code unit // named in `code.function`. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 16 CodeColumnKey = attribute.Key("code.column") ) // CodeFunction returns an attribute KeyValue conforming to the // "code.function" semantic conventions. It represents the method or function // name, or equivalent (usually rightmost part of the code unit's name). func CodeFunction(val string) attribute.KeyValue { return CodeFunctionKey.String(val) } // CodeNamespace returns an attribute KeyValue conforming to the // "code.namespace" semantic conventions. It represents the "namespace" within // which `code.function` is defined. Usually the qualified class or module // name, such that `code.namespace` + some separator + `code.function` form a // unique identifier for the code unit. func CodeNamespace(val string) attribute.KeyValue { return CodeNamespaceKey.String(val) } // CodeFilepath returns an attribute KeyValue conforming to the // "code.filepath" semantic conventions. It represents the source code file // name that identifies the code unit as uniquely as possible (preferably an // absolute file path). func CodeFilepath(val string) attribute.KeyValue { return CodeFilepathKey.String(val) } // CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" // semantic conventions. It represents the line number in `code.filepath` best // representing the operation. It SHOULD point within the code unit named in // `code.function`. func CodeLineNumber(val int) attribute.KeyValue { return CodeLineNumberKey.Int(val) } // CodeColumn returns an attribute KeyValue conforming to the "code.column" // semantic conventions. It represents the column number in `code.filepath` // best representing the operation. It SHOULD point within the code unit named // in `code.function`. func CodeColumn(val int) attribute.KeyValue { return CodeColumnKey.Int(val) } // Semantic Convention for HTTP Client const ( // HTTPURLKey is the attribute Key conforming to the "http.url" semantic // conventions. It represents the full HTTP request URL in the form // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is // not transmitted over HTTP, but if it is known, it should be included // nevertheless. 
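// Usage sketch for the source-code location attributes above (illustrative;
// span is an assumed trace.Span and the values follow the Examples):
//
//	span.SetAttributes(
//		semconv.CodeFunction("serveRequest"),
//		semconv.CodeNamespace("com.example.MyHTTPService"),
//		semconv.CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
//		semconv.CodeLineNumber(42),
//		semconv.CodeColumn(16),
//	)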
// // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' // Note: `http.url` MUST NOT contain credentials passed via URL in form of // `https://username:password@www.example.com/`. In such case the // attribute's value should be `https://www.example.com/`. HTTPURLKey = attribute.Key("http.url") // HTTPResendCountKey is the attribute Key conforming to the // "http.resend_count" semantic conventions. It represents the ordinal // number of request resending attempt (for any reason, including // redirects). // // Type: int // RequirementLevel: Recommended (if and only if request was retried.) // Stability: stable // Examples: 3 // Note: The resend count SHOULD be updated each time an HTTP request gets // resent by the client, regardless of what was the cause of the resending // (e.g. redirection, authorization failure, 503 Server Unavailable, // network issues, or any other). HTTPResendCountKey = attribute.Key("http.resend_count") ) // HTTPURL returns an attribute KeyValue conforming to the "http.url" // semantic conventions. It represents the full HTTP request URL in the form // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not // transmitted over HTTP, but if it is known, it should be included // nevertheless. func HTTPURL(val string) attribute.KeyValue { return HTTPURLKey.String(val) } // HTTPResendCount returns an attribute KeyValue conforming to the // "http.resend_count" semantic conventions. It represents the ordinal number // of request resending attempt (for any reason, including redirects). func HTTPResendCount(val int) attribute.KeyValue { return HTTPResendCountKey.Int(val) } // Semantic Convention for HTTP Server const ( // HTTPTargetKey is the attribute Key conforming to the "http.target" // semantic conventions. It represents the full request target as passed in // a HTTP request line or equivalent. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: '/path/12314/?q=ddds' HTTPTargetKey = attribute.Key("http.target") // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip" // semantic conventions. It represents the IP address of the original // client behind all proxies, if known (e.g. from // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '83.164.160.102' // Note: This is not necessarily the same as `net.sock.peer.addr`, which // would // identify the network-level peer, which may be a proxy. // // This attribute should be set when a source of information different // from the one used for `net.sock.peer.addr`, is available even if that // other // source just confirms the same value as `net.sock.peer.addr`. // Rationale: For `net.sock.peer.addr`, one typically does not know if it // comes from a proxy, reverse proxy, or the actual client. Setting // `http.client_ip` when it's the same as `net.sock.peer.addr` means that // one is at least somewhat confident that the address is not that of // the closest proxy. HTTPClientIPKey = attribute.Key("http.client_ip") ) // HTTPTarget returns an attribute KeyValue conforming to the "http.target" // semantic conventions. It represents the full request target as passed in a // HTTP request line or equivalent. 
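// Usage sketch for the HTTP client attributes above (illustrative; span is an
// assumed client trace.Span; the URL and resend count follow the Examples,
// and per the note on `http.url` any embedded credentials must be removed):
//
//	span.SetAttributes(
//		semconv.HTTPURL("https://www.foo.bar/search?q=OpenTelemetry#SemConv"),
//		semconv.HTTPResendCount(3),
//	)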
func HTTPTarget(val string) attribute.KeyValue { return HTTPTargetKey.String(val) } // HTTPClientIP returns an attribute KeyValue conforming to the // "http.client_ip" semantic conventions. It represents the IP address of the // original client behind all proxies, if known (e.g. from // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). func HTTPClientIP(val string) attribute.KeyValue { return HTTPClientIPKey.String(val) } // Attributes that exist for multiple DynamoDB request types. const ( // AWSDynamoDBTableNamesKey is the attribute Key conforming to the // "aws.dynamodb.table_names" semantic conventions. It represents the keys // in the `RequestItems` object field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'Users', 'Cats' AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the // JSON-serialized value of each item in the `ConsumedCapacity` response // field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : // { "CapacityUnits": number, "ReadCapacityUnits": number, // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": // { "CapacityUnits": number, "ReadCapacityUnits": number, // "WriteCapacityUnits": number }, "TableName": "string", // "WriteCapacityUnits": number }' AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to // the "aws.dynamodb.item_collection_metrics" semantic conventions. It // represents the JSON-serialized value of the `ItemCollectionMetrics` // response field. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, // "SizeEstimateRangeGB": [ number ] } ] }' AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` // request parameter. // // Type: double // RequirementLevel: Optional // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. // It represents the value of the // `ProvisionedThroughput.WriteCapacityUnits` request parameter. // // Type: double // RequirementLevel: Optional // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the // "aws.dynamodb.consistent_read" semantic conventions. 
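// Usage sketch for the HTTP server attributes above (illustrative; span is an
// assumed server trace.Span and the values follow the Examples):
//
//	span.SetAttributes(
//		semconv.HTTPTarget("/path/12314/?q=ddds"),
//		semconv.HTTPClientIP("83.164.160.102"),
//	)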
It represents the // value of the `ConsistentRead` request parameter. // // Type: boolean // RequirementLevel: Optional // Stability: stable AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") // AWSDynamoDBProjectionKey is the attribute Key conforming to the // "aws.dynamodb.projection" semantic conventions. It represents the value // of the `ProjectionExpression` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Title', 'Title, Price, Color', 'Title, Description, // RelatedItems, ProductReviews' AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") // AWSDynamoDBLimitKey is the attribute Key conforming to the // "aws.dynamodb.limit" semantic conventions. It represents the value of // the `Limit` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the // value of the `AttributesToGet` request parameter. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'lives', 'id' AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") // AWSDynamoDBIndexNameKey is the attribute Key conforming to the // "aws.dynamodb.index_name" semantic conventions. It represents the value // of the `IndexName` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'name_to_group' AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") // AWSDynamoDBSelectKey is the attribute Key conforming to the // "aws.dynamodb.select" semantic conventions. It represents the value of // the `Select` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'ALL_ATTRIBUTES', 'COUNT' AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") ) // AWSDynamoDBTableNames returns an attribute KeyValue conforming to the // "aws.dynamodb.table_names" semantic conventions. It represents the keys in // the `RequestItems` object field. func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { return AWSDynamoDBTableNamesKey.StringSlice(val) } // AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to // the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the // JSON-serialized value of each item in the `ConsumedCapacity` response field. func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { return AWSDynamoDBConsumedCapacityKey.StringSlice(val) } // AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming // to the "aws.dynamodb.item_collection_metrics" semantic conventions. It // represents the JSON-serialized value of the `ItemCollectionMetrics` response // field. func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { return AWSDynamoDBItemCollectionMetricsKey.String(val) } // AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue // conforming to the "aws.dynamodb.provisioned_read_capacity" semantic // conventions. It represents the value of the // `ProvisionedThroughput.ReadCapacityUnits` request parameter. 
func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) } // AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue // conforming to the "aws.dynamodb.provisioned_write_capacity" semantic // conventions. It represents the value of the // `ProvisionedThroughput.WriteCapacityUnits` request parameter. func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) } // AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the // "aws.dynamodb.consistent_read" semantic conventions. It represents the value // of the `ConsistentRead` request parameter. func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { return AWSDynamoDBConsistentReadKey.Bool(val) } // AWSDynamoDBProjection returns an attribute KeyValue conforming to the // "aws.dynamodb.projection" semantic conventions. It represents the value of // the `ProjectionExpression` request parameter. func AWSDynamoDBProjection(val string) attribute.KeyValue { return AWSDynamoDBProjectionKey.String(val) } // AWSDynamoDBLimit returns an attribute KeyValue conforming to the // "aws.dynamodb.limit" semantic conventions. It represents the value of the // `Limit` request parameter. func AWSDynamoDBLimit(val int) attribute.KeyValue { return AWSDynamoDBLimitKey.Int(val) } // AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to // the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the // value of the `AttributesToGet` request parameter. func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { return AWSDynamoDBAttributesToGetKey.StringSlice(val) } // AWSDynamoDBIndexName returns an attribute KeyValue conforming to the // "aws.dynamodb.index_name" semantic conventions. It represents the value of // the `IndexName` request parameter. func AWSDynamoDBIndexName(val string) attribute.KeyValue { return AWSDynamoDBIndexNameKey.String(val) } // AWSDynamoDBSelect returns an attribute KeyValue conforming to the // "aws.dynamodb.select" semantic conventions. It represents the value of the // `Select` request parameter. func AWSDynamoDBSelect(val string) attribute.KeyValue { return AWSDynamoDBSelectKey.String(val) } // DynamoDB.CreateTable const ( // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It // represents the JSON-serialized value of each item of the // `GlobalSecondaryIndexes` request field // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It // represents the JSON-serialized value of each item of the // `LocalSecondaryIndexes` request field. 
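// Usage sketch for the shared DynamoDB request attributes above on a span for
// a single-table call (illustrative; span is an assumed trace.Span and the
// values follow the Examples):
//
//	span.SetAttributes(
//		semconv.DBSystemDynamoDB,
//		semconv.AWSDynamoDBTableNames("Users"),
//		semconv.AWSDynamoDBConsistentRead(true),
//		semconv.AWSDynamoDBProjection("Title, Price, Color"),
//		semconv.AWSDynamoDBProvisionedReadCapacity(1.0),
//		semconv.AWSDynamoDBProvisionedWriteCapacity(2.0),
//	)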
// // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "IndexARN": "string", "IndexName": "string", // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { // "AttributeName": "string", "KeyType": "string" } ], "Projection": { // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") ) // AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue // conforming to the "aws.dynamodb.global_secondary_indexes" semantic // conventions. It represents the JSON-serialized value of each item of the // `GlobalSecondaryIndexes` request field func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) } // AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming // to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It // represents the JSON-serialized value of each item of the // `LocalSecondaryIndexes` request field. func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) } // DynamoDB.ListTables const ( // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents // the value of the `ExclusiveStartTableName` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Users', 'CatsTable' AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") // AWSDynamoDBTableCountKey is the attribute Key conforming to the // "aws.dynamodb.table_count" semantic conventions. It represents the the // number of items in the `TableNames` response parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 20 AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") ) // AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming // to the "aws.dynamodb.exclusive_start_table" semantic conventions. It // represents the value of the `ExclusiveStartTableName` request parameter. func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { return AWSDynamoDBExclusiveStartTableKey.String(val) } // AWSDynamoDBTableCount returns an attribute KeyValue conforming to the // "aws.dynamodb.table_count" semantic conventions. It represents the the // number of items in the `TableNames` response parameter. func AWSDynamoDBTableCount(val int) attribute.KeyValue { return AWSDynamoDBTableCountKey.Int(val) } // DynamoDB.Query const ( // AWSDynamoDBScanForwardKey is the attribute Key conforming to the // "aws.dynamodb.scan_forward" semantic conventions. It represents the // value of the `ScanIndexForward` request parameter. // // Type: boolean // RequirementLevel: Optional // Stability: stable AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") ) // AWSDynamoDBScanForward returns an attribute KeyValue conforming to the // "aws.dynamodb.scan_forward" semantic conventions. It represents the value of // the `ScanIndexForward` request parameter. func AWSDynamoDBScanForward(val bool) attribute.KeyValue { return AWSDynamoDBScanForwardKey.Bool(val) } // DynamoDB.Scan const ( // AWSDynamoDBSegmentKey is the attribute Key conforming to the // "aws.dynamodb.segment" semantic conventions. It represents the value of // the `Segment` request parameter. 
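// Usage sketch for the DynamoDB ListTables and Query attributes above
// (illustrative; span is an assumed trace.Span, values follow the Examples):
//
//	// ListTables
//	span.SetAttributes(
//		semconv.AWSDynamoDBExclusiveStartTable("Users"),
//		semconv.AWSDynamoDBTableCount(20),
//	)
//	// Query
//	span.SetAttributes(semconv.AWSDynamoDBScanForward(true))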
// // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the // "aws.dynamodb.total_segments" semantic conventions. It represents the // value of the `TotalSegments` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 100 AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") // AWSDynamoDBCountKey is the attribute Key conforming to the // "aws.dynamodb.count" semantic conventions. It represents the value of // the `Count` response parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") // AWSDynamoDBScannedCountKey is the attribute Key conforming to the // "aws.dynamodb.scanned_count" semantic conventions. It represents the // value of the `ScannedCount` response parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 50 AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") ) // AWSDynamoDBSegment returns an attribute KeyValue conforming to the // "aws.dynamodb.segment" semantic conventions. It represents the value of the // `Segment` request parameter. func AWSDynamoDBSegment(val int) attribute.KeyValue { return AWSDynamoDBSegmentKey.Int(val) } // AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the // "aws.dynamodb.total_segments" semantic conventions. It represents the value // of the `TotalSegments` request parameter. func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { return AWSDynamoDBTotalSegmentsKey.Int(val) } // AWSDynamoDBCount returns an attribute KeyValue conforming to the // "aws.dynamodb.count" semantic conventions. It represents the value of the // `Count` response parameter. func AWSDynamoDBCount(val int) attribute.KeyValue { return AWSDynamoDBCountKey.Int(val) } // AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the // "aws.dynamodb.scanned_count" semantic conventions. It represents the value // of the `ScannedCount` response parameter. func AWSDynamoDBScannedCount(val int) attribute.KeyValue { return AWSDynamoDBScannedCountKey.Int(val) } // DynamoDB.UpdateTable const ( // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to // the "aws.dynamodb.attribute_definitions" semantic conventions. It // represents the JSON-serialized value of each item in the // `AttributeDefinitions` request field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic // conventions. It represents the JSON-serialized value of each item in the // the `GlobalSecondaryIndexUpdates` request field. 
// // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { // "AttributeName": "string", "KeyType": "string" } ], "Projection": { // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, // "ProvisionedThroughput": { "ReadCapacityUnits": number, // "WriteCapacityUnits": number } }' AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") ) // AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming // to the "aws.dynamodb.attribute_definitions" semantic conventions. It // represents the JSON-serialized value of each item in the // `AttributeDefinitions` request field. func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) } // AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic // conventions. It represents the JSON-serialized value of each item in the the // `GlobalSecondaryIndexUpdates` request field. func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) } // Semantic conventions to apply when instrumenting the GraphQL implementation. // They map GraphQL operations to attributes on a Span. const ( // GraphqlOperationNameKey is the attribute Key conforming to the // "graphql.operation.name" semantic conventions. It represents the name of // the operation being executed. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'findBookByID' GraphqlOperationNameKey = attribute.Key("graphql.operation.name") // GraphqlOperationTypeKey is the attribute Key conforming to the // "graphql.operation.type" semantic conventions. It represents the type of // the operation being executed. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'query', 'mutation', 'subscription' GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") // GraphqlDocumentKey is the attribute Key conforming to the // "graphql.document" semantic conventions. It represents the GraphQL // document being executed. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'query findBookByID { bookByID(id: ?) { name } }' // Note: The value may be sanitized to exclude sensitive information. GraphqlDocumentKey = attribute.Key("graphql.document") ) var ( // GraphQL query GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") // GraphQL mutation GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") // GraphQL subscription GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") ) // GraphqlOperationName returns an attribute KeyValue conforming to the // "graphql.operation.name" semantic conventions. It represents the name of the // operation being executed. func GraphqlOperationName(val string) attribute.KeyValue { return GraphqlOperationNameKey.String(val) } // GraphqlDocument returns an attribute KeyValue conforming to the // "graphql.document" semantic conventions. It represents the GraphQL document // being executed. func GraphqlDocument(val string) attribute.KeyValue { return GraphqlDocumentKey.String(val) } // General attributes used in messaging systems. const ( // MessagingSystemKey is the attribute Key conforming to the // "messaging.system" semantic conventions. 
It represents a string // identifying the messaging system. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' MessagingSystemKey = attribute.Key("messaging.system") // MessagingOperationKey is the attribute Key conforming to the // "messaging.operation" semantic conventions. It represents a string // identifying the kind of messaging operation as defined in the [Operation // names](#operation-names) section above. // // Type: Enum // RequirementLevel: Required // Stability: stable // Note: If a custom value is used, it MUST be of low cardinality. MessagingOperationKey = attribute.Key("messaging.operation") // MessagingBatchMessageCountKey is the attribute Key conforming to the // "messaging.batch.message_count" semantic conventions. It represents the // number of messages sent, received, or processed in the scope of the // batching operation. // // Type: int // RequirementLevel: ConditionallyRequired (If the span describes an // operation on a batch of messages.) // Stability: stable // Examples: 0, 1, 2 // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on // spans that operate with a single message. When a messaging client // library supports both batch and single-message API for the same // operation, instrumentations SHOULD use `messaging.batch.message_count` // for batching APIs and SHOULD NOT use it for single-message APIs. MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") ) var ( // publish MessagingOperationPublish = MessagingOperationKey.String("publish") // receive MessagingOperationReceive = MessagingOperationKey.String("receive") // process MessagingOperationProcess = MessagingOperationKey.String("process") ) // MessagingSystem returns an attribute KeyValue conforming to the // "messaging.system" semantic conventions. It represents a string identifying // the messaging system. func MessagingSystem(val string) attribute.KeyValue { return MessagingSystemKey.String(val) } // MessagingBatchMessageCount returns an attribute KeyValue conforming to // the "messaging.batch.message_count" semantic conventions. It represents the // number of messages sent, received, or processed in the scope of the batching // operation. func MessagingBatchMessageCount(val int) attribute.KeyValue { return MessagingBatchMessageCountKey.Int(val) } // Semantic convention for a consumer of messages received from a messaging // system const ( // MessagingConsumerIDKey is the attribute Key conforming to the // "messaging.consumer.id" semantic conventions. It represents the // identifier for the consumer receiving a message. For Kafka, set it to // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if // both are present, or only `messaging.kafka.consumer.group`. For brokers, // such as RabbitMQ and Artemis, set it to the `client_id` of the client // consuming the message. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'mygroup - client-6' MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") ) // MessagingConsumerID returns an attribute KeyValue conforming to the // "messaging.consumer.id" semantic conventions. It represents the identifier // for the consumer receiving a message. For Kafka, set it to // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both // are present, or only `messaging.kafka.consumer.group`. 
For brokers, such as // RabbitMQ and Artemis, set it to the `client_id` of the client consuming the // message. func MessagingConsumerID(val string) attribute.KeyValue { return MessagingConsumerIDKey.String(val) } // Semantic conventions for remote procedure calls. const ( // RPCSystemKey is the attribute Key conforming to the "rpc.system" // semantic conventions. It represents a string identifying the remoting // system. See below for a list of well-known identifiers. // // Type: Enum // RequirementLevel: Required // Stability: stable RPCSystemKey = attribute.Key("rpc.system") // RPCServiceKey is the attribute Key conforming to the "rpc.service" // semantic conventions. It represents the full (logical) name of the // service being called, including its package name, if applicable. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'myservice.EchoService' // Note: This is the logical name of the service from the RPC interface // perspective, which can be different from the name of any implementing // class. The `code.namespace` attribute may be used to store the latter // (despite the attribute name, it may include a class name; e.g., class // with method actually executing the call on the server side, RPC client // stub class on the client side). RPCServiceKey = attribute.Key("rpc.service") // RPCMethodKey is the attribute Key conforming to the "rpc.method" // semantic conventions. It represents the name of the (logical) method // being called, must be equal to the $method part in the span name. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'exampleMethod' // Note: This is the logical name of the method from the RPC interface // perspective, which can be different from the name of any implementing // method/function. The `code.function` attribute may be used to store the // latter (e.g., method actually executing the call on the server side, RPC // client stub method on the client side). RPCMethodKey = attribute.Key("rpc.method") ) var ( // gRPC RPCSystemGRPC = RPCSystemKey.String("grpc") // Java RMI RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") // .NET WCF RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") // Apache Dubbo RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") // Connect RPC RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") ) // RPCService returns an attribute KeyValue conforming to the "rpc.service" // semantic conventions. It represents the full (logical) name of the service // being called, including its package name, if applicable. func RPCService(val string) attribute.KeyValue { return RPCServiceKey.String(val) } // RPCMethod returns an attribute KeyValue conforming to the "rpc.method" // semantic conventions. It represents the name of the (logical) method being // called, must be equal to the $method part in the span name. func RPCMethod(val string) attribute.KeyValue { return RPCMethodKey.String(val) } // Tech-specific attributes for gRPC. const ( // RPCGRPCStatusCodeKey is the attribute Key conforming to the // "rpc.grpc.status_code" semantic conventions. It represents the [numeric // status // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of // the gRPC request. 
// // Type: Enum // RequirementLevel: Required // Stability: stable RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ) var ( // OK RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) // CANCELLED RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) // UNKNOWN RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) // INVALID_ARGUMENT RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) // DEADLINE_EXCEEDED RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) // NOT_FOUND RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) // ALREADY_EXISTS RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) // PERMISSION_DENIED RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) // RESOURCE_EXHAUSTED RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) // FAILED_PRECONDITION RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) // ABORTED RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) // OUT_OF_RANGE RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) // UNIMPLEMENTED RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) // INTERNAL RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) // UNAVAILABLE RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) // DATA_LOSS RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) // UNAUTHENTICATED RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) ) // Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). const ( // RPCJsonrpcVersionKey is the attribute Key conforming to the // "rpc.jsonrpc.version" semantic conventions. It represents the protocol // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 // does not specify this, the value can be omitted. // // Type: string // RequirementLevel: ConditionallyRequired (If other than the default // version (`1.0`)) // Stability: stable // Examples: '2.0', '1.0' RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") // RPCJsonrpcRequestIDKey is the attribute Key conforming to the // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` // property of request or response. Since protocol allows id to be int, // string, `null` or missing (for notifications), value is expected to be // cast to string for simplicity. Use empty string in case of `null` value. // Omit entirely if this is a notification. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '10', 'request-7', '' RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the // "rpc.jsonrpc.error_code" semantic conventions. It represents the // `error.code` property of response if it is an error response. // // Type: int // RequirementLevel: ConditionallyRequired (If response is not successful.) // Stability: stable // Examples: -32700, 100 RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the // "rpc.jsonrpc.error_message" semantic conventions. It represents the // `error.message` property of response if it is an error response. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Parse error', 'User already exists' RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") ) // RPCJsonrpcVersion returns an attribute KeyValue conforming to the // "rpc.jsonrpc.version" semantic conventions. 
It represents the protocol // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 // does not specify this, the value can be omitted. func RPCJsonrpcVersion(val string) attribute.KeyValue { return RPCJsonrpcVersionKey.String(val) } // RPCJsonrpcRequestID returns an attribute KeyValue conforming to the // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` // property of request or response. Since protocol allows id to be int, string, // `null` or missing (for notifications), value is expected to be cast to // string for simplicity. Use empty string in case of `null` value. Omit // entirely if this is a notification. func RPCJsonrpcRequestID(val string) attribute.KeyValue { return RPCJsonrpcRequestIDKey.String(val) } // RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the // "rpc.jsonrpc.error_code" semantic conventions. It represents the // `error.code` property of response if it is an error response. func RPCJsonrpcErrorCode(val int) attribute.KeyValue { return RPCJsonrpcErrorCodeKey.Int(val) } // RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the // "rpc.jsonrpc.error_message" semantic conventions. It represents the // `error.message` property of response if it is an error response. func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { return RPCJsonrpcErrorMessageKey.String(val) } // Tech-specific attributes for Connect RPC. const ( // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the // "rpc.connect_rpc.error_code" semantic conventions. It represents the // [error codes](https://connect.build/docs/protocol/#error-codes) of the // Connect request. Error codes are always string values. // // Type: Enum // RequirementLevel: ConditionallyRequired (If response is not successful // and if error code available.) 
// Stability: stable RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") ) var ( // cancelled RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") // unknown RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") // invalid_argument RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") // deadline_exceeded RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") // not_found RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") // already_exists RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") // permission_denied RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") // resource_exhausted RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") // failed_precondition RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") // aborted RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") // out_of_range RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") // unimplemented RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") // internal RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") // unavailable RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") // data_loss RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") // unauthenticated RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") ) opentelemetry-go-1.21.0/semconv/v1.20.0/000077500000000000000000000000001452547353200174625ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.20.0/attribute_group.go000066400000000000000000001400621452547353200232330ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" import "go.opentelemetry.io/otel/attribute" // Describes HTTP attributes. const ( // HTTPMethodKey is the attribute Key conforming to the "http.method" // semantic conventions. It represents the hTTP request method. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'GET', 'POST', 'HEAD' HTTPMethodKey = attribute.Key("http.method") // HTTPStatusCodeKey is the attribute Key conforming to the // "http.status_code" semantic conventions. It represents the [HTTP // response status code](https://tools.ietf.org/html/rfc7231#section-6). // // Type: int // RequirementLevel: ConditionallyRequired (If and only if one was // received/sent.) 
// Stability: stable // Examples: 200 HTTPStatusCodeKey = attribute.Key("http.status_code") ) // HTTPMethod returns an attribute KeyValue conforming to the "http.method" // semantic conventions. It represents the HTTP request method. func HTTPMethod(val string) attribute.KeyValue { return HTTPMethodKey.String(val) } // HTTPStatusCode returns an attribute KeyValue conforming to the // "http.status_code" semantic conventions. It represents the [HTTP response // status code](https://tools.ietf.org/html/rfc7231#section-6). func HTTPStatusCode(val int) attribute.KeyValue { return HTTPStatusCodeKey.Int(val) } // HTTP Server spans attributes const ( // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" // semantic conventions. It represents the URI scheme identifying the used // protocol. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'http', 'https' HTTPSchemeKey = attribute.Key("http.scheme") // HTTPRouteKey is the attribute Key conforming to the "http.route" // semantic conventions. It represents the matched route (path template in // the format used by the respective server framework). See note below // // Type: string // RequirementLevel: ConditionallyRequired (If and only if it's available) // Stability: stable // Examples: '/users/:userID?', '{controller}/{action}/{id?}' // Note: MUST NOT be populated when this is not supported by the HTTP // server framework as the route attribute should have low-cardinality and // the URI path can NOT substitute it. // SHOULD include the [application // root](/specification/trace/semantic_conventions/http.md#http-server-definitions) // if there is one. HTTPRouteKey = attribute.Key("http.route") ) // HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" // semantic conventions. It represents the URI scheme identifying the used // protocol. func HTTPScheme(val string) attribute.KeyValue { return HTTPSchemeKey.String(val) } // HTTPRoute returns an attribute KeyValue conforming to the "http.route" // semantic conventions. It represents the matched route (path template in the // format used by the respective server framework). See note below func HTTPRoute(val string) attribute.KeyValue { return HTTPRouteKey.String(val) } // Attributes for Events represented using Log Records. const ( // EventNameKey is the attribute Key conforming to the "event.name" // semantic conventions. It represents the name that identifies the event. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'click', 'exception' EventNameKey = attribute.Key("event.name") // EventDomainKey is the attribute Key conforming to the "event.domain" // semantic conventions. It represents the domain that identifies the business // context for the events. // // Type: Enum // RequirementLevel: Required // Stability: stable // Note: Events across different domains may have same `event.name`, yet be // unrelated events. EventDomainKey = attribute.Key("event.domain") ) var ( // Events from browser apps EventDomainBrowser = EventDomainKey.String("browser") // Events from mobile apps EventDomainDevice = EventDomainKey.String("device") // Events from Kubernetes EventDomainK8S = EventDomainKey.String("k8s") ) // EventName returns an attribute KeyValue conforming to the "event.name" // semantic conventions. It represents the name that identifies the event. func EventName(val string) attribute.KeyValue { return EventNameKey.String(val) } // These attributes may be used for any network related operation.
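//
// Example (editor's illustrative sketch, not generated from the semantic
// convention specification): an instrumentation might attach the logical
// peer attributes defined below to a client span. The `span` variable is
// assumed to be a go.opentelemetry.io/otel/trace.Span owned by the caller,
// and the literal values are placeholders:
//
//	span.SetAttributes(
//		NetPeerName("example.com"),
//		NetPeerPort(443),
//		NetTransportTCP,
//	)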
const ( // NetTransportKey is the attribute Key conforming to the "net.transport" // semantic conventions. It represents the transport protocol used. See // note below. // // Type: Enum // RequirementLevel: Optional // Stability: stable NetTransportKey = attribute.Key("net.transport") // NetProtocolNameKey is the attribute Key conforming to the // "net.protocol.name" semantic conventions. It represents the application // layer protocol used. The value SHOULD be normalized to lowercase. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'amqp', 'http', 'mqtt' NetProtocolNameKey = attribute.Key("net.protocol.name") // NetProtocolVersionKey is the attribute Key conforming to the // "net.protocol.version" semantic conventions. It represents the version // of the application layer protocol used. See note below. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '3.1.1' // Note: `net.protocol.version` refers to the version of the protocol used // and might be different from the protocol client's version. If the HTTP // client used has a version of `0.27.2`, but sends HTTP version `1.1`, // this attribute should be set to `1.1`. NetProtocolVersionKey = attribute.Key("net.protocol.version") // NetSockPeerNameKey is the attribute Key conforming to the // "net.sock.peer.name" semantic conventions. It represents the remote // socket peer name. // // Type: string // RequirementLevel: Recommended (If available and different from // `net.peer.name` and if `net.sock.peer.addr` is set.) // Stability: stable // Examples: 'proxy.example.com' NetSockPeerNameKey = attribute.Key("net.sock.peer.name") // NetSockPeerAddrKey is the attribute Key conforming to the // "net.sock.peer.addr" semantic conventions. It represents the remote // socket peer address: IPv4 or IPv6 for internet protocols, path for local // communication, // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '127.0.0.1', '/tmp/mysql.sock' NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") // NetSockPeerPortKey is the attribute Key conforming to the // "net.sock.peer.port" semantic conventions. It represents the remote // socket peer port. // // Type: int // RequirementLevel: Recommended (If defined for the address family and if // different than `net.peer.port` and if `net.sock.peer.addr` is set.) // Stability: stable // Examples: 16456 NetSockPeerPortKey = attribute.Key("net.sock.peer.port") // NetSockFamilyKey is the attribute Key conforming to the // "net.sock.family" semantic conventions. It represents the protocol // [address // family](https://man7.org/linux/man-pages/man7/address_families.7.html) // which is used for communication. // // Type: Enum // RequirementLevel: ConditionallyRequired (If different than `inet` and if // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support // instrumentations that follow previous versions of this document.) // Stability: stable // Examples: 'inet6', 'bluetooth' NetSockFamilyKey = attribute.Key("net.sock.family") // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" // semantic conventions. It represents the logical remote hostname, see // note below. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'example.com' // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an // extra DNS lookup. NetPeerNameKey = attribute.Key("net.peer.name") // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" // semantic conventions. It represents the logical remote port number // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 80, 8080, 443 NetPeerPortKey = attribute.Key("net.peer.port") // NetHostNameKey is the attribute Key conforming to the "net.host.name" // semantic conventions. It represents the logical local hostname or // similar, see note below. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'localhost' NetHostNameKey = attribute.Key("net.host.name") // NetHostPortKey is the attribute Key conforming to the "net.host.port" // semantic conventions. It represents the logical local port number, // preferably the one that the peer used to connect // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 8080 NetHostPortKey = attribute.Key("net.host.port") // NetSockHostAddrKey is the attribute Key conforming to the // "net.sock.host.addr" semantic conventions. It represents the local // socket address. Useful in case of a multi-IP host. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '192.168.0.1' NetSockHostAddrKey = attribute.Key("net.sock.host.addr") // NetSockHostPortKey is the attribute Key conforming to the // "net.sock.host.port" semantic conventions. It represents the local // socket port number. // // Type: int // RequirementLevel: ConditionallyRequired (If defined for the address // family and if different than `net.host.port` and if `net.sock.host.addr` // is set. In other cases, it is still recommended to set this.) // Stability: stable // Examples: 35555 NetSockHostPortKey = attribute.Key("net.sock.host.port") ) var ( // ip_tcp NetTransportTCP = NetTransportKey.String("ip_tcp") // ip_udp NetTransportUDP = NetTransportKey.String("ip_udp") // Named or anonymous pipe. See note below NetTransportPipe = NetTransportKey.String("pipe") // In-process communication NetTransportInProc = NetTransportKey.String("inproc") // Something else (non IP-based) NetTransportOther = NetTransportKey.String("other") ) var ( // IPv4 address NetSockFamilyInet = NetSockFamilyKey.String("inet") // IPv6 address NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") // Unix domain socket path NetSockFamilyUnix = NetSockFamilyKey.String("unix") ) // NetProtocolName returns an attribute KeyValue conforming to the // "net.protocol.name" semantic conventions. It represents the application // layer protocol used. The value SHOULD be normalized to lowercase. func NetProtocolName(val string) attribute.KeyValue { return NetProtocolNameKey.String(val) } // NetProtocolVersion returns an attribute KeyValue conforming to the // "net.protocol.version" semantic conventions. It represents the version of // the application layer protocol used. See note below. func NetProtocolVersion(val string) attribute.KeyValue { return NetProtocolVersionKey.String(val) } // NetSockPeerName returns an attribute KeyValue conforming to the // "net.sock.peer.name" semantic conventions. It represents the remote socket // peer name. 
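//
// Editor's note (illustrative sketch, not generated text): per the
// RequirementLevel notes above, the socket-level peer attributes are meant
// to complement the logical `net.peer.*` attributes when they differ.
// Assuming `span` is a trace.Span provided by the caller and the values
// are placeholders:
//
//	span.SetAttributes(
//		NetSockPeerAddr("2001:db8::1"),
//		NetSockPeerPort(16456),
//		NetSockFamilyInet6,
//	)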
func NetSockPeerName(val string) attribute.KeyValue { return NetSockPeerNameKey.String(val) } // NetSockPeerAddr returns an attribute KeyValue conforming to the // "net.sock.peer.addr" semantic conventions. It represents the remote socket // peer address: IPv4 or IPv6 for internet protocols, path for local // communication, // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). func NetSockPeerAddr(val string) attribute.KeyValue { return NetSockPeerAddrKey.String(val) } // NetSockPeerPort returns an attribute KeyValue conforming to the // "net.sock.peer.port" semantic conventions. It represents the remote socket // peer port. func NetSockPeerPort(val int) attribute.KeyValue { return NetSockPeerPortKey.Int(val) } // NetPeerName returns an attribute KeyValue conforming to the // "net.peer.name" semantic conventions. It represents the logical remote // hostname, see note below. func NetPeerName(val string) attribute.KeyValue { return NetPeerNameKey.String(val) } // NetPeerPort returns an attribute KeyValue conforming to the // "net.peer.port" semantic conventions. It represents the logical remote port // number func NetPeerPort(val int) attribute.KeyValue { return NetPeerPortKey.Int(val) } // NetHostName returns an attribute KeyValue conforming to the // "net.host.name" semantic conventions. It represents the logical local // hostname or similar, see note below. func NetHostName(val string) attribute.KeyValue { return NetHostNameKey.String(val) } // NetHostPort returns an attribute KeyValue conforming to the // "net.host.port" semantic conventions. It represents the logical local port // number, preferably the one that the peer used to connect func NetHostPort(val int) attribute.KeyValue { return NetHostPortKey.Int(val) } // NetSockHostAddr returns an attribute KeyValue conforming to the // "net.sock.host.addr" semantic conventions. It represents the local socket // address. Useful in case of a multi-IP host. func NetSockHostAddr(val string) attribute.KeyValue { return NetSockHostAddrKey.String(val) } // NetSockHostPort returns an attribute KeyValue conforming to the // "net.sock.host.port" semantic conventions. It represents the local socket // port number. func NetSockHostPort(val int) attribute.KeyValue { return NetSockHostPortKey.Int(val) } // These attributes may be used for any network related operation. const ( // NetHostConnectionTypeKey is the attribute Key conforming to the // "net.host.connection.type" semantic conventions. It represents the // internet connection type currently being used by the host. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'wifi' NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") // NetHostConnectionSubtypeKey is the attribute Key conforming to the // "net.host.connection.subtype" semantic conventions. It represents the // this describes more details regarding the connection.type. It may be the // type of cell technology connection, but it could be used for describing // details about a wifi connection. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'LTE' NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") // NetHostCarrierNameKey is the attribute Key conforming to the // "net.host.carrier.name" semantic conventions. It represents the name of // the mobile carrier. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'sprint' NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") // NetHostCarrierMccKey is the attribute Key conforming to the // "net.host.carrier.mcc" semantic conventions. It represents the mobile // carrier country code. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '310' NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") // NetHostCarrierMncKey is the attribute Key conforming to the // "net.host.carrier.mnc" semantic conventions. It represents the mobile // carrier network code. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '001' NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") // NetHostCarrierIccKey is the attribute Key conforming to the // "net.host.carrier.icc" semantic conventions. It represents the ISO // 3166-1 alpha-2 2-character country code associated with the mobile // carrier network. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'DE' NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") ) var ( // wifi NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") // wired NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") // cell NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") // unavailable NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") // unknown NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") ) var ( // GPRS NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") // EDGE NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") // UMTS NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") // CDMA NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") // EVDO Rel. 0 NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") // EVDO Rev. A NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") // CDMA2000 1XRTT NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") // HSDPA NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") // HSUPA NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") // HSPA NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") // IDEN NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") // EVDO Rev. B NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") // LTE NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") // EHRPD NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") // HSPAP NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") // GSM NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") // TD-SCDMA NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") // IWLAN NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") // 5G NR (New Radio) NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") // 5G NRNSA (New Radio Non-Standalone) NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") // LTE CA NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") ) // NetHostCarrierName returns an attribute KeyValue conforming to the // "net.host.carrier.name" semantic conventions. 
It represents the name of the // mobile carrier. func NetHostCarrierName(val string) attribute.KeyValue { return NetHostCarrierNameKey.String(val) } // NetHostCarrierMcc returns an attribute KeyValue conforming to the // "net.host.carrier.mcc" semantic conventions. It represents the mobile // carrier country code. func NetHostCarrierMcc(val string) attribute.KeyValue { return NetHostCarrierMccKey.String(val) } // NetHostCarrierMnc returns an attribute KeyValue conforming to the // "net.host.carrier.mnc" semantic conventions. It represents the mobile // carrier network code. func NetHostCarrierMnc(val string) attribute.KeyValue { return NetHostCarrierMncKey.String(val) } // NetHostCarrierIcc returns an attribute KeyValue conforming to the // "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 // alpha-2 2-character country code associated with the mobile carrier network. func NetHostCarrierIcc(val string) attribute.KeyValue { return NetHostCarrierIccKey.String(val) } // Semantic conventions for HTTP client and server Spans. const ( // HTTPRequestContentLengthKey is the attribute Key conforming to the // "http.request_content_length" semantic conventions. It represents the // size of the request payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as // the // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) // header. For requests using transport encoding, this should be the // compressed size. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 3495 HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") // HTTPResponseContentLengthKey is the attribute Key conforming to the // "http.response_content_length" semantic conventions. It represents the // size of the response payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as // the // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) // header. For requests using transport encoding, this should be the // compressed size. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 3495 HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") ) // HTTPRequestContentLength returns an attribute KeyValue conforming to the // "http.request_content_length" semantic conventions. It represents the size // of the request payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) // header. For requests using transport encoding, this should be the compressed // size. func HTTPRequestContentLength(val int) attribute.KeyValue { return HTTPRequestContentLengthKey.Int(val) } // HTTPResponseContentLength returns an attribute KeyValue conforming to the // "http.response_content_length" semantic conventions. It represents the size // of the response payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) // header. For requests using transport encoding, this should be the compressed // size. 
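//
// Editor's sketch (illustrative, not generated text): once payload sizes
// are known, the content-length helpers could be set on an HTTP client
// span. `span`, `reqBodySize`, and `respBodySize` are hypothetical names
// assumed to exist in the caller's code:
//
//	span.SetAttributes(
//		HTTPRequestContentLength(reqBodySize),
//		HTTPResponseContentLength(respBodySize),
//	)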
func HTTPResponseContentLength(val int) attribute.KeyValue { return HTTPResponseContentLengthKey.Int(val) } // Semantic convention describing per-message attributes populated on messaging // spans or links. const ( // MessagingMessageIDKey is the attribute Key conforming to the // "messaging.message.id" semantic conventions. It represents a value used // by the messaging system as an identifier for the message, represented as // a string. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '452a7c7c7c7048c2f887f61572b18fc2' MessagingMessageIDKey = attribute.Key("messaging.message.id") // MessagingMessageConversationIDKey is the attribute Key conforming to the // "messaging.message.conversation_id" semantic conventions. It represents // the [conversation ID](#conversations) identifying the conversation to // which the message belongs, represented as a string. Sometimes called // "Correlation ID". // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MyConversationID' MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to // the "messaging.message.payload_size_bytes" semantic conventions. It // represents the (uncompressed) size of the message payload in bytes. Also // use this attribute if it is unknown whether the compressed or // uncompressed payload size is reported. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2738 MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key // conforming to the "messaging.message.payload_compressed_size_bytes" // semantic conventions. It represents the compressed size of the message // payload in bytes. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2048 MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") ) // MessagingMessageID returns an attribute KeyValue conforming to the // "messaging.message.id" semantic conventions. It represents a value used by // the messaging system as an identifier for the message, represented as a // string. func MessagingMessageID(val string) attribute.KeyValue { return MessagingMessageIDKey.String(val) } // MessagingMessageConversationID returns an attribute KeyValue conforming // to the "messaging.message.conversation_id" semantic conventions. It // represents the [conversation ID](#conversations) identifying the // conversation to which the message belongs, represented as a string. // Sometimes called "Correlation ID". func MessagingMessageConversationID(val string) attribute.KeyValue { return MessagingMessageConversationIDKey.String(val) } // MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming // to the "messaging.message.payload_size_bytes" semantic conventions. It // represents the (uncompressed) size of the message payload in bytes. Also use // this attribute if it is unknown whether the compressed or uncompressed // payload size is reported. func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { return MessagingMessagePayloadSizeBytesKey.Int(val) } // MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue // conforming to the "messaging.message.payload_compressed_size_bytes" semantic // conventions. It represents the compressed size of the message payload in // bytes. 
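//
// Editor's sketch (illustrative, not generated text): per-message
// attributes such as the message ID and payload size can be combined on a
// single messaging span. `span` is assumed to be a trace.Span created by
// the caller; the values mirror the Examples given above:
//
//	span.SetAttributes(
//		MessagingMessageID("452a7c7c7c7048c2f887f61572b18fc2"),
//		MessagingMessagePayloadSizeBytes(2738),
//	)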
func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) } // Semantic convention for attributes that describe messaging destination on // broker const ( // MessagingDestinationNameKey is the attribute Key conforming to the // "messaging.destination.name" semantic conventions. It represents the // message destination name // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MyQueue', 'MyTopic' // Note: Destination name SHOULD uniquely identify a specific queue, topic // or other entity within the broker. If // the broker does not have such notion, the destination name SHOULD // uniquely identify the broker. MessagingDestinationNameKey = attribute.Key("messaging.destination.name") // MessagingDestinationTemplateKey is the attribute Key conforming to the // "messaging.destination.template" semantic conventions. It represents the // low cardinality representation of the messaging destination name // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '/customers/{customerID}' // Note: Destination names could be constructed from templates. An example // would be a destination name involving a user name or product id. // Although the destination name in this case is of high cardinality, the // underlying template is of low cardinality and can be effectively used // for grouping and aggregation. MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") // MessagingDestinationTemporaryKey is the attribute Key conforming to the // "messaging.destination.temporary" semantic conventions. It represents a // boolean that is true if the message destination is temporary and might // not exist anymore after messages are processed. // // Type: boolean // RequirementLevel: Optional // Stability: stable MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") // MessagingDestinationAnonymousKey is the attribute Key conforming to the // "messaging.destination.anonymous" semantic conventions. It represents a // boolean that is true if the message destination is anonymous (could be // unnamed or have auto-generated name). // // Type: boolean // RequirementLevel: Optional // Stability: stable MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") ) // MessagingDestinationName returns an attribute KeyValue conforming to the // "messaging.destination.name" semantic conventions. It represents the message // destination name func MessagingDestinationName(val string) attribute.KeyValue { return MessagingDestinationNameKey.String(val) } // MessagingDestinationTemplate returns an attribute KeyValue conforming to // the "messaging.destination.template" semantic conventions. It represents the // low cardinality representation of the messaging destination name func MessagingDestinationTemplate(val string) attribute.KeyValue { return MessagingDestinationTemplateKey.String(val) } // MessagingDestinationTemporary returns an attribute KeyValue conforming to // the "messaging.destination.temporary" semantic conventions. It represents a // boolean that is true if the message destination is temporary and might not // exist anymore after messages are processed. 
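//
// Editor's sketch (illustrative, not generated text): a producer
// instrumentation might describe the destination with the helpers in this
// group together with the general `messaging.system` attribute defined
// earlier in this package. `span` is assumed to be provided by the caller
// and the values are placeholders:
//
//	span.SetAttributes(
//		MessagingSystem("rabbitmq"),
//		MessagingDestinationName("MyQueue"),
//		MessagingDestinationTemplate("/customers/{customerID}"),
//	)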
func MessagingDestinationTemporary(val bool) attribute.KeyValue { return MessagingDestinationTemporaryKey.Bool(val) } // MessagingDestinationAnonymous returns an attribute KeyValue conforming to // the "messaging.destination.anonymous" semantic conventions. It represents a // boolean that is true if the message destination is anonymous (could be // unnamed or have auto-generated name). func MessagingDestinationAnonymous(val bool) attribute.KeyValue { return MessagingDestinationAnonymousKey.Bool(val) } // Semantic convention for attributes that describe messaging source on broker const ( // MessagingSourceNameKey is the attribute Key conforming to the // "messaging.source.name" semantic conventions. It represents the message // source name // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MyQueue', 'MyTopic' // Note: Source name SHOULD uniquely identify a specific queue, topic, or // other entity within the broker. If // the broker does not have such notion, the source name SHOULD uniquely // identify the broker. MessagingSourceNameKey = attribute.Key("messaging.source.name") // MessagingSourceTemplateKey is the attribute Key conforming to the // "messaging.source.template" semantic conventions. It represents the low // cardinality representation of the messaging source name // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '/customers/{customerID}' // Note: Source names could be constructed from templates. An example would // be a source name involving a user name or product id. Although the // source name in this case is of high cardinality, the underlying template // is of low cardinality and can be effectively used for grouping and // aggregation. MessagingSourceTemplateKey = attribute.Key("messaging.source.template") // MessagingSourceTemporaryKey is the attribute Key conforming to the // "messaging.source.temporary" semantic conventions. It represents a // boolean that is true if the message source is temporary and might not // exist anymore after messages are processed. // // Type: boolean // RequirementLevel: Optional // Stability: stable MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") // MessagingSourceAnonymousKey is the attribute Key conforming to the // "messaging.source.anonymous" semantic conventions. It represents a // boolean that is true if the message source is anonymous (could be // unnamed or have auto-generated name). // // Type: boolean // RequirementLevel: Optional // Stability: stable MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous") ) // MessagingSourceName returns an attribute KeyValue conforming to the // "messaging.source.name" semantic conventions. It represents the message // source name func MessagingSourceName(val string) attribute.KeyValue { return MessagingSourceNameKey.String(val) } // MessagingSourceTemplate returns an attribute KeyValue conforming to the // "messaging.source.template" semantic conventions. It represents the low // cardinality representation of the messaging source name func MessagingSourceTemplate(val string) attribute.KeyValue { return MessagingSourceTemplateKey.String(val) } // MessagingSourceTemporary returns an attribute KeyValue conforming to the // "messaging.source.temporary" semantic conventions. It represents a boolean // that is true if the message source is temporary and might not exist anymore // after messages are processed. 
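//
// Editor's sketch (illustrative, not generated text): consumer-side spans
// can mirror the destination attributes with the source helpers in this
// group, alongside the `messaging.operation` values defined earlier.
// `span` is assumed to come from the caller and the values are
// placeholders:
//
//	span.SetAttributes(
//		MessagingSourceName("MyQueue"),
//		MessagingOperationProcess,
//	)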
func MessagingSourceTemporary(val bool) attribute.KeyValue { return MessagingSourceTemporaryKey.Bool(val) } // MessagingSourceAnonymous returns an attribute KeyValue conforming to the // "messaging.source.anonymous" semantic conventions. It represents a boolean // that is true if the message source is anonymous (could be unnamed or have // auto-generated name). func MessagingSourceAnonymous(val bool) attribute.KeyValue { return MessagingSourceAnonymousKey.Bool(val) } // Attributes for RabbitMQ const ( // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key // conforming to the "messaging.rabbitmq.destination.routing_key" semantic // conventions. It represents the rabbitMQ message routing key. // // Type: string // RequirementLevel: ConditionallyRequired (If not empty.) // Stability: stable // Examples: 'myKey' MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") ) // MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue // conforming to the "messaging.rabbitmq.destination.routing_key" semantic // conventions. It represents the rabbitMQ message routing key. func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { return MessagingRabbitmqDestinationRoutingKeyKey.String(val) } // Attributes for Apache Kafka const ( // MessagingKafkaMessageKeyKey is the attribute Key conforming to the // "messaging.kafka.message.key" semantic conventions. It represents the // message keys in Kafka are used for grouping alike messages to ensure // they're processed on the same partition. They differ from // `messaging.message.id` in that they're not unique. If the key is `null`, // the attribute MUST NOT be set. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'myKey' // Note: If the key type is not string, it's string representation has to // be supplied for the attribute. If the key has no unambiguous, canonical // string form, don't include its value. MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the // "messaging.kafka.consumer.group" semantic conventions. It represents the // name of the Kafka Consumer Group that is handling the message. Only // applies to consumers, not producers. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'my-group' MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") // MessagingKafkaClientIDKey is the attribute Key conforming to the // "messaging.kafka.client_id" semantic conventions. It represents the // client ID for the Consumer or Producer that is handling the message. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'client-5' MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to // the "messaging.kafka.destination.partition" semantic conventions. It // represents the partition the message is sent to. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2 MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the // "messaging.kafka.source.partition" semantic conventions. It represents // the partition the message is received from. 
// // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2 MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition") // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the // "messaging.kafka.message.offset" semantic conventions. It represents the // offset of a record in the corresponding Kafka partition. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 42 MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the // "messaging.kafka.message.tombstone" semantic conventions. It represents // a boolean that is true if the message is a tombstone. // // Type: boolean // RequirementLevel: ConditionallyRequired (If value is `true`. When // missing, the value is assumed to be `false`.) // Stability: stable MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") ) // MessagingKafkaMessageKey returns an attribute KeyValue conforming to the // "messaging.kafka.message.key" semantic conventions. It represents the // message keys in Kafka are used for grouping alike messages to ensure they're // processed on the same partition. They differ from `messaging.message.id` in // that they're not unique. If the key is `null`, the attribute MUST NOT be // set. func MessagingKafkaMessageKey(val string) attribute.KeyValue { return MessagingKafkaMessageKeyKey.String(val) } // MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to // the "messaging.kafka.consumer.group" semantic conventions. It represents the // name of the Kafka Consumer Group that is handling the message. Only applies // to consumers, not producers. func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { return MessagingKafkaConsumerGroupKey.String(val) } // MessagingKafkaClientID returns an attribute KeyValue conforming to the // "messaging.kafka.client_id" semantic conventions. It represents the client // ID for the Consumer or Producer that is handling the message. func MessagingKafkaClientID(val string) attribute.KeyValue { return MessagingKafkaClientIDKey.String(val) } // MessagingKafkaDestinationPartition returns an attribute KeyValue // conforming to the "messaging.kafka.destination.partition" semantic // conventions. It represents the partition the message is sent to. func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { return MessagingKafkaDestinationPartitionKey.Int(val) } // MessagingKafkaSourcePartition returns an attribute KeyValue conforming to // the "messaging.kafka.source.partition" semantic conventions. It represents // the partition the message is received from. func MessagingKafkaSourcePartition(val int) attribute.KeyValue { return MessagingKafkaSourcePartitionKey.Int(val) } // MessagingKafkaMessageOffset returns an attribute KeyValue conforming to // the "messaging.kafka.message.offset" semantic conventions. It represents the // offset of a record in the corresponding Kafka partition. func MessagingKafkaMessageOffset(val int) attribute.KeyValue { return MessagingKafkaMessageOffsetKey.Int(val) } // MessagingKafkaMessageTombstone returns an attribute KeyValue conforming // to the "messaging.kafka.message.tombstone" semantic conventions. It // represents a boolean that is true if the message is a tombstone. 
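//
// Editor's sketch (illustrative, not generated text): the Kafka-specific
// helpers above are intended to be combined with the general messaging
// attributes. `span` is assumed to be a trace.Span owned by the caller;
// the values mirror the Examples given above:
//
//	span.SetAttributes(
//		MessagingSystem("kafka"),
//		MessagingKafkaMessageKey("myKey"),
//		MessagingKafkaDestinationPartition(2),
//		MessagingKafkaMessageOffset(42),
//	)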
func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { return MessagingKafkaMessageTombstoneKey.Bool(val) } // Attributes for Apache RocketMQ const ( // MessagingRocketmqNamespaceKey is the attribute Key conforming to the // "messaging.rocketmq.namespace" semantic conventions. It represents the // namespace of RocketMQ resources, resources in different namespaces are // individual. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myNamespace' MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") // MessagingRocketmqClientGroupKey is the attribute Key conforming to the // "messaging.rocketmq.client_group" semantic conventions. It represents // the name of the RocketMQ producer/consumer group that is handling the // message. The client type is identified by the SpanKind. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myConsumerGroup' MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") // MessagingRocketmqClientIDKey is the attribute Key conforming to the // "messaging.rocketmq.client_id" semantic conventions. It represents the // unique identifier for each client. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myhost@8742@s8083jm' MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key // conforming to the "messaging.rocketmq.message.delivery_timestamp" // semantic conventions. It represents the timestamp in milliseconds that // the delay message is expected to be delivered to consumer. // // Type: int // RequirementLevel: ConditionallyRequired (If the message type is delay // and delay time level is not specified.) // Stability: stable // Examples: 1665987217045 MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key // conforming to the "messaging.rocketmq.message.delay_time_level" semantic // conventions. It represents the delay time level for delay message, which // determines the message delay time. // // Type: int // RequirementLevel: ConditionallyRequired (If the message type is delay // and delivery timestamp is not specified.) // Stability: stable // Examples: 3 MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the // "messaging.rocketmq.message.group" semantic conventions. It represents // the it is essential for FIFO message. Messages that belong to the same // message group are always processed one by one within the same consumer // group. // // Type: string // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) // Stability: stable // Examples: 'myMessageGroup' MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the // "messaging.rocketmq.message.type" semantic conventions. It represents // the type of message. // // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") // MessagingRocketmqMessageTagKey is the attribute Key conforming to the // "messaging.rocketmq.message.tag" semantic conventions. It represents the // secondary classifier of message besides topic. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'tagA' MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the // "messaging.rocketmq.message.keys" semantic conventions. It represents // the key(s) of message, another way to mark message besides message id. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'keyA', 'keyB' MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to // the "messaging.rocketmq.consumption_model" semantic conventions. It // represents the model of message consumption. This only applies to // consumer spans. // // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") ) var ( // Normal message MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") // FIFO message MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") // Delay message MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") // Transaction message MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") ) var ( // Clustering consumption model MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") // Broadcasting consumption model MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") ) // MessagingRocketmqNamespace returns an attribute KeyValue conforming to // the "messaging.rocketmq.namespace" semantic conventions. It represents the // namespace of RocketMQ resources, resources in different namespaces are // individual. func MessagingRocketmqNamespace(val string) attribute.KeyValue { return MessagingRocketmqNamespaceKey.String(val) } // MessagingRocketmqClientGroup returns an attribute KeyValue conforming to // the "messaging.rocketmq.client_group" semantic conventions. It represents // the name of the RocketMQ producer/consumer group that is handling the // message. The client type is identified by the SpanKind. func MessagingRocketmqClientGroup(val string) attribute.KeyValue { return MessagingRocketmqClientGroupKey.String(val) } // MessagingRocketmqClientID returns an attribute KeyValue conforming to the // "messaging.rocketmq.client_id" semantic conventions. It represents the // unique identifier for each client. func MessagingRocketmqClientID(val string) attribute.KeyValue { return MessagingRocketmqClientIDKey.String(val) } // MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue // conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic // conventions. It represents the timestamp in milliseconds that the delay // message is expected to be delivered to consumer. func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) } // MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue // conforming to the "messaging.rocketmq.message.delay_time_level" semantic // conventions. It represents the delay time level for delay message, which // determines the message delay time. 
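// For illustration only (a sketch, not part of the generated conventions):
// assuming this package is imported as semconv, a RocketMQ delay message
// might be described with:
//
//	span.SetAttributes(
//		semconv.MessagingRocketmqNamespace("myNamespace"),
//		semconv.MessagingRocketmqClientGroup("myConsumerGroup"),
//		semconv.MessagingRocketmqMessageTypeDelay,
//		semconv.MessagingRocketmqMessageDelayTimeLevel(3),
//	)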
func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) } // MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to // the "messaging.rocketmq.message.group" semantic conventions. It represents // the it is essential for FIFO message. Messages that belong to the same // message group are always processed one by one within the same consumer // group. func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { return MessagingRocketmqMessageGroupKey.String(val) } // MessagingRocketmqMessageTag returns an attribute KeyValue conforming to // the "messaging.rocketmq.message.tag" semantic conventions. It represents the // secondary classifier of message besides topic. func MessagingRocketmqMessageTag(val string) attribute.KeyValue { return MessagingRocketmqMessageTagKey.String(val) } // MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to // the "messaging.rocketmq.message.keys" semantic conventions. It represents // the key(s) of message, another way to mark message besides message id. func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { return MessagingRocketmqMessageKeysKey.StringSlice(val) } // Describes user-agent attributes. const ( // UserAgentOriginalKey is the attribute Key conforming to the // "user_agent.original" semantic conventions. It represents the value of // the [HTTP // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) // header sent by the client. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' UserAgentOriginalKey = attribute.Key("user_agent.original") ) // UserAgentOriginal returns an attribute KeyValue conforming to the // "user_agent.original" semantic conventions. It represents the value of the // [HTTP // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) // header sent by the client. func UserAgentOriginal(val string) attribute.KeyValue { return UserAgentOriginalKey.String(val) } opentelemetry-go-1.21.0/semconv/v1.20.0/doc.go000066400000000000000000000016641452547353200205650ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package semconv implements OpenTelemetry semantic conventions. // // OpenTelemetry semantic conventions are agreed standardized naming // patterns for OpenTelemetry things. This package represents the conventions // as of the v1.20.0 version of the OpenTelemetry specification. package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" opentelemetry-go-1.21.0/semconv/v1.20.0/event.go000066400000000000000000000173141452547353200211400ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" import "go.opentelemetry.io/otel/attribute" // This semantic convention defines the attributes used to represent a feature // flag evaluation as an event. const ( // FeatureFlagKeyKey is the attribute Key conforming to the // "feature_flag.key" semantic conventions. It represents the unique // identifier of the feature flag. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'logo-color' FeatureFlagKeyKey = attribute.Key("feature_flag.key") // FeatureFlagProviderNameKey is the attribute Key conforming to the // "feature_flag.provider_name" semantic conventions. It represents the // name of the service provider that performs the flag evaluation. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'Flag Manager' FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") // FeatureFlagVariantKey is the attribute Key conforming to the // "feature_flag.variant" semantic conventions. It represents the sHOULD be // a semantic identifier for a value. If one is unavailable, a stringified // version of the value can be used. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'red', 'true', 'on' // Note: A semantic identifier, commonly referred to as a variant, provides // a means // for referring to a value without including the value itself. This can // provide additional context for understanding the meaning behind a value. // For example, the variant `red` maybe be used for the value `#c05543`. // // A stringified version of the value can be used in situations where a // semantic identifier is unavailable. String representation of the value // should be determined by the implementer. FeatureFlagVariantKey = attribute.Key("feature_flag.variant") ) // FeatureFlagKey returns an attribute KeyValue conforming to the // "feature_flag.key" semantic conventions. It represents the unique identifier // of the feature flag. func FeatureFlagKey(val string) attribute.KeyValue { return FeatureFlagKeyKey.String(val) } // FeatureFlagProviderName returns an attribute KeyValue conforming to the // "feature_flag.provider_name" semantic conventions. It represents the name of // the service provider that performs the flag evaluation. func FeatureFlagProviderName(val string) attribute.KeyValue { return FeatureFlagProviderNameKey.String(val) } // FeatureFlagVariant returns an attribute KeyValue conforming to the // "feature_flag.variant" semantic conventions. It represents the sHOULD be a // semantic identifier for a value. If one is unavailable, a stringified // version of the value can be used. func FeatureFlagVariant(val string) attribute.KeyValue { return FeatureFlagVariantKey.String(val) } // RPC received/sent message. const ( // MessageTypeKey is the attribute Key conforming to the "message.type" // semantic conventions. It represents the whether this is a received or // sent message. 
// // Type: Enum // RequirementLevel: Optional // Stability: stable MessageTypeKey = attribute.Key("message.type") // MessageIDKey is the attribute Key conforming to the "message.id" // semantic conventions. It represents the mUST be calculated as two // different counters starting from `1` one for sent messages and one for // received message. // // Type: int // RequirementLevel: Optional // Stability: stable // Note: This way we guarantee that the values will be consistent between // different implementations. MessageIDKey = attribute.Key("message.id") // MessageCompressedSizeKey is the attribute Key conforming to the // "message.compressed_size" semantic conventions. It represents the // compressed size of the message in bytes. // // Type: int // RequirementLevel: Optional // Stability: stable MessageCompressedSizeKey = attribute.Key("message.compressed_size") // MessageUncompressedSizeKey is the attribute Key conforming to the // "message.uncompressed_size" semantic conventions. It represents the // uncompressed size of the message in bytes. // // Type: int // RequirementLevel: Optional // Stability: stable MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") ) var ( // sent MessageTypeSent = MessageTypeKey.String("SENT") // received MessageTypeReceived = MessageTypeKey.String("RECEIVED") ) // MessageID returns an attribute KeyValue conforming to the "message.id" // semantic conventions. It represents the mUST be calculated as two different // counters starting from `1` one for sent messages and one for received // message. func MessageID(val int) attribute.KeyValue { return MessageIDKey.Int(val) } // MessageCompressedSize returns an attribute KeyValue conforming to the // "message.compressed_size" semantic conventions. It represents the compressed // size of the message in bytes. func MessageCompressedSize(val int) attribute.KeyValue { return MessageCompressedSizeKey.Int(val) } // MessageUncompressedSize returns an attribute KeyValue conforming to the // "message.uncompressed_size" semantic conventions. It represents the // uncompressed size of the message in bytes. func MessageUncompressedSize(val int) attribute.KeyValue { return MessageUncompressedSizeKey.Int(val) } // The attributes used to report a single exception associated with a span. const ( // ExceptionEscapedKey is the attribute Key conforming to the // "exception.escaped" semantic conventions. It represents the sHOULD be // set to true if the exception event is recorded at a point where it is // known that the exception is escaping the scope of the span. // // Type: boolean // RequirementLevel: Optional // Stability: stable // Note: An exception is considered to have escaped (or left) the scope of // a span, // if that span is ended while the exception is still logically "in // flight". // This may be actually "in flight" in some languages (e.g. if the // exception // is passed to a Context manager's `__exit__` method in Python) but will // usually be caught at the point of recording the exception in most // languages. // // It is usually not possible to determine at the point where an exception // is thrown // whether it will escape the scope of a span. // However, it is trivial to know that an exception // will escape, if one checks for an active exception just before ending // the span, // as done in the [example above](#recording-an-exception). 
// // It follows that an exception may still escape the scope of the span // even if the `exception.escaped` attribute was not set or set to false, // since the event might have been recorded at a time where it was not // clear whether the exception will escape. ExceptionEscapedKey = attribute.Key("exception.escaped") ) // ExceptionEscaped returns an attribute KeyValue conforming to the // "exception.escaped" semantic conventions. It represents the sHOULD be set to // true if the exception event is recorded at a point where it is known that // the exception is escaping the scope of the span. func ExceptionEscaped(val bool) attribute.KeyValue { return ExceptionEscapedKey.Bool(val) } opentelemetry-go-1.21.0/semconv/v1.20.0/exception.go000066400000000000000000000014301452547353200220050ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" const ( // ExceptionEventName is the name of the Span event representing an exception. ExceptionEventName = "exception" ) opentelemetry-go-1.21.0/semconv/v1.20.0/http.go000066400000000000000000000014401452547353200207670ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" // HTTP scheme attributes. var ( HTTPSchemeHTTP = HTTPSchemeKey.String("http") HTTPSchemeHTTPS = HTTPSchemeKey.String("https") ) opentelemetry-go-1.21.0/semconv/v1.20.0/httpconv/000077500000000000000000000000001452547353200213275ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.20.0/httpconv/http.go000066400000000000000000000150471452547353200226440ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package httpconv provides OpenTelemetry HTTP semantic conventions for // tracing telemetry. 
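// A minimal usage sketch (illustrative only; the server name, req, and span
// values below are assumptions, not part of this package):
//
//	attrs := httpconv.ServerRequest("example.com", req)
//	span.SetAttributes(attrs...)
//	// ... serve the request ...
//	code, desc := httpconv.ServerStatus(200)
//	span.SetStatus(code, desc)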
package httpconv // import "go.opentelemetry.io/otel/semconv/v1.20.0/httpconv" import ( "net/http" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/semconv/internal/v4" semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) var ( nc = &internal.NetConv{ NetHostNameKey: semconv.NetHostNameKey, NetHostPortKey: semconv.NetHostPortKey, NetPeerNameKey: semconv.NetPeerNameKey, NetPeerPortKey: semconv.NetPeerPortKey, NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, NetSockPeerPortKey: semconv.NetSockPeerPortKey, NetTransportOther: semconv.NetTransportOther, NetTransportTCP: semconv.NetTransportTCP, NetTransportUDP: semconv.NetTransportUDP, NetTransportInProc: semconv.NetTransportInProc, } hc = &internal.HTTPConv{ NetConv: nc, EnduserIDKey: semconv.EnduserIDKey, HTTPClientIPKey: semconv.HTTPClientIPKey, NetProtocolNameKey: semconv.NetProtocolNameKey, NetProtocolVersionKey: semconv.NetProtocolVersionKey, HTTPMethodKey: semconv.HTTPMethodKey, HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey, HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey, HTTPRouteKey: semconv.HTTPRouteKey, HTTPSchemeHTTP: semconv.HTTPSchemeHTTP, HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS, HTTPStatusCodeKey: semconv.HTTPStatusCodeKey, HTTPTargetKey: semconv.HTTPTargetKey, HTTPURLKey: semconv.HTTPURLKey, UserAgentOriginalKey: semconv.UserAgentOriginalKey, } ) // ClientResponse returns trace attributes for an HTTP response received by a // client from a server. It will return the following attributes if the related // values are defined in resp: "http.status.code", // "http.response_content_length". // // This does not add all OpenTelemetry required attributes for an HTTP event, // it assumes ClientRequest was used to create the span with a complete set of // attributes. If a complete set of attributes can be generated using the // request contained in resp. For example: // // append(ClientResponse(resp), ClientRequest(resp.Request)...) func ClientResponse(resp *http.Response) []attribute.KeyValue { return hc.ClientResponse(resp) } // ClientRequest returns trace attributes for an HTTP request made by a client. // The following attributes are always returned: "http.url", // "net.protocol.(name|version)", "http.method", "net.peer.name". // The following attributes are returned if the related values are defined // in req: "net.peer.port", "http.user_agent", "http.request_content_length", // "enduser.id". func ClientRequest(req *http.Request) []attribute.KeyValue { return hc.ClientRequest(req) } // ClientStatus returns a span status code and message for an HTTP status code // value received by a client. func ClientStatus(code int) (codes.Code, string) { return hc.ClientStatus(code) } // ServerRequest returns trace attributes for an HTTP request received by a // server. // // The server must be the primary server name if it is known. For example this // would be the ServerName directive // (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache // server, and the server_name directive // (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an // nginx server. More generically, the primary server name would be the host // header value that matches the default virtual host of an HTTP server. It // should include the host identifier and if a port is used to route to the // server that port identifier should be included as an appropriate port // suffix. 
// // If the primary server name is not known, server should be an empty string. // The req Host will be used to determine the server instead. // // The following attributes are always returned: "http.method", "http.scheme", // ""net.protocol.(name|version)", "http.target", "net.host.name". // The following attributes are returned if they related values are defined // in req: "net.host.port", "net.sock.peer.addr", "net.sock.peer.port", // "user_agent.original", "enduser.id", "http.client_ip". func ServerRequest(server string, req *http.Request) []attribute.KeyValue { return hc.ServerRequest(server, req) } // ServerStatus returns a span status code and message for an HTTP status code // value returned by a server. Status codes in the 400-499 range are not // returned as errors. func ServerStatus(code int) (codes.Code, string) { return hc.ServerStatus(code) } // RequestHeader returns the contents of h as attributes. // // Instrumentation should require an explicit configuration of which headers to // captured and then prune what they pass here. Including all headers can be a // security risk - explicit configuration helps avoid leaking sensitive // information. // // The User-Agent header is already captured in the user_agent.original attribute // from ClientRequest and ServerRequest. Instrumentation may provide an option // to capture that header here even though it is not recommended. Otherwise, // instrumentation should filter that out of what is passed. func RequestHeader(h http.Header) []attribute.KeyValue { return hc.RequestHeader(h) } // ResponseHeader returns the contents of h as attributes. // // Instrumentation should require an explicit configuration of which headers to // captured and then prune what they pass here. Including all headers can be a // security risk - explicit configuration helps avoid leaking sensitive // information. // // The User-Agent header is already captured in the user_agent.original attribute // from ClientRequest and ServerRequest. Instrumentation may provide an option // to capture that header here even though it is not recommended. Otherwise, // instrumentation should filter that out of what is passed. func ResponseHeader(h http.Header) []attribute.KeyValue { return hc.ResponseHeader(h) } opentelemetry-go-1.21.0/semconv/v1.20.0/netconv/000077500000000000000000000000001452547353200211365ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.20.0/netconv/net.go000066400000000000000000000053211452547353200222540ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package netconv provides OpenTelemetry network semantic conventions for // tracing telemetry. 
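// A minimal usage sketch (illustrative only; the listener ln and the span
// are assumptions, not part of this package):
//
//	attrs := netconv.Server("localhost:8080", ln)
//	span.SetAttributes(attrs...)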
package netconv // import "go.opentelemetry.io/otel/semconv/v1.20.0/netconv" import ( "net" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/semconv/internal/v3" semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) var nc = &internal.NetConv{ NetHostNameKey: semconv.NetHostNameKey, NetHostPortKey: semconv.NetHostPortKey, NetPeerNameKey: semconv.NetPeerNameKey, NetPeerPortKey: semconv.NetPeerPortKey, NetSockFamilyKey: semconv.NetSockFamilyKey, NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, NetSockPeerPortKey: semconv.NetSockPeerPortKey, NetSockHostAddrKey: semconv.NetSockHostAddrKey, NetSockHostPortKey: semconv.NetSockHostPortKey, NetTransportOther: semconv.NetTransportOther, NetTransportTCP: semconv.NetTransportTCP, NetTransportUDP: semconv.NetTransportUDP, NetTransportInProc: semconv.NetTransportInProc, } // Transport returns a trace attribute describing the transport protocol of the // passed network. See the net.Dial for information about acceptable network // values. func Transport(network string) attribute.KeyValue { return nc.Transport(network) } // Client returns trace attributes for a client network connection to address. // See net.Dial for information about acceptable address values, address should // be the same as the one used to create conn. If conn is nil, only network // peer attributes will be returned that describe address. Otherwise, the // socket level information about conn will also be included. func Client(address string, conn net.Conn) []attribute.KeyValue { return nc.Client(address, conn) } // Server returns trace attributes for a network listener listening at address. // See net.Listen for information about acceptable address values, address // should be the same as the one used to create ln. If ln is nil, only network // host attributes will be returned that describe address. Otherwise, the // socket level information about ln will also be included. func Server(address string, ln net.Listener) []attribute.KeyValue { return nc.Server(address, ln) } opentelemetry-go-1.21.0/semconv/v1.20.0/resource.go000066400000000000000000002356331452547353200216540ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" import "go.opentelemetry.io/otel/attribute" // The web browser in which the application represented by the resource is // running. The `browser.*` attributes MUST be used only for resources that // represent applications running in a web browser (regardless of whether // running on a mobile or desktop device). const ( // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" // semantic conventions. 
It represents the array of brand name and version // separated by a space // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (`navigator.userAgentData.brands`). BrowserBrandsKey = attribute.Key("browser.brands") // BrowserPlatformKey is the attribute Key conforming to the // "browser.platform" semantic conventions. It represents the platform on // which the browser is running // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Windows', 'macOS', 'Android' // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (`navigator.userAgentData.platform`). If unavailable, the legacy // `navigator.platform` API SHOULD NOT be used instead and this attribute // SHOULD be left unset in order for the values to be consistent. // The list of possible values is defined in the [W3C User-Agent Client // Hints // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). // Note that some (but not all) of these values can overlap with values in // the [`os.type` and `os.name` attributes](./os.md). However, for // consistency, the values in the `browser.platform` attribute should // capture the exact value that the user agent provides. BrowserPlatformKey = attribute.Key("browser.platform") // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" // semantic conventions. It represents a boolean that is true if the // browser is running on a mobile device // // Type: boolean // RequirementLevel: Optional // Stability: stable // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (`navigator.userAgentData.mobile`). If unavailable, this attribute // SHOULD be left unset. BrowserMobileKey = attribute.Key("browser.mobile") // BrowserLanguageKey is the attribute Key conforming to the // "browser.language" semantic conventions. It represents the preferred // language of the user using the browser // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'en', 'en-US', 'fr', 'fr-FR' // Note: This value is intended to be taken from the Navigator API // `navigator.language`. BrowserLanguageKey = attribute.Key("browser.language") ) // BrowserBrands returns an attribute KeyValue conforming to the // "browser.brands" semantic conventions. It represents the array of brand name // and version separated by a space func BrowserBrands(val ...string) attribute.KeyValue { return BrowserBrandsKey.StringSlice(val) } // BrowserPlatform returns an attribute KeyValue conforming to the // "browser.platform" semantic conventions. It represents the platform on which // the browser is running func BrowserPlatform(val string) attribute.KeyValue { return BrowserPlatformKey.String(val) } // BrowserMobile returns an attribute KeyValue conforming to the // "browser.mobile" semantic conventions. It represents a boolean that is true // if the browser is running on a mobile device func BrowserMobile(val bool) attribute.KeyValue { return BrowserMobileKey.Bool(val) } // BrowserLanguage returns an attribute KeyValue conforming to the // "browser.language" semantic conventions. 
It represents the preferred // language of the user using the browser func BrowserLanguage(val string) attribute.KeyValue { return BrowserLanguageKey.String(val) } // A cloud environment (e.g. GCP, Azure, AWS) const ( // CloudProviderKey is the attribute Key conforming to the "cloud.provider" // semantic conventions. It represents the name of the cloud provider. // // Type: Enum // RequirementLevel: Optional // Stability: stable CloudProviderKey = attribute.Key("cloud.provider") // CloudAccountIDKey is the attribute Key conforming to the // "cloud.account.id" semantic conventions. It represents the cloud account // ID the resource is assigned to. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '111111111111', 'opentelemetry' CloudAccountIDKey = attribute.Key("cloud.account.id") // CloudRegionKey is the attribute Key conforming to the "cloud.region" // semantic conventions. It represents the geographical region the resource // is running. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-central1', 'us-east-1' // Note: Refer to your provider's docs to see the available regions, for // example [Alibaba Cloud // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), // [Azure // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), // [Google Cloud regions](https://cloud.google.com/about/locations), or // [Tencent Cloud // regions](https://www.tencentcloud.com/document/product/213/6091). CloudRegionKey = attribute.Key("cloud.region") // CloudResourceIDKey is the attribute Key conforming to the // "cloud.resource_id" semantic conventions. It represents the cloud // provider-specific native identifier of the monitored cloud resource // (e.g. an // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // on AWS, a [fully qualified resource // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) // on Azure, a [full resource // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) // on GCP) // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' // Note: On some cloud providers, it may not be possible to determine the // full ID at startup, // so it may be necessary to set `cloud.resource_id` as a span attribute // instead. // // The exact value to use for `cloud.resource_id` depends on the cloud // provider. // The following well-known definitions MUST be used if you set this // attribute and they apply: // // * **AWS Lambda:** The function // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). // Take care not to use the "invoked ARN" directly but replace any // [alias // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) // with the resolved function version, as the same runtime instance may // be invokable with // multiple different aliases. 
// * **GCP:** The [URI of the // resource](https://cloud.google.com/iam/docs/full-resource-names) // * **Azure:** The [Fully Qualified Resource // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) // of the invoked function, // *not* the function app, having the form // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. // This means that a span attribute MUST be used, as an Azure function // app can host multiple functions that would usually share // a TracerProvider. CloudResourceIDKey = attribute.Key("cloud.resource_id") // CloudAvailabilityZoneKey is the attribute Key conforming to the // "cloud.availability_zone" semantic conventions. It represents the cloud // regions often have multiple, isolated locations known as zones to // increase availability. Availability zone represents the zone where the // resource is running. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-east-1c' // Note: Availability zones are called "zones" on Alibaba Cloud and Google // Cloud. CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" // semantic conventions. It represents the cloud platform in use. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: The prefix of the service SHOULD match the one specified in // `cloud.provider`. CloudPlatformKey = attribute.Key("cloud.platform") ) var ( // Alibaba Cloud CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") // Amazon Web Services CloudProviderAWS = CloudProviderKey.String("aws") // Microsoft Azure CloudProviderAzure = CloudProviderKey.String("azure") // Google Cloud Platform CloudProviderGCP = CloudProviderKey.String("gcp") // Heroku Platform as a Service CloudProviderHeroku = CloudProviderKey.String("heroku") // IBM Cloud CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") // Tencent Cloud CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") ) var ( // Alibaba Cloud Elastic Compute Service CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") // Alibaba Cloud Function Compute CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") // Red Hat OpenShift on Alibaba Cloud CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") // AWS Elastic Compute Cloud CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") // AWS Elastic Container Service CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") // AWS Elastic Kubernetes Service CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") // AWS Lambda CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") // AWS Elastic Beanstalk CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") // AWS App Runner CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") // Red Hat OpenShift on AWS (ROSA) CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") // Azure Virtual Machines CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") // Azure Container Instances CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") // Azure Kubernetes Service CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") // Azure Functions CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") // Azure App Service CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") // Azure 
Red Hat OpenShift CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") // Google Cloud Compute Engine (GCE) CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") // Google Cloud Run CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") // Google Cloud Kubernetes Engine (GKE) CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") // Google Cloud Functions (GCF) CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") // Google Cloud App Engine (GAE) CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") // Red Hat OpenShift on Google Cloud CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") // Red Hat OpenShift on IBM Cloud CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") // Tencent Cloud Cloud Virtual Machine (CVM) CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") // Tencent Cloud Elastic Kubernetes Service (EKS) CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") // Tencent Cloud Serverless Cloud Function (SCF) CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") ) // CloudAccountID returns an attribute KeyValue conforming to the // "cloud.account.id" semantic conventions. It represents the cloud account ID // the resource is assigned to. func CloudAccountID(val string) attribute.KeyValue { return CloudAccountIDKey.String(val) } // CloudRegion returns an attribute KeyValue conforming to the // "cloud.region" semantic conventions. It represents the geographical region // the resource is running. func CloudRegion(val string) attribute.KeyValue { return CloudRegionKey.String(val) } // CloudResourceID returns an attribute KeyValue conforming to the // "cloud.resource_id" semantic conventions. It represents the cloud // provider-specific native identifier of the monitored cloud resource (e.g. an // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // on AWS, a [fully qualified resource // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) // on Azure, a [full resource // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) // on GCP) func CloudResourceID(val string) attribute.KeyValue { return CloudResourceIDKey.String(val) } // CloudAvailabilityZone returns an attribute KeyValue conforming to the // "cloud.availability_zone" semantic conventions. It represents the cloud // regions often have multiple, isolated locations known as zones to increase // availability. Availability zone represents the zone where the resource is // running. func CloudAvailabilityZone(val string) attribute.KeyValue { return CloudAvailabilityZoneKey.String(val) } // Resources used by AWS Elastic Container Service (ECS). const ( // AWSECSContainerARNKey is the attribute Key conforming to the // "aws.ecs.container.arn" semantic conventions. It represents the Amazon // Resource Name (ARN) of an [ECS container // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") // AWSECSClusterARNKey is the attribute Key conforming to the // "aws.ecs.cluster.arn" semantic conventions. 
It represents the ARN of an // [ECS // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") // AWSECSLaunchtypeKey is the attribute Key conforming to the // "aws.ecs.launchtype" semantic conventions. It represents the [launch // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) // for an ECS task. // // Type: Enum // RequirementLevel: Optional // Stability: stable AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") // AWSECSTaskARNKey is the attribute Key conforming to the // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an // [ECS task // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") // AWSECSTaskFamilyKey is the attribute Key conforming to the // "aws.ecs.task.family" semantic conventions. It represents the task // definition family this task definition is a member of. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-family' AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") // AWSECSTaskRevisionKey is the attribute Key conforming to the // "aws.ecs.task.revision" semantic conventions. It represents the revision // for this task definition. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '8', '26' AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") ) var ( // ec2 AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") // fargate AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") ) // AWSECSContainerARN returns an attribute KeyValue conforming to the // "aws.ecs.container.arn" semantic conventions. It represents the Amazon // Resource Name (ARN) of an [ECS container // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). func AWSECSContainerARN(val string) attribute.KeyValue { return AWSECSContainerARNKey.String(val) } // AWSECSClusterARN returns an attribute KeyValue conforming to the // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). func AWSECSClusterARN(val string) attribute.KeyValue { return AWSECSClusterARNKey.String(val) } // AWSECSTaskARN returns an attribute KeyValue conforming to the // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS // task // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). func AWSECSTaskARN(val string) attribute.KeyValue { return AWSECSTaskARNKey.String(val) } // AWSECSTaskFamily returns an attribute KeyValue conforming to the // "aws.ecs.task.family" semantic conventions. It represents the task // definition family this task definition is a member of. func AWSECSTaskFamily(val string) attribute.KeyValue { return AWSECSTaskFamilyKey.String(val) } // AWSECSTaskRevision returns an attribute KeyValue conforming to the // "aws.ecs.task.revision" semantic conventions. It represents the revision for // this task definition. 
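// For illustration only (a sketch, not part of the generated conventions):
// assuming this package is imported as semconv, ECS metadata might be
// collected as resource attributes such as:
//
//	attrs := []attribute.KeyValue{
//		semconv.AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
//		semconv.AWSECSTaskFamily("opentelemetry-family"),
//		semconv.AWSECSTaskRevision("8"),
//	}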
func AWSECSTaskRevision(val string) attribute.KeyValue { return AWSECSTaskRevisionKey.String(val) } // Resources used by AWS Elastic Kubernetes Service (EKS). const ( // AWSEKSClusterARNKey is the attribute Key conforming to the // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an // EKS cluster. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") ) // AWSEKSClusterARN returns an attribute KeyValue conforming to the // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS // cluster. func AWSEKSClusterARN(val string) attribute.KeyValue { return AWSEKSClusterARNKey.String(val) } // Resources specific to Amazon Web Services. const ( // AWSLogGroupNamesKey is the attribute Key conforming to the // "aws.log.group.names" semantic conventions. It represents the name(s) of // the AWS log group(s) an application is writing to. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '/aws/lambda/my-function', 'opentelemetry-service' // Note: Multiple log groups must be supported for cases like // multi-container applications, where a single application has sidecar // containers, and each write to their own log group. AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") // AWSLogGroupARNsKey is the attribute Key conforming to the // "aws.log.group.arns" semantic conventions. It represents the Amazon // Resource Name(s) (ARN) of the AWS log group(s). // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' // Note: See the [log group ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") // AWSLogStreamNamesKey is the attribute Key conforming to the // "aws.log.stream.names" semantic conventions. It represents the name(s) // of the AWS log stream(s) an application is writing to. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") // AWSLogStreamARNsKey is the attribute Key conforming to the // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of // the AWS log stream(s). // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' // Note: See the [log stream ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). // One log group can contain several log streams, so these ARNs necessarily // identify both a log group and a log stream. AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") ) // AWSLogGroupNames returns an attribute KeyValue conforming to the // "aws.log.group.names" semantic conventions. It represents the name(s) of the // AWS log group(s) an application is writing to. func AWSLogGroupNames(val ...string) attribute.KeyValue { return AWSLogGroupNamesKey.StringSlice(val) } // AWSLogGroupARNs returns an attribute KeyValue conforming to the // "aws.log.group.arns" semantic conventions. 
It represents the Amazon Resource // Name(s) (ARN) of the AWS log group(s). func AWSLogGroupARNs(val ...string) attribute.KeyValue { return AWSLogGroupARNsKey.StringSlice(val) } // AWSLogStreamNames returns an attribute KeyValue conforming to the // "aws.log.stream.names" semantic conventions. It represents the name(s) of // the AWS log stream(s) an application is writing to. func AWSLogStreamNames(val ...string) attribute.KeyValue { return AWSLogStreamNamesKey.StringSlice(val) } // AWSLogStreamARNs returns an attribute KeyValue conforming to the // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the // AWS log stream(s). func AWSLogStreamARNs(val ...string) attribute.KeyValue { return AWSLogStreamARNsKey.StringSlice(val) } // Heroku dyno metadata const ( // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the // "heroku.release.creation_timestamp" semantic conventions. It represents // the time and date the release was created // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2022-10-23T18:00:42Z' HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") // HerokuReleaseCommitKey is the attribute Key conforming to the // "heroku.release.commit" semantic conventions. It represents the commit // hash for the current release // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" // semantic conventions. It represents the unique identifier for the // application // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' HerokuAppIDKey = attribute.Key("heroku.app.id") ) // HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming // to the "heroku.release.creation_timestamp" semantic conventions. It // represents the time and date the release was created func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { return HerokuReleaseCreationTimestampKey.String(val) } // HerokuReleaseCommit returns an attribute KeyValue conforming to the // "heroku.release.commit" semantic conventions. It represents the commit hash // for the current release func HerokuReleaseCommit(val string) attribute.KeyValue { return HerokuReleaseCommitKey.String(val) } // HerokuAppID returns an attribute KeyValue conforming to the // "heroku.app.id" semantic conventions. It represents the unique identifier // for the application func HerokuAppID(val string) attribute.KeyValue { return HerokuAppIDKey.String(val) } // A container instance. const ( // ContainerNameKey is the attribute Key conforming to the "container.name" // semantic conventions. It represents the container name used by container // runtime. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-autoconf' ContainerNameKey = attribute.Key("container.name") // ContainerIDKey is the attribute Key conforming to the "container.id" // semantic conventions. It represents the container ID. Usually a UUID, as // for example used to [identify Docker // containers](https://docs.docker.com/engine/reference/run/#container-identification). // The UUID might be abbreviated. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'a3bf90e006b2' ContainerIDKey = attribute.Key("container.id") // ContainerRuntimeKey is the attribute Key conforming to the // "container.runtime" semantic conventions. It represents the container // runtime managing this container. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'docker', 'containerd', 'rkt' ContainerRuntimeKey = attribute.Key("container.runtime") // ContainerImageNameKey is the attribute Key conforming to the // "container.image.name" semantic conventions. It represents the name of // the image the container was built on. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'gcr.io/opentelemetry/operator' ContainerImageNameKey = attribute.Key("container.image.name") // ContainerImageTagKey is the attribute Key conforming to the // "container.image.tag" semantic conventions. It represents the container // image tag. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0.1' ContainerImageTagKey = attribute.Key("container.image.tag") ) // ContainerName returns an attribute KeyValue conforming to the // "container.name" semantic conventions. It represents the container name used // by container runtime. func ContainerName(val string) attribute.KeyValue { return ContainerNameKey.String(val) } // ContainerID returns an attribute KeyValue conforming to the // "container.id" semantic conventions. It represents the container ID. Usually // a UUID, as for example used to [identify Docker // containers](https://docs.docker.com/engine/reference/run/#container-identification). // The UUID might be abbreviated. func ContainerID(val string) attribute.KeyValue { return ContainerIDKey.String(val) } // ContainerRuntime returns an attribute KeyValue conforming to the // "container.runtime" semantic conventions. It represents the container // runtime managing this container. func ContainerRuntime(val string) attribute.KeyValue { return ContainerRuntimeKey.String(val) } // ContainerImageName returns an attribute KeyValue conforming to the // "container.image.name" semantic conventions. It represents the name of the // image the container was built on. func ContainerImageName(val string) attribute.KeyValue { return ContainerImageNameKey.String(val) } // ContainerImageTag returns an attribute KeyValue conforming to the // "container.image.tag" semantic conventions. It represents the container // image tag. func ContainerImageTag(val string) attribute.KeyValue { return ContainerImageTagKey.String(val) } // The software deployment. const ( // DeploymentEnvironmentKey is the attribute Key conforming to the // "deployment.environment" semantic conventions. It represents the name of // the [deployment // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka // deployment tier). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'staging', 'production' DeploymentEnvironmentKey = attribute.Key("deployment.environment") ) // DeploymentEnvironment returns an attribute KeyValue conforming to the // "deployment.environment" semantic conventions. It represents the name of the // [deployment // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka // deployment tier). func DeploymentEnvironment(val string) attribute.KeyValue { return DeploymentEnvironmentKey.String(val) } // The device on which the process represented by this resource is running. 
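// For illustration only (a sketch, not part of the generated conventions):
// device attributes such as the ones defined below are typically attached
// to an SDK resource, e.g. with go.opentelemetry.io/otel/sdk/resource (an
// assumption about the consuming code, not this package):
//
//	res := resource.NewWithAttributes(
//		semconv.SchemaURL,
//		semconv.DeviceID("2ab2916d-a51f-4ac8-80ee-45ac31a28092"),
//		semconv.DeviceModelIdentifier("iPhone3,4"),
//		semconv.DeviceManufacturer("Apple"),
//	)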
const ( // DeviceIDKey is the attribute Key conforming to the "device.id" semantic // conventions. It represents a unique identifier representing the device // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' // Note: The device identifier MUST only be defined using the values // outlined below. This value is not an advertising identifier and MUST NOT // be used as such. On iOS (Swift or Objective-C), this value MUST be equal // to the [vendor // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). // On Android (Java or Kotlin), this value MUST be equal to the Firebase // Installation ID or a globally unique UUID which is persisted across // sessions in your application. More information can be found // [here](https://developer.android.com/training/articles/user-data-ids) on // best practices and exact implementation details. Caution should be taken // when storing personal data or anything which can identify a user. GDPR // and data protection laws may apply, ensure you do your own due // diligence. DeviceIDKey = attribute.Key("device.id") // DeviceModelIdentifierKey is the attribute Key conforming to the // "device.model.identifier" semantic conventions. It represents the model // identifier for the device // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iPhone3,4', 'SM-G920F' // Note: It's recommended this value represents a machine readable version // of the model identifier rather than the market or consumer-friendly name // of the device. DeviceModelIdentifierKey = attribute.Key("device.model.identifier") // DeviceModelNameKey is the attribute Key conforming to the // "device.model.name" semantic conventions. It represents the marketing // name for the device model // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' // Note: It's recommended this value represents a human readable version of // the device model rather than a machine readable alternative. DeviceModelNameKey = attribute.Key("device.model.name") // DeviceManufacturerKey is the attribute Key conforming to the // "device.manufacturer" semantic conventions. It represents the name of // the device manufacturer // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Apple', 'Samsung' // Note: The Android OS provides this field via // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). // iOS apps SHOULD hardcode the value `Apple`. DeviceManufacturerKey = attribute.Key("device.manufacturer") ) // DeviceID returns an attribute KeyValue conforming to the "device.id" // semantic conventions. It represents a unique identifier representing the // device func DeviceID(val string) attribute.KeyValue { return DeviceIDKey.String(val) } // DeviceModelIdentifier returns an attribute KeyValue conforming to the // "device.model.identifier" semantic conventions. It represents the model // identifier for the device func DeviceModelIdentifier(val string) attribute.KeyValue { return DeviceModelIdentifierKey.String(val) } // DeviceModelName returns an attribute KeyValue conforming to the // "device.model.name" semantic conventions. 
It represents the marketing name // for the device model func DeviceModelName(val string) attribute.KeyValue { return DeviceModelNameKey.String(val) } // DeviceManufacturer returns an attribute KeyValue conforming to the // "device.manufacturer" semantic conventions. It represents the name of the // device manufacturer func DeviceManufacturer(val string) attribute.KeyValue { return DeviceManufacturerKey.String(val) } // A serverless instance. const ( // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic // conventions. It represents the name of the single function that this // runtime instance executes. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'my-function', 'myazurefunctionapp/some-function-name' // Note: This is the name of the function as configured/deployed on the // FaaS // platform and is usually different from the name of the callback // function (which may be stored in the // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) // span attributes). // // For some cloud providers, the above definition is ambiguous. The // following // definition of function name MUST be used for this attribute // (and consequently the span name) for the listed cloud // providers/products: // // * **Azure:** The full name `/`, i.e., function app name // followed by a forward slash followed by the function name (this form // can also be seen in the resource JSON for the function). // This means that a span attribute MUST be used, as an Azure function // app can host multiple functions that would usually share // a TracerProvider (see also the `cloud.resource_id` attribute). FaaSNameKey = attribute.Key("faas.name") // FaaSVersionKey is the attribute Key conforming to the "faas.version" // semantic conventions. It represents the immutable version of the // function being executed. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '26', 'pinkfroid-00002' // Note: Depending on the cloud provider and platform, use: // // * **AWS Lambda:** The [function // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) // (an integer represented as a decimal string). // * **Google Cloud Run:** The // [revision](https://cloud.google.com/run/docs/managing/revisions) // (i.e., the function name plus the revision suffix). // * **Google Cloud Functions:** The value of the // [`K_REVISION` environment // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). // * **Azure Functions:** Not applicable. Do not set this attribute. FaaSVersionKey = attribute.Key("faas.version") // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" // semantic conventions. It represents the execution environment ID as a // string, that will be potentially reused for other invocations to the // same function/function version. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' // Note: * **AWS Lambda:** Use the (full) log stream name. FaaSInstanceKey = attribute.Key("faas.instance") // FaaSMaxMemoryKey is the attribute Key conforming to the // "faas.max_memory" semantic conventions. It represents the amount of // memory available to the serverless function converted to Bytes. 
// // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 134217728 // Note: It's recommended to set this attribute since e.g. too little // memory can easily stop a Java AWS Lambda function from working // correctly. On AWS Lambda, the environment variable // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must // be multiplied by 1,048,576). FaaSMaxMemoryKey = attribute.Key("faas.max_memory") ) // FaaSName returns an attribute KeyValue conforming to the "faas.name" // semantic conventions. It represents the name of the single function that // this runtime instance executes. func FaaSName(val string) attribute.KeyValue { return FaaSNameKey.String(val) } // FaaSVersion returns an attribute KeyValue conforming to the // "faas.version" semantic conventions. It represents the immutable version of // the function being executed. func FaaSVersion(val string) attribute.KeyValue { return FaaSVersionKey.String(val) } // FaaSInstance returns an attribute KeyValue conforming to the // "faas.instance" semantic conventions. It represents the execution // environment ID as a string, that will be potentially reused for other // invocations to the same function/function version. func FaaSInstance(val string) attribute.KeyValue { return FaaSInstanceKey.String(val) } // FaaSMaxMemory returns an attribute KeyValue conforming to the // "faas.max_memory" semantic conventions. It represents the amount of memory // available to the serverless function converted to Bytes. func FaaSMaxMemory(val int) attribute.KeyValue { return FaaSMaxMemoryKey.Int(val) } // A host is defined as a general computing instance. const ( // HostIDKey is the attribute Key conforming to the "host.id" semantic // conventions. It represents the unique host ID. For Cloud, this must be // the instance_id assigned by the cloud provider. For non-containerized // systems, this should be the `machine-id`. See the table below for the // sources to use to determine the `machine-id` based on operating system. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'fdbf79e8af94cb7f9e8df36789187052' HostIDKey = attribute.Key("host.id") // HostNameKey is the attribute Key conforming to the "host.name" semantic // conventions. It represents the name of the host. On Unix systems, it may // contain what the hostname command returns, or the fully qualified // hostname, or another name specified by the user. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-test' HostNameKey = attribute.Key("host.name") // HostTypeKey is the attribute Key conforming to the "host.type" semantic // conventions. It represents the type of host. For Cloud, this must be the // machine type. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'n1-standard-1' HostTypeKey = attribute.Key("host.type") // HostArchKey is the attribute Key conforming to the "host.arch" semantic // conventions. It represents the CPU architecture the host system is // running on. // // Type: Enum // RequirementLevel: Optional // Stability: stable HostArchKey = attribute.Key("host.arch") // HostImageNameKey is the attribute Key conforming to the // "host.image.name" semantic conventions. It represents the name of the VM // image or OS install the host was instantiated from. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' HostImageNameKey = attribute.Key("host.image.name") // HostImageIDKey is the attribute Key conforming to the "host.image.id" // semantic conventions. It represents the VM image ID. For Cloud, this // value is from the provider. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'ami-07b06b442921831e5' HostImageIDKey = attribute.Key("host.image.id") // HostImageVersionKey is the attribute Key conforming to the // "host.image.version" semantic conventions. It represents the version // string of the VM image as defined in [Version // Attributes](README.md#version-attributes). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0.1' HostImageVersionKey = attribute.Key("host.image.version") ) var ( // AMD64 HostArchAMD64 = HostArchKey.String("amd64") // ARM32 HostArchARM32 = HostArchKey.String("arm32") // ARM64 HostArchARM64 = HostArchKey.String("arm64") // Itanium HostArchIA64 = HostArchKey.String("ia64") // 32-bit PowerPC HostArchPPC32 = HostArchKey.String("ppc32") // 64-bit PowerPC HostArchPPC64 = HostArchKey.String("ppc64") // IBM z/Architecture HostArchS390x = HostArchKey.String("s390x") // 32-bit x86 HostArchX86 = HostArchKey.String("x86") ) // HostID returns an attribute KeyValue conforming to the "host.id" semantic // conventions. It represents the unique host ID. For Cloud, this must be the // instance_id assigned by the cloud provider. For non-containerized systems, // this should be the `machine-id`. See the table below for the sources to use // to determine the `machine-id` based on operating system. func HostID(val string) attribute.KeyValue { return HostIDKey.String(val) } // HostName returns an attribute KeyValue conforming to the "host.name" // semantic conventions. It represents the name of the host. On Unix systems, // it may contain what the hostname command returns, or the fully qualified // hostname, or another name specified by the user. func HostName(val string) attribute.KeyValue { return HostNameKey.String(val) } // HostType returns an attribute KeyValue conforming to the "host.type" // semantic conventions. It represents the type of host. For Cloud, this must // be the machine type. func HostType(val string) attribute.KeyValue { return HostTypeKey.String(val) } // HostImageName returns an attribute KeyValue conforming to the // "host.image.name" semantic conventions. It represents the name of the VM // image or OS install the host was instantiated from. func HostImageName(val string) attribute.KeyValue { return HostImageNameKey.String(val) } // HostImageID returns an attribute KeyValue conforming to the // "host.image.id" semantic conventions. It represents the VM image ID. For // Cloud, this value is from the provider. func HostImageID(val string) attribute.KeyValue { return HostImageIDKey.String(val) } // HostImageVersion returns an attribute KeyValue conforming to the // "host.image.version" semantic conventions. It represents the version string // of the VM image as defined in [Version // Attributes](README.md#version-attributes). func HostImageVersion(val string) attribute.KeyValue { return HostImageVersionKey.String(val) } // A Kubernetes Cluster. const ( // K8SClusterNameKey is the attribute Key conforming to the // "k8s.cluster.name" semantic conventions. It represents the name of the // cluster. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-cluster' K8SClusterNameKey = attribute.Key("k8s.cluster.name") ) // K8SClusterName returns an attribute KeyValue conforming to the // "k8s.cluster.name" semantic conventions. It represents the name of the // cluster. func K8SClusterName(val string) attribute.KeyValue { return K8SClusterNameKey.String(val) } // A Kubernetes Node object. const ( // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" // semantic conventions. It represents the name of the Node. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'node-1' K8SNodeNameKey = attribute.Key("k8s.node.name") // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" // semantic conventions. It represents the UID of the Node. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' K8SNodeUIDKey = attribute.Key("k8s.node.uid") ) // K8SNodeName returns an attribute KeyValue conforming to the // "k8s.node.name" semantic conventions. It represents the name of the Node. func K8SNodeName(val string) attribute.KeyValue { return K8SNodeNameKey.String(val) } // K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" // semantic conventions. It represents the UID of the Node. func K8SNodeUID(val string) attribute.KeyValue { return K8SNodeUIDKey.String(val) } // A Kubernetes Namespace. const ( // K8SNamespaceNameKey is the attribute Key conforming to the // "k8s.namespace.name" semantic conventions. It represents the name of the // namespace that the pod is running in. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'default' K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") ) // K8SNamespaceName returns an attribute KeyValue conforming to the // "k8s.namespace.name" semantic conventions. It represents the name of the // namespace that the pod is running in. func K8SNamespaceName(val string) attribute.KeyValue { return K8SNamespaceNameKey.String(val) } // A Kubernetes Pod object. const ( // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" // semantic conventions. It represents the UID of the Pod. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SPodUIDKey = attribute.Key("k8s.pod.uid") // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" // semantic conventions. It represents the name of the Pod. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-pod-autoconf' K8SPodNameKey = attribute.Key("k8s.pod.name") ) // K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" // semantic conventions. It represents the UID of the Pod. func K8SPodUID(val string) attribute.KeyValue { return K8SPodUIDKey.String(val) } // K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" // semantic conventions. It represents the name of the Pod. func K8SPodName(val string) attribute.KeyValue { return K8SPodNameKey.String(val) } // A container in a // [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). const ( // K8SContainerNameKey is the attribute Key conforming to the // "k8s.container.name" semantic conventions. It represents the name of the // Container from Pod specification, must be unique within a Pod. 
Container // runtime usually uses different globally unique name (`container.name`). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'redis' K8SContainerNameKey = attribute.Key("k8s.container.name") // K8SContainerRestartCountKey is the attribute Key conforming to the // "k8s.container.restart_count" semantic conventions. It represents the // number of times the container was restarted. This attribute can be used // to identify a particular container (running or stopped) within a // container spec. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 0, 2 K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") ) // K8SContainerName returns an attribute KeyValue conforming to the // "k8s.container.name" semantic conventions. It represents the name of the // Container from Pod specification, must be unique within a Pod. Container // runtime usually uses different globally unique name (`container.name`). func K8SContainerName(val string) attribute.KeyValue { return K8SContainerNameKey.String(val) } // K8SContainerRestartCount returns an attribute KeyValue conforming to the // "k8s.container.restart_count" semantic conventions. It represents the number // of times the container was restarted. This attribute can be used to identify // a particular container (running or stopped) within a container spec. func K8SContainerRestartCount(val int) attribute.KeyValue { return K8SContainerRestartCountKey.Int(val) } // A Kubernetes ReplicaSet object. const ( // K8SReplicaSetUIDKey is the attribute Key conforming to the // "k8s.replicaset.uid" semantic conventions. It represents the UID of the // ReplicaSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") // K8SReplicaSetNameKey is the attribute Key conforming to the // "k8s.replicaset.name" semantic conventions. It represents the name of // the ReplicaSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") ) // K8SReplicaSetUID returns an attribute KeyValue conforming to the // "k8s.replicaset.uid" semantic conventions. It represents the UID of the // ReplicaSet. func K8SReplicaSetUID(val string) attribute.KeyValue { return K8SReplicaSetUIDKey.String(val) } // K8SReplicaSetName returns an attribute KeyValue conforming to the // "k8s.replicaset.name" semantic conventions. It represents the name of the // ReplicaSet. func K8SReplicaSetName(val string) attribute.KeyValue { return K8SReplicaSetNameKey.String(val) } // A Kubernetes Deployment object. const ( // K8SDeploymentUIDKey is the attribute Key conforming to the // "k8s.deployment.uid" semantic conventions. It represents the UID of the // Deployment. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") // K8SDeploymentNameKey is the attribute Key conforming to the // "k8s.deployment.name" semantic conventions. It represents the name of // the Deployment. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") ) // K8SDeploymentUID returns an attribute KeyValue conforming to the // "k8s.deployment.uid" semantic conventions. 
It represents the UID of the // Deployment. func K8SDeploymentUID(val string) attribute.KeyValue { return K8SDeploymentUIDKey.String(val) } // K8SDeploymentName returns an attribute KeyValue conforming to the // "k8s.deployment.name" semantic conventions. It represents the name of the // Deployment. func K8SDeploymentName(val string) attribute.KeyValue { return K8SDeploymentNameKey.String(val) } // A Kubernetes StatefulSet object. const ( // K8SStatefulSetUIDKey is the attribute Key conforming to the // "k8s.statefulset.uid" semantic conventions. It represents the UID of the // StatefulSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") // K8SStatefulSetNameKey is the attribute Key conforming to the // "k8s.statefulset.name" semantic conventions. It represents the name of // the StatefulSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") ) // K8SStatefulSetUID returns an attribute KeyValue conforming to the // "k8s.statefulset.uid" semantic conventions. It represents the UID of the // StatefulSet. func K8SStatefulSetUID(val string) attribute.KeyValue { return K8SStatefulSetUIDKey.String(val) } // K8SStatefulSetName returns an attribute KeyValue conforming to the // "k8s.statefulset.name" semantic conventions. It represents the name of the // StatefulSet. func K8SStatefulSetName(val string) attribute.KeyValue { return K8SStatefulSetNameKey.String(val) } // A Kubernetes DaemonSet object. const ( // K8SDaemonSetUIDKey is the attribute Key conforming to the // "k8s.daemonset.uid" semantic conventions. It represents the UID of the // DaemonSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") // K8SDaemonSetNameKey is the attribute Key conforming to the // "k8s.daemonset.name" semantic conventions. It represents the name of the // DaemonSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") ) // K8SDaemonSetUID returns an attribute KeyValue conforming to the // "k8s.daemonset.uid" semantic conventions. It represents the UID of the // DaemonSet. func K8SDaemonSetUID(val string) attribute.KeyValue { return K8SDaemonSetUIDKey.String(val) } // K8SDaemonSetName returns an attribute KeyValue conforming to the // "k8s.daemonset.name" semantic conventions. It represents the name of the // DaemonSet. func K8SDaemonSetName(val string) attribute.KeyValue { return K8SDaemonSetNameKey.String(val) } // A Kubernetes Job object. const ( // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" // semantic conventions. It represents the UID of the Job. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SJobUIDKey = attribute.Key("k8s.job.uid") // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" // semantic conventions. It represents the name of the Job. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SJobNameKey = attribute.Key("k8s.job.name") ) // K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" // semantic conventions. It represents the UID of the Job. 
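// Editor's note: a hedged usage sketch, not part of the generated semantic convention code. It shows how the Kubernetes helpers above might be collected into a slice of attributes; the namespace, pod, and deployment values are hypothetical and would normally be discovered at runtime (for example via the Kubernetes downward API).
//
//	import (
//		"go.opentelemetry.io/otel/attribute"
//		semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
//	)
//
//	// k8sAttributes returns resource attributes describing where a pod runs.
//	func k8sAttributes(namespace, pod string) []attribute.KeyValue {
//		return []attribute.KeyValue{
//			semconv.K8SNamespaceName(namespace),
//			semconv.K8SPodName(pod),
//			semconv.K8SDeploymentName("checkout"), // hypothetical
//		}
//	}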
func K8SJobUID(val string) attribute.KeyValue { return K8SJobUIDKey.String(val) } // K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" // semantic conventions. It represents the name of the Job. func K8SJobName(val string) attribute.KeyValue { return K8SJobNameKey.String(val) } // A Kubernetes CronJob object. const ( // K8SCronJobUIDKey is the attribute Key conforming to the // "k8s.cronjob.uid" semantic conventions. It represents the UID of the // CronJob. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") // K8SCronJobNameKey is the attribute Key conforming to the // "k8s.cronjob.name" semantic conventions. It represents the name of the // CronJob. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") ) // K8SCronJobUID returns an attribute KeyValue conforming to the // "k8s.cronjob.uid" semantic conventions. It represents the UID of the // CronJob. func K8SCronJobUID(val string) attribute.KeyValue { return K8SCronJobUIDKey.String(val) } // K8SCronJobName returns an attribute KeyValue conforming to the // "k8s.cronjob.name" semantic conventions. It represents the name of the // CronJob. func K8SCronJobName(val string) attribute.KeyValue { return K8SCronJobNameKey.String(val) } // The operating system (OS) on which the process represented by this resource // is running. const ( // OSTypeKey is the attribute Key conforming to the "os.type" semantic // conventions. It represents the operating system type. // // Type: Enum // RequirementLevel: Required // Stability: stable OSTypeKey = attribute.Key("os.type") // OSDescriptionKey is the attribute Key conforming to the "os.description" // semantic conventions. It represents the human readable (not intended to // be parsed) OS version information, like e.g. reported by `ver` or // `lsb_release -a` commands. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 // LTS' OSDescriptionKey = attribute.Key("os.description") // OSNameKey is the attribute Key conforming to the "os.name" semantic // conventions. It represents the human readable operating system name. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iOS', 'Android', 'Ubuntu' OSNameKey = attribute.Key("os.name") // OSVersionKey is the attribute Key conforming to the "os.version" // semantic conventions. It represents the version string of the operating // system as defined in [Version // Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '14.2.1', '18.04.1' OSVersionKey = attribute.Key("os.version") ) var ( // Microsoft Windows OSTypeWindows = OSTypeKey.String("windows") // Linux OSTypeLinux = OSTypeKey.String("linux") // Apple Darwin OSTypeDarwin = OSTypeKey.String("darwin") // FreeBSD OSTypeFreeBSD = OSTypeKey.String("freebsd") // NetBSD OSTypeNetBSD = OSTypeKey.String("netbsd") // OpenBSD OSTypeOpenBSD = OSTypeKey.String("openbsd") // DragonFly BSD OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") // HP-UX (Hewlett Packard Unix) OSTypeHPUX = OSTypeKey.String("hpux") // AIX (Advanced Interactive eXecutive) OSTypeAIX = OSTypeKey.String("aix") // SunOS, Oracle Solaris OSTypeSolaris = OSTypeKey.String("solaris") // IBM z/OS OSTypeZOS = OSTypeKey.String("z_os") ) // OSDescription returns an attribute KeyValue conforming to the // "os.description" semantic conventions. It represents the human readable (not // intended to be parsed) OS version information, like e.g. reported by `ver` // or `lsb_release -a` commands. func OSDescription(val string) attribute.KeyValue { return OSDescriptionKey.String(val) } // OSName returns an attribute KeyValue conforming to the "os.name" semantic // conventions. It represents the human readable operating system name. func OSName(val string) attribute.KeyValue { return OSNameKey.String(val) } // OSVersion returns an attribute KeyValue conforming to the "os.version" // semantic conventions. It represents the version string of the operating // system as defined in [Version // Attributes](../../resource/semantic_conventions/README.md#version-attributes). func OSVersion(val string) attribute.KeyValue { return OSVersionKey.String(val) } // An operating system process. const ( // ProcessPIDKey is the attribute Key conforming to the "process.pid" // semantic conventions. It represents the process identifier (PID). // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 1234 ProcessPIDKey = attribute.Key("process.pid") // ProcessParentPIDKey is the attribute Key conforming to the // "process.parent_pid" semantic conventions. It represents the parent // Process identifier (PID). // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 111 ProcessParentPIDKey = attribute.Key("process.parent_pid") // ProcessExecutableNameKey is the attribute Key conforming to the // "process.executable.name" semantic conventions. It represents the name // of the process executable. On Linux based systems, can be set to the // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name // of `GetProcessImageFileNameW`. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes // below.) // Stability: stable // Examples: 'otelcol' ProcessExecutableNameKey = attribute.Key("process.executable.name") // ProcessExecutablePathKey is the attribute Key conforming to the // "process.executable.path" semantic conventions. It represents the full // path to the process executable. On Linux based systems, can be set to // the target of `proc/[pid]/exe`. On Windows, can be set to the result of // `GetProcessImageFileNameW`. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes // below.) // Stability: stable // Examples: '/usr/bin/cmd/otelcol' ProcessExecutablePathKey = attribute.Key("process.executable.path") // ProcessCommandKey is the attribute Key conforming to the // "process.command" semantic conventions. 
It represents the command used // to launch the process (i.e. the command name). On Linux based systems, // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can // be set to the first parameter extracted from `GetCommandLineW`. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes // below.) // Stability: stable // Examples: 'cmd/otelcol' ProcessCommandKey = attribute.Key("process.command") // ProcessCommandLineKey is the attribute Key conforming to the // "process.command_line" semantic conventions. It represents the full // command used to launch the process as a single string representing the // full command. On Windows, can be set to the result of `GetCommandLineW`. // Do not set this if you have to assemble it just for monitoring; use // `process.command_args` instead. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes // below.) // Stability: stable // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' ProcessCommandLineKey = attribute.Key("process.command_line") // ProcessCommandArgsKey is the attribute Key conforming to the // "process.command_args" semantic conventions. It represents all the // command arguments (including the command/executable itself) as received // by the process. On Linux-based systems (and some other Unixoid systems // supporting procfs), can be set according to the list of null-delimited // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, // this would be the full argv vector passed to `main`. // // Type: string[] // RequirementLevel: ConditionallyRequired (See alternative attributes // below.) // Stability: stable // Examples: 'cmd/otecol', '--config=config.yaml' ProcessCommandArgsKey = attribute.Key("process.command_args") // ProcessOwnerKey is the attribute Key conforming to the "process.owner" // semantic conventions. It represents the username of the user that owns // the process. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'root' ProcessOwnerKey = attribute.Key("process.owner") ) // ProcessPID returns an attribute KeyValue conforming to the "process.pid" // semantic conventions. It represents the process identifier (PID). func ProcessPID(val int) attribute.KeyValue { return ProcessPIDKey.Int(val) } // ProcessParentPID returns an attribute KeyValue conforming to the // "process.parent_pid" semantic conventions. It represents the parent Process // identifier (PID). func ProcessParentPID(val int) attribute.KeyValue { return ProcessParentPIDKey.Int(val) } // ProcessExecutableName returns an attribute KeyValue conforming to the // "process.executable.name" semantic conventions. It represents the name of // the process executable. On Linux based systems, can be set to the `Name` in // `proc/[pid]/status`. On Windows, can be set to the base name of // `GetProcessImageFileNameW`. func ProcessExecutableName(val string) attribute.KeyValue { return ProcessExecutableNameKey.String(val) } // ProcessExecutablePath returns an attribute KeyValue conforming to the // "process.executable.path" semantic conventions. It represents the full path // to the process executable. On Linux based systems, can be set to the target // of `proc/[pid]/exe`. On Windows, can be set to the result of // `GetProcessImageFileNameW`. 
func ProcessExecutablePath(val string) attribute.KeyValue { return ProcessExecutablePathKey.String(val) } // ProcessCommand returns an attribute KeyValue conforming to the // "process.command" semantic conventions. It represents the command used to // launch the process (i.e. the command name). On Linux based systems, can be // set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to // the first parameter extracted from `GetCommandLineW`. func ProcessCommand(val string) attribute.KeyValue { return ProcessCommandKey.String(val) } // ProcessCommandLine returns an attribute KeyValue conforming to the // "process.command_line" semantic conventions. It represents the full command // used to launch the process as a single string representing the full command. // On Windows, can be set to the result of `GetCommandLineW`. Do not set this // if you have to assemble it just for monitoring; use `process.command_args` // instead. func ProcessCommandLine(val string) attribute.KeyValue { return ProcessCommandLineKey.String(val) } // ProcessCommandArgs returns an attribute KeyValue conforming to the // "process.command_args" semantic conventions. It represents all the // command arguments (including the command/executable itself) as received by // the process. On Linux-based systems (and some other Unixoid systems // supporting procfs), can be set according to the list of null-delimited // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, // this would be the full argv vector passed to `main`. func ProcessCommandArgs(val ...string) attribute.KeyValue { return ProcessCommandArgsKey.StringSlice(val) } // ProcessOwner returns an attribute KeyValue conforming to the // "process.owner" semantic conventions. It represents the username of the user // that owns the process. func ProcessOwner(val string) attribute.KeyValue { return ProcessOwnerKey.String(val) } // The single (language) runtime instance which is monitored. const ( // ProcessRuntimeNameKey is the attribute Key conforming to the // "process.runtime.name" semantic conventions. It represents the name of // the runtime of this process. For compiled native binaries, this SHOULD // be the name of the compiler. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'OpenJDK Runtime Environment' ProcessRuntimeNameKey = attribute.Key("process.runtime.name") // ProcessRuntimeVersionKey is the attribute Key conforming to the // "process.runtime.version" semantic conventions. It represents the // version of the runtime of this process, as returned by the runtime // without modification. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '14.0.2' ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") // ProcessRuntimeDescriptionKey is the attribute Key conforming to the // "process.runtime.description" semantic conventions. It represents an // additional description about the runtime of the process, for example a // specific vendor customization of the runtime environment. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") ) // ProcessRuntimeName returns an attribute KeyValue conforming to the // "process.runtime.name" semantic conventions. It represents the name of the // runtime of this process. For compiled native binaries, this SHOULD be the // name of the compiler. 
func ProcessRuntimeName(val string) attribute.KeyValue { return ProcessRuntimeNameKey.String(val) } // ProcessRuntimeVersion returns an attribute KeyValue conforming to the // "process.runtime.version" semantic conventions. It represents the version of // the runtime of this process, as returned by the runtime without // modification. func ProcessRuntimeVersion(val string) attribute.KeyValue { return ProcessRuntimeVersionKey.String(val) } // ProcessRuntimeDescription returns an attribute KeyValue conforming to the // "process.runtime.description" semantic conventions. It represents an // additional description about the runtime of the process, for example a // specific vendor customization of the runtime environment. func ProcessRuntimeDescription(val string) attribute.KeyValue { return ProcessRuntimeDescriptionKey.String(val) } // A service instance. const ( // ServiceNameKey is the attribute Key conforming to the "service.name" // semantic conventions. It represents the logical name of the service. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'shoppingcart' // Note: MUST be the same for all instances of horizontally scaled // services. If the value was not specified, SDKs MUST fallback to // `unknown_service:` concatenated with // [`process.executable.name`](process.md#process), e.g. // `unknown_service:bash`. If `process.executable.name` is not available, // the value MUST be set to `unknown_service`. ServiceNameKey = attribute.Key("service.name") ) // ServiceName returns an attribute KeyValue conforming to the // "service.name" semantic conventions. It represents the logical name of the // service. func ServiceName(val string) attribute.KeyValue { return ServiceNameKey.String(val) } // A service instance. const ( // ServiceNamespaceKey is the attribute Key conforming to the // "service.namespace" semantic conventions. It represents a namespace for // `service.name`. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Shop' // Note: A string value having a meaning that helps to distinguish a group // of services, for example the team name that owns a group of services. // `service.name` is expected to be unique within the same namespace. If // `service.namespace` is not specified in the Resource then `service.name` // is expected to be unique for all services that have no explicit // namespace defined (so the empty/unspecified namespace is simply one more // valid namespace). Zero-length namespace string is assumed equal to // unspecified namespace. ServiceNamespaceKey = attribute.Key("service.namespace") // ServiceInstanceIDKey is the attribute Key conforming to the // "service.instance.id" semantic conventions. It represents the string ID // of the service instance. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'my-k8s-pod-deployment-1', // '627cc493-f310-47de-96bd-71410b7dec09' // Note: MUST be unique for each instance of the same // `service.namespace,service.name` pair (in other words // `service.namespace,service.name,service.instance.id` triplet MUST be // globally unique). The ID helps to distinguish instances of the same // service that exist at the same time (e.g. instances of a horizontally // scaled service). It is preferable for the ID to be persistent and stay // the same for the lifetime of the service instance, however it is // acceptable that the ID is ephemeral and changes during important // lifetime events for the service (e.g. service restarts). 
If the service // has no inherent unique ID that can be used as the value of this // attribute it is recommended to generate a random Version 1 or Version 4 // RFC 4122 UUID (services aiming for reproducible UUIDs may also use // Version 5, see RFC 4122 for more recommendations). ServiceInstanceIDKey = attribute.Key("service.instance.id") // ServiceVersionKey is the attribute Key conforming to the // "service.version" semantic conventions. It represents the version string // of the service API or implementation. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2.0.0' ServiceVersionKey = attribute.Key("service.version") ) // ServiceNamespace returns an attribute KeyValue conforming to the // "service.namespace" semantic conventions. It represents a namespace for // `service.name`. func ServiceNamespace(val string) attribute.KeyValue { return ServiceNamespaceKey.String(val) } // ServiceInstanceID returns an attribute KeyValue conforming to the // "service.instance.id" semantic conventions. It represents the string ID of // the service instance. func ServiceInstanceID(val string) attribute.KeyValue { return ServiceInstanceIDKey.String(val) } // ServiceVersion returns an attribute KeyValue conforming to the // "service.version" semantic conventions. It represents the version string of // the service API or implementation. func ServiceVersion(val string) attribute.KeyValue { return ServiceVersionKey.String(val) } // The telemetry SDK used to capture data recorded by the instrumentation // libraries. const ( // TelemetrySDKNameKey is the attribute Key conforming to the // "telemetry.sdk.name" semantic conventions. It represents the name of the // telemetry SDK as defined above. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'opentelemetry' TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") // TelemetrySDKLanguageKey is the attribute Key conforming to the // "telemetry.sdk.language" semantic conventions. It represents the // language of the telemetry SDK. // // Type: Enum // RequirementLevel: Required // Stability: stable TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") // TelemetrySDKVersionKey is the attribute Key conforming to the // "telemetry.sdk.version" semantic conventions. It represents the version // string of the telemetry SDK. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: '1.2.3' TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") ) var ( // cpp TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") // dotnet TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") // erlang TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") // go TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") // java TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") // nodejs TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") // php TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") // python TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") // ruby TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") // webjs TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") // swift TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") ) // TelemetrySDKName returns an attribute KeyValue conforming to the // "telemetry.sdk.name" semantic conventions. It represents the name of the // telemetry SDK as defined above. 
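// Editor's note: a hedged usage sketch, not part of the generated semantic convention code. In the group above, service.name is marked Required, while the namespace, instance ID, and version helpers are Optional refinements of the service identity. The values below reuse the examples documented above; the import paths are the standard ones.
//
//	import (
//		"go.opentelemetry.io/otel/sdk/resource"
//		semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
//	)
//
//	res := resource.NewWithAttributes(
//		semconv.SchemaURL,
//		semconv.ServiceName("shoppingcart"),
//		semconv.ServiceNamespace("Shop"),
//		semconv.ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
//		semconv.ServiceVersion("2.0.0"),
//	)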
func TelemetrySDKName(val string) attribute.KeyValue { return TelemetrySDKNameKey.String(val) } // TelemetrySDKVersion returns an attribute KeyValue conforming to the // "telemetry.sdk.version" semantic conventions. It represents the version // string of the telemetry SDK. func TelemetrySDKVersion(val string) attribute.KeyValue { return TelemetrySDKVersionKey.String(val) } // The telemetry SDK used to capture data recorded by the instrumentation // libraries. const ( // TelemetryAutoVersionKey is the attribute Key conforming to the // "telemetry.auto.version" semantic conventions. It represents the version // string of the auto instrumentation agent, if used. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.2.3' TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") ) // TelemetryAutoVersion returns an attribute KeyValue conforming to the // "telemetry.auto.version" semantic conventions. It represents the version // string of the auto instrumentation agent, if used. func TelemetryAutoVersion(val string) attribute.KeyValue { return TelemetryAutoVersionKey.String(val) } // Resource describing the packaged software running the application code. Web // engines are typically executed using process.runtime. const ( // WebEngineNameKey is the attribute Key conforming to the "webengine.name" // semantic conventions. It represents the name of the web engine. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'WildFly' WebEngineNameKey = attribute.Key("webengine.name") // WebEngineVersionKey is the attribute Key conforming to the // "webengine.version" semantic conventions. It represents the version of // the web engine. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '21.0.0' WebEngineVersionKey = attribute.Key("webengine.version") // WebEngineDescriptionKey is the attribute Key conforming to the // "webengine.description" semantic conventions. It represents the // additional description of the web engine (e.g. detailed version and // edition information). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - // 2.2.2.Final' WebEngineDescriptionKey = attribute.Key("webengine.description") ) // WebEngineName returns an attribute KeyValue conforming to the // "webengine.name" semantic conventions. It represents the name of the web // engine. func WebEngineName(val string) attribute.KeyValue { return WebEngineNameKey.String(val) } // WebEngineVersion returns an attribute KeyValue conforming to the // "webengine.version" semantic conventions. It represents the version of the // web engine. func WebEngineVersion(val string) attribute.KeyValue { return WebEngineVersionKey.String(val) } // WebEngineDescription returns an attribute KeyValue conforming to the // "webengine.description" semantic conventions. It represents the additional // description of the web engine (e.g. detailed version and edition // information). func WebEngineDescription(val string) attribute.KeyValue { return WebEngineDescriptionKey.String(val) } // Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's // concepts. const ( // OTelScopeNameKey is the attribute Key conforming to the // "otel.scope.name" semantic conventions. It represents the name of the // instrumentation scope - (`InstrumentationScope.Name` in OTLP). 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'io.opentelemetry.contrib.mongodb' OTelScopeNameKey = attribute.Key("otel.scope.name") // OTelScopeVersionKey is the attribute Key conforming to the // "otel.scope.version" semantic conventions. It represents the version of // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.0.0' OTelScopeVersionKey = attribute.Key("otel.scope.version") ) // OTelScopeName returns an attribute KeyValue conforming to the // "otel.scope.name" semantic conventions. It represents the name of the // instrumentation scope - (`InstrumentationScope.Name` in OTLP). func OTelScopeName(val string) attribute.KeyValue { return OTelScopeNameKey.String(val) } // OTelScopeVersion returns an attribute KeyValue conforming to the // "otel.scope.version" semantic conventions. It represents the version of the // instrumentation scope - (`InstrumentationScope.Version` in OTLP). func OTelScopeVersion(val string) attribute.KeyValue { return OTelScopeVersionKey.String(val) } // Span attributes used by non-OTLP exporters to represent OpenTelemetry // Scope's concepts. const ( // OTelLibraryNameKey is the attribute Key conforming to the // "otel.library.name" semantic conventions. It is deprecated; // use the `otel.scope.name` attribute instead. // // Type: string // RequirementLevel: Optional // Stability: deprecated // Examples: 'io.opentelemetry.contrib.mongodb' OTelLibraryNameKey = attribute.Key("otel.library.name") // OTelLibraryVersionKey is the attribute Key conforming to the // "otel.library.version" semantic conventions. It is deprecated; // use the `otel.scope.version` attribute instead. // // Type: string // RequirementLevel: Optional // Stability: deprecated // Examples: '1.0.0' OTelLibraryVersionKey = attribute.Key("otel.library.version") ) // OTelLibraryName returns an attribute KeyValue conforming to the // "otel.library.name" semantic conventions. It is deprecated; use // the `otel.scope.name` attribute instead. func OTelLibraryName(val string) attribute.KeyValue { return OTelLibraryNameKey.String(val) } // OTelLibraryVersion returns an attribute KeyValue conforming to the // "otel.library.version" semantic conventions. It is deprecated; // use the `otel.scope.version` attribute instead. func OTelLibraryVersion(val string) attribute.KeyValue { return OTelLibraryVersionKey.String(val) } opentelemetry-go-1.21.0/semconv/v1.20.0/schema.go000066400000000000000000000017141452547353200212540ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. 
Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ const SchemaURL = "https://opentelemetry.io/schemas/1.20.0" opentelemetry-go-1.21.0/semconv/v1.20.0/trace.go000066400000000000000000003206641452547353200211220ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" import "go.opentelemetry.io/otel/attribute" // The shared attributes used to report a single exception associated with a // span or log. const ( // ExceptionTypeKey is the attribute Key conforming to the "exception.type" // semantic conventions. It represents the type of the exception (its // fully-qualified class name, if applicable). The dynamic type of the // exception should be preferred over the static type in languages that // support it. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'java.net.ConnectException', 'OSError' ExceptionTypeKey = attribute.Key("exception.type") // ExceptionMessageKey is the attribute Key conforming to the // "exception.message" semantic conventions. It represents the exception // message. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Division by zero', "Can't convert 'int' object to str // implicitly" ExceptionMessageKey = attribute.Key("exception.message") // ExceptionStacktraceKey is the attribute Key conforming to the // "exception.stacktrace" semantic conventions. It represents a stacktrace // as a string in the natural representation for the language runtime. The // representation is to be determined and documented by each language SIG. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test // exception\\n at ' // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' ExceptionStacktraceKey = attribute.Key("exception.stacktrace") ) // ExceptionType returns an attribute KeyValue conforming to the // "exception.type" semantic conventions. It represents the type of the // exception (its fully-qualified class name, if applicable). The dynamic type // of the exception should be preferred over the static type in languages that // support it. func ExceptionType(val string) attribute.KeyValue { return ExceptionTypeKey.String(val) } // ExceptionMessage returns an attribute KeyValue conforming to the // "exception.message" semantic conventions. It represents the exception // message. func ExceptionMessage(val string) attribute.KeyValue { return ExceptionMessageKey.String(val) } // ExceptionStacktrace returns an attribute KeyValue conforming to the // "exception.stacktrace" semantic conventions. 
It represents a stacktrace as a // string in the natural representation for the language runtime. The // representation is to be determined and documented by each language SIG. func ExceptionStacktrace(val string) attribute.KeyValue { return ExceptionStacktraceKey.String(val) } // The attributes described in this section are rather generic. They may be // used in any Log Record they apply to. const ( // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" // semantic conventions. It represents a unique identifier for the Log // Record. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' // Note: If an id is provided, other log records with the same id will be // considered duplicates and can be removed safely. This means, that two // distinguishable log records MUST have different values. // The id MAY be an [Universally Unique Lexicographically Sortable // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers // (e.g. UUID) may be used as needed. LogRecordUIDKey = attribute.Key("log.record.uid") ) // LogRecordUID returns an attribute KeyValue conforming to the // "log.record.uid" semantic conventions. It represents a unique identifier for // the Log Record. func LogRecordUID(val string) attribute.KeyValue { return LogRecordUIDKey.String(val) } // Span attributes used by AWS Lambda (in addition to general `faas` // attributes). const ( // AWSLambdaInvokedARNKey is the attribute Key conforming to the // "aws.lambda.invoked_arn" semantic conventions. It represents the full // invoked ARN as provided on the `Context` passed to the function // (`Lambda-Runtime-Invoked-Function-ARN` header on the // `/runtime/invocation/next` applicable). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' // Note: This may be different from `cloud.resource_id` if an alias is // involved. AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") ) // AWSLambdaInvokedARN returns an attribute KeyValue conforming to the // "aws.lambda.invoked_arn" semantic conventions. It represents the full // invoked ARN as provided on the `Context` passed to the function // (`Lambda-Runtime-Invoked-Function-ARN` header on the // `/runtime/invocation/next` applicable). func AWSLambdaInvokedARN(val string) attribute.KeyValue { return AWSLambdaInvokedARNKey.String(val) } // Attributes for CloudEvents. CloudEvents is a specification on how to define // event data in a standard way. These attributes can be attached to spans when // performing operations with CloudEvents, regardless of the protocol being // used. const ( // CloudeventsEventIDKey is the attribute Key conforming to the // "cloudevents.event_id" semantic conventions. It represents the // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) // uniquely identifies the event. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") // CloudeventsEventSourceKey is the attribute Key conforming to the // "cloudevents.event_source" semantic conventions. It represents the // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) // identifies the context in which an event happened. 
// // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'https://github.com/cloudevents', // '/cloudevents/spec/pull/123', 'my-service' CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") // CloudeventsEventSpecVersionKey is the attribute Key conforming to the // "cloudevents.event_spec_version" semantic conventions. It represents the // [version of the CloudEvents // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) // which the event uses. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.0' CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") // CloudeventsEventTypeKey is the attribute Key conforming to the // "cloudevents.event_type" semantic conventions. It represents the // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) // contains a value describing the type of event related to the originating // occurrence. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'com.github.pull_request.opened', // 'com.example.object.deleted.v2' CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") // CloudeventsEventSubjectKey is the attribute Key conforming to the // "cloudevents.event_subject" semantic conventions. It represents the // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) // of the event in the context of the event producer (identified by // source). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'mynewfile.jpg' CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") ) // CloudeventsEventID returns an attribute KeyValue conforming to the // "cloudevents.event_id" semantic conventions. It represents the // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) // uniquely identifies the event. func CloudeventsEventID(val string) attribute.KeyValue { return CloudeventsEventIDKey.String(val) } // CloudeventsEventSource returns an attribute KeyValue conforming to the // "cloudevents.event_source" semantic conventions. It represents the // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) // identifies the context in which an event happened. func CloudeventsEventSource(val string) attribute.KeyValue { return CloudeventsEventSourceKey.String(val) } // CloudeventsEventSpecVersion returns an attribute KeyValue conforming to // the "cloudevents.event_spec_version" semantic conventions. It represents the // [version of the CloudEvents // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) // which the event uses. func CloudeventsEventSpecVersion(val string) attribute.KeyValue { return CloudeventsEventSpecVersionKey.String(val) } // CloudeventsEventType returns an attribute KeyValue conforming to the // "cloudevents.event_type" semantic conventions. It represents the // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) // contains a value describing the type of event related to the originating // occurrence. func CloudeventsEventType(val string) attribute.KeyValue { return CloudeventsEventTypeKey.String(val) } // CloudeventsEventSubject returns an attribute KeyValue conforming to the // "cloudevents.event_subject" semantic conventions. 
It represents the // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) // of the event in the context of the event producer (identified by source). func CloudeventsEventSubject(val string) attribute.KeyValue { return CloudeventsEventSubjectKey.String(val) } // Semantic conventions for the OpenTracing Shim const ( // OpentracingRefTypeKey is the attribute Key conforming to the // "opentracing.ref_type" semantic conventions. It represents the // parent-child Reference type // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: The causal relationship between a child Span and a parent Span. OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") ) var ( // The parent Span depends on the child Span in some capacity OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") // The parent Span does not depend in any way on the result of the child Span OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") ) // The attributes used to perform database client calls. const ( // DBSystemKey is the attribute Key conforming to the "db.system" semantic // conventions. It represents an identifier for the database management // system (DBMS) product being used. See below for a list of well-known // identifiers. // // Type: Enum // RequirementLevel: Required // Stability: stable DBSystemKey = attribute.Key("db.system") // DBConnectionStringKey is the attribute Key conforming to the // "db.connection_string" semantic conventions. It represents the // connection string used to connect to the database. It is recommended to // remove embedded credentials. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' DBConnectionStringKey = attribute.Key("db.connection_string") // DBUserKey is the attribute Key conforming to the "db.user" semantic // conventions. It represents the username for accessing the database. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'readonly_user', 'reporting_user' DBUserKey = attribute.Key("db.user") // DBJDBCDriverClassnameKey is the attribute Key conforming to the // "db.jdbc.driver_classname" semantic conventions. It represents the // fully-qualified class name of the [Java Database Connectivity // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) // driver used to connect. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'org.postgresql.Driver', // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") // DBNameKey is the attribute Key conforming to the "db.name" semantic // conventions. It represents the this attribute is used to report the name // of the database being accessed. For commands that switch the database, // this should be set to the target database (even if the command fails). // // Type: string // RequirementLevel: ConditionallyRequired (If applicable.) // Stability: stable // Examples: 'customers', 'main' // Note: In some SQL databases, the database name to be used is called // "schema name". In case there are multiple layers that could be // considered for database name (e.g. Oracle instance name and schema // name), the database name to be used is the more specific layer (e.g. // Oracle schema name). DBNameKey = attribute.Key("db.name") // DBStatementKey is the attribute Key conforming to the "db.statement" // semantic conventions. 
It represents the database statement being // executed. // // Type: string // RequirementLevel: Recommended (Should be collected by default only if // there is sanitization that excludes sensitive information.) // Stability: stable // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' DBStatementKey = attribute.Key("db.statement") // DBOperationKey is the attribute Key conforming to the "db.operation" // semantic conventions. It represents the name of the operation being // executed, e.g. the [MongoDB command // name](https://docs.mongodb.com/manual/reference/command/#database-operations) // such as `findAndModify`, or the SQL keyword. // // Type: string // RequirementLevel: ConditionallyRequired (If `db.statement` is not // applicable.) // Stability: stable // Examples: 'findAndModify', 'HMSET', 'SELECT' // Note: When setting this to an SQL keyword, it is not recommended to // attempt any client-side parsing of `db.statement` just to get this // property, but it should be set if the operation name is provided by the // library being instrumented. If the SQL statement has an ambiguous // operation, or performs more than one operation, this value may be // omitted. DBOperationKey = attribute.Key("db.operation") ) var ( // Some other SQL database. Fallback only. See notes DBSystemOtherSQL = DBSystemKey.String("other_sql") // Microsoft SQL Server DBSystemMSSQL = DBSystemKey.String("mssql") // Microsoft SQL Server Compact DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") // MySQL DBSystemMySQL = DBSystemKey.String("mysql") // Oracle Database DBSystemOracle = DBSystemKey.String("oracle") // IBM DB2 DBSystemDB2 = DBSystemKey.String("db2") // PostgreSQL DBSystemPostgreSQL = DBSystemKey.String("postgresql") // Amazon Redshift DBSystemRedshift = DBSystemKey.String("redshift") // Apache Hive DBSystemHive = DBSystemKey.String("hive") // Cloudscape DBSystemCloudscape = DBSystemKey.String("cloudscape") // HyperSQL DataBase DBSystemHSQLDB = DBSystemKey.String("hsqldb") // Progress Database DBSystemProgress = DBSystemKey.String("progress") // SAP MaxDB DBSystemMaxDB = DBSystemKey.String("maxdb") // SAP HANA DBSystemHanaDB = DBSystemKey.String("hanadb") // Ingres DBSystemIngres = DBSystemKey.String("ingres") // FirstSQL DBSystemFirstSQL = DBSystemKey.String("firstsql") // EnterpriseDB DBSystemEDB = DBSystemKey.String("edb") // InterSystems Caché DBSystemCache = DBSystemKey.String("cache") // Adabas (Adaptable Database System) DBSystemAdabas = DBSystemKey.String("adabas") // Firebird DBSystemFirebird = DBSystemKey.String("firebird") // Apache Derby DBSystemDerby = DBSystemKey.String("derby") // FileMaker DBSystemFilemaker = DBSystemKey.String("filemaker") // Informix DBSystemInformix = DBSystemKey.String("informix") // InstantDB DBSystemInstantDB = DBSystemKey.String("instantdb") // InterBase DBSystemInterbase = DBSystemKey.String("interbase") // MariaDB DBSystemMariaDB = DBSystemKey.String("mariadb") // Netezza DBSystemNetezza = DBSystemKey.String("netezza") // Pervasive PSQL DBSystemPervasive = DBSystemKey.String("pervasive") // PointBase DBSystemPointbase = DBSystemKey.String("pointbase") // SQLite DBSystemSqlite = DBSystemKey.String("sqlite") // Sybase DBSystemSybase = DBSystemKey.String("sybase") // Teradata DBSystemTeradata = DBSystemKey.String("teradata") // Vertica DBSystemVertica = DBSystemKey.String("vertica") // H2 DBSystemH2 = DBSystemKey.String("h2") // ColdFusion IMQ DBSystemColdfusion = DBSystemKey.String("coldfusion") // Apache Cassandra DBSystemCassandra = 
DBSystemKey.String("cassandra") // Apache HBase DBSystemHBase = DBSystemKey.String("hbase") // MongoDB DBSystemMongoDB = DBSystemKey.String("mongodb") // Redis DBSystemRedis = DBSystemKey.String("redis") // Couchbase DBSystemCouchbase = DBSystemKey.String("couchbase") // CouchDB DBSystemCouchDB = DBSystemKey.String("couchdb") // Microsoft Azure Cosmos DB DBSystemCosmosDB = DBSystemKey.String("cosmosdb") // Amazon DynamoDB DBSystemDynamoDB = DBSystemKey.String("dynamodb") // Neo4j DBSystemNeo4j = DBSystemKey.String("neo4j") // Apache Geode DBSystemGeode = DBSystemKey.String("geode") // Elasticsearch DBSystemElasticsearch = DBSystemKey.String("elasticsearch") // Memcached DBSystemMemcached = DBSystemKey.String("memcached") // CockroachDB DBSystemCockroachdb = DBSystemKey.String("cockroachdb") // OpenSearch DBSystemOpensearch = DBSystemKey.String("opensearch") // ClickHouse DBSystemClickhouse = DBSystemKey.String("clickhouse") // Cloud Spanner DBSystemSpanner = DBSystemKey.String("spanner") // Trino DBSystemTrino = DBSystemKey.String("trino") ) // DBConnectionString returns an attribute KeyValue conforming to the // "db.connection_string" semantic conventions. It represents the connection // string used to connect to the database. It is recommended to remove embedded // credentials. func DBConnectionString(val string) attribute.KeyValue { return DBConnectionStringKey.String(val) } // DBUser returns an attribute KeyValue conforming to the "db.user" semantic // conventions. It represents the username for accessing the database. func DBUser(val string) attribute.KeyValue { return DBUserKey.String(val) } // DBJDBCDriverClassname returns an attribute KeyValue conforming to the // "db.jdbc.driver_classname" semantic conventions. It represents the // fully-qualified class name of the [Java Database Connectivity // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver // used to connect. func DBJDBCDriverClassname(val string) attribute.KeyValue { return DBJDBCDriverClassnameKey.String(val) } // DBName returns an attribute KeyValue conforming to the "db.name" semantic // conventions. It represents the this attribute is used to report the name of // the database being accessed. For commands that switch the database, this // should be set to the target database (even if the command fails). func DBName(val string) attribute.KeyValue { return DBNameKey.String(val) } // DBStatement returns an attribute KeyValue conforming to the // "db.statement" semantic conventions. It represents the database statement // being executed. func DBStatement(val string) attribute.KeyValue { return DBStatementKey.String(val) } // DBOperation returns an attribute KeyValue conforming to the // "db.operation" semantic conventions. It represents the name of the operation // being executed, e.g. the [MongoDB command // name](https://docs.mongodb.com/manual/reference/command/#database-operations) // such as `findAndModify`, or the SQL keyword. func DBOperation(val string) attribute.KeyValue { return DBOperationKey.String(val) } // Connection-level attributes for Microsoft SQL Server const ( // DBMSSQLInstanceNameKey is the attribute Key conforming to the // "db.mssql.instance_name" semantic conventions. It represents the // Microsoft SQL Server [instance // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) // connecting to. This name is used to determine the port of a named // instance. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MSSQLSERVER' // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no // longer required (but still recommended if non-standard). DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") ) // DBMSSQLInstanceName returns an attribute KeyValue conforming to the // "db.mssql.instance_name" semantic conventions. It represents the Microsoft // SQL Server [instance // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) // connecting to. This name is used to determine the port of a named instance. func DBMSSQLInstanceName(val string) attribute.KeyValue { return DBMSSQLInstanceNameKey.String(val) } // Call-level attributes for Cassandra const ( // DBCassandraPageSizeKey is the attribute Key conforming to the // "db.cassandra.page_size" semantic conventions. It represents the fetch // size used for paging, i.e. how many rows will be returned at once. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 5000 DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") // DBCassandraConsistencyLevelKey is the attribute Key conforming to the // "db.cassandra.consistency_level" semantic conventions. It represents the // consistency level of the query. Based on consistency values from // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). // // Type: Enum // RequirementLevel: Optional // Stability: stable DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") // DBCassandraTableKey is the attribute Key conforming to the // "db.cassandra.table" semantic conventions. It represents the name of the // primary table that the operation is acting upon, including the keyspace // name (if applicable). // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'mytable' // Note: This mirrors the db.sql.table attribute but references cassandra // rather than sql. It is not recommended to attempt any client-side // parsing of `db.statement` just to get this property, but it should be // set if it is provided by the library being instrumented. If the // operation is acting upon an anonymous table, or more than one table, // this value MUST NOT be set. DBCassandraTableKey = attribute.Key("db.cassandra.table") // DBCassandraIdempotenceKey is the attribute Key conforming to the // "db.cassandra.idempotence" semantic conventions. It represents the // whether or not the query is idempotent. // // Type: boolean // RequirementLevel: Optional // Stability: stable DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming // to the "db.cassandra.speculative_execution_count" semantic conventions. // It represents the number of times a query was speculatively executed. // Not set or `0` if the query was not executed speculatively. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 0, 2 DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") // DBCassandraCoordinatorIDKey is the attribute Key conforming to the // "db.cassandra.coordinator.id" semantic conventions. It represents the ID // of the coordinating node for a query. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") // DBCassandraCoordinatorDCKey is the attribute Key conforming to the // "db.cassandra.coordinator.dc" semantic conventions. It represents the // data center of the coordinating node for a query. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-west-2' DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") ) var ( // all DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") // each_quorum DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") // quorum DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") // local_quorum DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") // one DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") // two DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") // three DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") // local_one DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") // any DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") // serial DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") // local_serial DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") ) // DBCassandraPageSize returns an attribute KeyValue conforming to the // "db.cassandra.page_size" semantic conventions. It represents the fetch size // used for paging, i.e. how many rows will be returned at once. func DBCassandraPageSize(val int) attribute.KeyValue { return DBCassandraPageSizeKey.Int(val) } // DBCassandraTable returns an attribute KeyValue conforming to the // "db.cassandra.table" semantic conventions. It represents the name of the // primary table that the operation is acting upon, including the keyspace name // (if applicable). func DBCassandraTable(val string) attribute.KeyValue { return DBCassandraTableKey.String(val) } // DBCassandraIdempotence returns an attribute KeyValue conforming to the // "db.cassandra.idempotence" semantic conventions. It represents the whether // or not the query is idempotent. func DBCassandraIdempotence(val bool) attribute.KeyValue { return DBCassandraIdempotenceKey.Bool(val) } // DBCassandraSpeculativeExecutionCount returns an attribute KeyValue // conforming to the "db.cassandra.speculative_execution_count" semantic // conventions. It represents the number of times a query was speculatively // executed. Not set or `0` if the query was not executed speculatively. func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { return DBCassandraSpeculativeExecutionCountKey.Int(val) } // DBCassandraCoordinatorID returns an attribute KeyValue conforming to the // "db.cassandra.coordinator.id" semantic conventions. It represents the ID of // the coordinating node for a query. func DBCassandraCoordinatorID(val string) attribute.KeyValue { return DBCassandraCoordinatorIDKey.String(val) } // DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the // "db.cassandra.coordinator.dc" semantic conventions. It represents the data // center of the coordinating node for a query. 
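//
// Illustrative editor-added sketch (not part of the generated conventions):
// the Cassandra call-level attributes above might be combined on a database
// client span roughly as follows, assuming a trace.Span named span from
// go.opentelemetry.io/otel/trace; the literal values are taken from the
// Examples documented above.
//
//	span.SetAttributes(
//		semconv.DBSystemCassandra,
//		semconv.DBCassandraTable("mytable"),
//		semconv.DBCassandraCoordinatorID("be13faa2-8574-4d71-926d-27f16cf8a7af"),
//		semconv.DBCassandraCoordinatorDC("us-west-2"),
//	)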
func DBCassandraCoordinatorDC(val string) attribute.KeyValue { return DBCassandraCoordinatorDCKey.String(val) } // Call-level attributes for Redis const ( // DBRedisDBIndexKey is the attribute Key conforming to the // "db.redis.database_index" semantic conventions. It represents the index // of the database being accessed as used in the [`SELECT` // command](https://redis.io/commands/select), provided as an integer. To // be used instead of the generic `db.name` attribute. // // Type: int // RequirementLevel: ConditionallyRequired (If other than the default // database (`0`).) // Stability: stable // Examples: 0, 1, 15 DBRedisDBIndexKey = attribute.Key("db.redis.database_index") ) // DBRedisDBIndex returns an attribute KeyValue conforming to the // "db.redis.database_index" semantic conventions. It represents the index of // the database being accessed as used in the [`SELECT` // command](https://redis.io/commands/select), provided as an integer. To be // used instead of the generic `db.name` attribute. func DBRedisDBIndex(val int) attribute.KeyValue { return DBRedisDBIndexKey.Int(val) } // Call-level attributes for MongoDB const ( // DBMongoDBCollectionKey is the attribute Key conforming to the // "db.mongodb.collection" semantic conventions. It represents the // collection being accessed within the database stated in `db.name`. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'customers', 'products' DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") ) // DBMongoDBCollection returns an attribute KeyValue conforming to the // "db.mongodb.collection" semantic conventions. It represents the collection // being accessed within the database stated in `db.name`. func DBMongoDBCollection(val string) attribute.KeyValue { return DBMongoDBCollectionKey.String(val) } // Call-level attributes for SQL databases const ( // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" // semantic conventions. It represents the name of the primary table that // the operation is acting upon, including the database name (if // applicable). // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'public.users', 'customers' // Note: It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting // upon an anonymous table, or more than one table, this value MUST NOT be // set. DBSQLTableKey = attribute.Key("db.sql.table") ) // DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" // semantic conventions. It represents the name of the primary table that the // operation is acting upon, including the database name (if applicable). func DBSQLTable(val string) attribute.KeyValue { return DBSQLTableKey.String(val) } // Call-level attributes for Cosmos DB. const ( // DBCosmosDBClientIDKey is the attribute Key conforming to the // "db.cosmosdb.client_id" semantic conventions. It represents the unique // Cosmos client instance id. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") // DBCosmosDBOperationTypeKey is the attribute Key conforming to the // "db.cosmosdb.operation_type" semantic conventions. It represents the // cosmosDB Operation Type. 
// // Type: Enum // RequirementLevel: ConditionallyRequired (when performing one of the // operations in this list) // Stability: stable DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") // DBCosmosDBConnectionModeKey is the attribute Key conforming to the // "db.cosmosdb.connection_mode" semantic conventions. It represents the // cosmos client connection mode. // // Type: Enum // RequirementLevel: ConditionallyRequired (if not `direct` (or pick gw as // default)) // Stability: stable DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") // DBCosmosDBContainerKey is the attribute Key conforming to the // "db.cosmosdb.container" semantic conventions. It represents the cosmos // DB container name. // // Type: string // RequirementLevel: ConditionallyRequired (if available) // Stability: stable // Examples: 'anystring' DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the // "db.cosmosdb.request_content_length" semantic conventions. It represents // the request payload size in bytes // // Type: int // RequirementLevel: Optional // Stability: stable DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") // DBCosmosDBStatusCodeKey is the attribute Key conforming to the // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos // DB status code. // // Type: int // RequirementLevel: ConditionallyRequired (if response was received) // Stability: stable // Examples: 200, 201 DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the // "db.cosmosdb.sub_status_code" semantic conventions. It represents the // cosmos DB sub status code. // // Type: int // RequirementLevel: ConditionallyRequired (when response was received and // contained sub-code.) // Stability: stable // Examples: 1000, 1002 DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") // DBCosmosDBRequestChargeKey is the attribute Key conforming to the // "db.cosmosdb.request_charge" semantic conventions. 
It represents the rU // consumed for that operation // // Type: double // RequirementLevel: ConditionallyRequired (when available) // Stability: stable // Examples: 46.18, 1.0 DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") ) var ( // invalid DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") // create DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") // patch DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") // read DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") // read_feed DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") // delete DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") // replace DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") // execute DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") // query DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") // head DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") // head_feed DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") // upsert DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") // batch DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") // query_plan DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") // execute_javascript DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") ) var ( // Gateway (HTTP) connections mode DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") // Direct connection DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") ) // DBCosmosDBClientID returns an attribute KeyValue conforming to the // "db.cosmosdb.client_id" semantic conventions. It represents the unique // Cosmos client instance id. func DBCosmosDBClientID(val string) attribute.KeyValue { return DBCosmosDBClientIDKey.String(val) } // DBCosmosDBContainer returns an attribute KeyValue conforming to the // "db.cosmosdb.container" semantic conventions. It represents the cosmos DB // container name. func DBCosmosDBContainer(val string) attribute.KeyValue { return DBCosmosDBContainerKey.String(val) } // DBCosmosDBRequestContentLength returns an attribute KeyValue conforming // to the "db.cosmosdb.request_content_length" semantic conventions. It // represents the request payload size in bytes func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { return DBCosmosDBRequestContentLengthKey.Int(val) } // DBCosmosDBStatusCode returns an attribute KeyValue conforming to the // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB // status code. func DBCosmosDBStatusCode(val int) attribute.KeyValue { return DBCosmosDBStatusCodeKey.Int(val) } // DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the // "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos // DB sub status code. func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { return DBCosmosDBSubStatusCodeKey.Int(val) } // DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the // "db.cosmosdb.request_charge" semantic conventions. 
It represents the rU // consumed for that operation func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { return DBCosmosDBRequestChargeKey.Float64(val) } // Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's // concepts. const ( // OTelStatusCodeKey is the attribute Key conforming to the // "otel.status_code" semantic conventions. It represents the name of the // code, either "OK" or "ERROR". MUST NOT be set if the status code is // UNSET. // // Type: Enum // RequirementLevel: Optional // Stability: stable OTelStatusCodeKey = attribute.Key("otel.status_code") // OTelStatusDescriptionKey is the attribute Key conforming to the // "otel.status_description" semantic conventions. It represents the // description of the Status if it has a value, otherwise not set. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'resource not found' OTelStatusDescriptionKey = attribute.Key("otel.status_description") ) var ( // The operation has been validated by an Application developer or Operator to have completed successfully OTelStatusCodeOk = OTelStatusCodeKey.String("OK") // The operation contains an error OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") ) // OTelStatusDescription returns an attribute KeyValue conforming to the // "otel.status_description" semantic conventions. It represents the // description of the Status if it has a value, otherwise not set. func OTelStatusDescription(val string) attribute.KeyValue { return OTelStatusDescriptionKey.String(val) } // This semantic convention describes an instance of a function that runs // without provisioning or managing of servers (also known as serverless // functions or Function as a Service (FaaS)) with spans. const ( // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" // semantic conventions. It represents the type of the trigger which caused // this function invocation. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: For the server/consumer span on the incoming side, // `faas.trigger` MUST be set. // // Clients invoking FaaS instances usually cannot set `faas.trigger`, // since they would typically need to look in the payload to determine // the event type. If clients set it, it should be the same as the // trigger that corresponding incoming would have (i.e., this has // nothing to do with the underlying transport used to make the API // call to invoke the lambda, which is often HTTP). FaaSTriggerKey = attribute.Key("faas.trigger") // FaaSInvocationIDKey is the attribute Key conforming to the // "faas.invocation_id" semantic conventions. It represents the invocation // ID of the current function invocation. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' FaaSInvocationIDKey = attribute.Key("faas.invocation_id") ) var ( // A response to some data source operation such as a database or filesystem read/write FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") // To provide an answer to an inbound HTTP request FaaSTriggerHTTP = FaaSTriggerKey.String("http") // A function is set to be executed when messages are sent to a messaging system FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") // A function is scheduled to be executed regularly FaaSTriggerTimer = FaaSTriggerKey.String("timer") // If none of the others apply FaaSTriggerOther = FaaSTriggerKey.String("other") ) // FaaSInvocationID returns an attribute KeyValue conforming to the // "faas.invocation_id" semantic conventions. It represents the invocation ID // of the current function invocation. func FaaSInvocationID(val string) attribute.KeyValue { return FaaSInvocationIDKey.String(val) } // Semantic Convention for FaaS triggered as a response to some data source // operation such as a database or filesystem read/write. const ( // FaaSDocumentCollectionKey is the attribute Key conforming to the // "faas.document.collection" semantic conventions. It represents the name // of the source on which the triggering operation was performed. For // example, in Cloud Storage or S3 corresponds to the bucket name, and in // Cosmos DB to the database name. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myBucketName', 'myDBName' FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") // FaaSDocumentOperationKey is the attribute Key conforming to the // "faas.document.operation" semantic conventions. It represents the // describes the type of the operation that was performed on the data. // // Type: Enum // RequirementLevel: Required // Stability: stable FaaSDocumentOperationKey = attribute.Key("faas.document.operation") // FaaSDocumentTimeKey is the attribute Key conforming to the // "faas.document.time" semantic conventions. It represents a string // containing the time when the data was accessed in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSDocumentTimeKey = attribute.Key("faas.document.time") // FaaSDocumentNameKey is the attribute Key conforming to the // "faas.document.name" semantic conventions. It represents the document // name/table subjected to the operation. For example, in Cloud Storage or // S3 is the name of the file, and in Cosmos DB the table name. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'myFile.txt', 'myTableName' FaaSDocumentNameKey = attribute.Key("faas.document.name") ) var ( // When a new object is created FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") // When an object is modified FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") // When an object is deleted FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") ) // FaaSDocumentCollection returns an attribute KeyValue conforming to the // "faas.document.collection" semantic conventions. It represents the name of // the source on which the triggering operation was performed. 
For example, in // Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the // database name. func FaaSDocumentCollection(val string) attribute.KeyValue { return FaaSDocumentCollectionKey.String(val) } // FaaSDocumentTime returns an attribute KeyValue conforming to the // "faas.document.time" semantic conventions. It represents a string containing // the time when the data was accessed in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). func FaaSDocumentTime(val string) attribute.KeyValue { return FaaSDocumentTimeKey.String(val) } // FaaSDocumentName returns an attribute KeyValue conforming to the // "faas.document.name" semantic conventions. It represents the document // name/table subjected to the operation. For example, in Cloud Storage or S3 // is the name of the file, and in Cosmos DB the table name. func FaaSDocumentName(val string) attribute.KeyValue { return FaaSDocumentNameKey.String(val) } // Semantic Convention for FaaS scheduled to be executed regularly. const ( // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic // conventions. It represents a string containing the function invocation // time in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSTimeKey = attribute.Key("faas.time") // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic // conventions. It represents a string containing the schedule period as // [Cron // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0/5 * * * ? *' FaaSCronKey = attribute.Key("faas.cron") ) // FaaSTime returns an attribute KeyValue conforming to the "faas.time" // semantic conventions. It represents a string containing the function // invocation time in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). func FaaSTime(val string) attribute.KeyValue { return FaaSTimeKey.String(val) } // FaaSCron returns an attribute KeyValue conforming to the "faas.cron" // semantic conventions. It represents a string containing the schedule period // as [Cron // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). func FaaSCron(val string) attribute.KeyValue { return FaaSCronKey.String(val) } // Contains additional attributes for incoming FaaS spans. const ( // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" // semantic conventions. It represents a boolean that is true if the // serverless function is executed for the first time (aka cold-start). // // Type: boolean // RequirementLevel: Optional // Stability: stable FaaSColdstartKey = attribute.Key("faas.coldstart") ) // FaaSColdstart returns an attribute KeyValue conforming to the // "faas.coldstart" semantic conventions. It represents a boolean that is true // if the serverless function is executed for the first time (aka cold-start). func FaaSColdstart(val bool) attribute.KeyValue { return FaaSColdstartKey.Bool(val) } // Contains additional attributes for outgoing FaaS spans. 
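//
// Illustrative editor-added sketch (not generated content): on the client
// side, the outgoing-invocation constructors declared in the following
// block might be attached to a span roughly as shown here, assuming a
// trace.Span named span from go.opentelemetry.io/otel/trace; the literal
// values follow the Examples documented below.
//
//	span.SetAttributes(
//		semconv.FaaSInvokedName("my-function"),
//		semconv.FaaSInvokedProviderAWS,
//		semconv.FaaSInvokedRegion("eu-central-1"),
//	)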
const ( // FaaSInvokedNameKey is the attribute Key conforming to the // "faas.invoked_name" semantic conventions. It represents the name of the // invoked function. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'my-function' // Note: SHOULD be equal to the `faas.name` resource attribute of the // invoked function. FaaSInvokedNameKey = attribute.Key("faas.invoked_name") // FaaSInvokedProviderKey is the attribute Key conforming to the // "faas.invoked_provider" semantic conventions. It represents the cloud // provider of the invoked function. // // Type: Enum // RequirementLevel: Required // Stability: stable // Note: SHOULD be equal to the `cloud.provider` resource attribute of the // invoked function. FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") // FaaSInvokedRegionKey is the attribute Key conforming to the // "faas.invoked_region" semantic conventions. It represents the cloud // region of the invoked function. // // Type: string // RequirementLevel: ConditionallyRequired (For some cloud providers, like // AWS or GCP, the region in which a function is hosted is essential to // uniquely identify the function and also part of its endpoint. Since it's // part of the endpoint being called, the region is always known to // clients. In these cases, `faas.invoked_region` MUST be set accordingly. // If the region is unknown to the client or not required for identifying // the invoked function, setting `faas.invoked_region` is optional.) // Stability: stable // Examples: 'eu-central-1' // Note: SHOULD be equal to the `cloud.region` resource attribute of the // invoked function. FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") ) var ( // Alibaba Cloud FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") // Amazon Web Services FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") // Microsoft Azure FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") // Google Cloud Platform FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") // Tencent Cloud FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") ) // FaaSInvokedName returns an attribute KeyValue conforming to the // "faas.invoked_name" semantic conventions. It represents the name of the // invoked function. func FaaSInvokedName(val string) attribute.KeyValue { return FaaSInvokedNameKey.String(val) } // FaaSInvokedRegion returns an attribute KeyValue conforming to the // "faas.invoked_region" semantic conventions. It represents the cloud region // of the invoked function. func FaaSInvokedRegion(val string) attribute.KeyValue { return FaaSInvokedRegionKey.String(val) } // Operations that access some remote service. const ( // PeerServiceKey is the attribute Key conforming to the "peer.service" // semantic conventions. It represents the // [`service.name`](../../resource/semantic_conventions/README.md#service) // of the remote service. SHOULD be equal to the actual `service.name` // resource attribute of the remote service if any. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'AuthTokenCache' PeerServiceKey = attribute.Key("peer.service") ) // PeerService returns an attribute KeyValue conforming to the // "peer.service" semantic conventions. It represents the // [`service.name`](../../resource/semantic_conventions/README.md#service) of // the remote service. SHOULD be equal to the actual `service.name` resource // attribute of the remote service if any. 
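//
// Illustrative editor-added sketch (not generated content): a client span
// for a call to a remote service might carry this attribute via, e.g.,
// span.SetAttributes(semconv.PeerService("AuthTokenCache")), assuming a
// trace.Span named span from go.opentelemetry.io/otel/trace; the value
// reuses the Example documented above.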
func PeerService(val string) attribute.KeyValue { return PeerServiceKey.String(val) } // These attributes may be used for any operation with an authenticated and/or // authorized enduser. const ( // EnduserIDKey is the attribute Key conforming to the "enduser.id" // semantic conventions. It represents the username or client_id extracted // from the access token or // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header // in the inbound request from outside the system. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'username' EnduserIDKey = attribute.Key("enduser.id") // EnduserRoleKey is the attribute Key conforming to the "enduser.role" // semantic conventions. It represents the actual/assumed role the client // is making the request under extracted from token or application security // context. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'admin' EnduserRoleKey = attribute.Key("enduser.role") // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" // semantic conventions. It represents the scopes or granted authorities // the client currently possesses extracted from token or application // security context. The value would come from the scope associated with an // [OAuth 2.0 Access // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute // value in a [SAML 2.0 // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'read:message, write:files' EnduserScopeKey = attribute.Key("enduser.scope") ) // EnduserID returns an attribute KeyValue conforming to the "enduser.id" // semantic conventions. It represents the username or client_id extracted from // the access token or // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in // the inbound request from outside the system. func EnduserID(val string) attribute.KeyValue { return EnduserIDKey.String(val) } // EnduserRole returns an attribute KeyValue conforming to the // "enduser.role" semantic conventions. It represents the actual/assumed role // the client is making the request under extracted from token or application // security context. func EnduserRole(val string) attribute.KeyValue { return EnduserRoleKey.String(val) } // EnduserScope returns an attribute KeyValue conforming to the // "enduser.scope" semantic conventions. It represents the scopes or granted // authorities the client currently possesses extracted from token or // application security context. The value would come from the scope associated // with an [OAuth 2.0 Access // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute // value in a [SAML 2.0 // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). func EnduserScope(val string) attribute.KeyValue { return EnduserScopeKey.String(val) } // These attributes may be used for any operation to store information about a // thread that started a span. const ( // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic // conventions. It represents the current "managed" thread ID (as opposed // to OS thread ID). // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 42 ThreadIDKey = attribute.Key("thread.id") // ThreadNameKey is the attribute Key conforming to the "thread.name" // semantic conventions. It represents the current thread name. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'main' ThreadNameKey = attribute.Key("thread.name") ) // ThreadID returns an attribute KeyValue conforming to the "thread.id" // semantic conventions. It represents the current "managed" thread ID (as // opposed to OS thread ID). func ThreadID(val int) attribute.KeyValue { return ThreadIDKey.Int(val) } // ThreadName returns an attribute KeyValue conforming to the "thread.name" // semantic conventions. It represents the current thread name. func ThreadName(val string) attribute.KeyValue { return ThreadNameKey.String(val) } // These attributes allow to report this unit of code and therefore to provide // more context about the span. const ( // CodeFunctionKey is the attribute Key conforming to the "code.function" // semantic conventions. It represents the method or function name, or // equivalent (usually rightmost part of the code unit's name). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'serveRequest' CodeFunctionKey = attribute.Key("code.function") // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" // semantic conventions. It represents the "namespace" within which // `code.function` is defined. Usually the qualified class or module name, // such that `code.namespace` + some separator + `code.function` form a // unique identifier for the code unit. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'com.example.MyHTTPService' CodeNamespaceKey = attribute.Key("code.namespace") // CodeFilepathKey is the attribute Key conforming to the "code.filepath" // semantic conventions. It represents the source code file name that // identifies the code unit as uniquely as possible (preferably an absolute // file path). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '/usr/local/MyApplication/content_root/app/index.php' CodeFilepathKey = attribute.Key("code.filepath") // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" // semantic conventions. It represents the line number in `code.filepath` // best representing the operation. It SHOULD point within the code unit // named in `code.function`. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 42 CodeLineNumberKey = attribute.Key("code.lineno") // CodeColumnKey is the attribute Key conforming to the "code.column" // semantic conventions. It represents the column number in `code.filepath` // best representing the operation. It SHOULD point within the code unit // named in `code.function`. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 16 CodeColumnKey = attribute.Key("code.column") ) // CodeFunction returns an attribute KeyValue conforming to the // "code.function" semantic conventions. It represents the method or function // name, or equivalent (usually rightmost part of the code unit's name). func CodeFunction(val string) attribute.KeyValue { return CodeFunctionKey.String(val) } // CodeNamespace returns an attribute KeyValue conforming to the // "code.namespace" semantic conventions. It represents the "namespace" within // which `code.function` is defined. Usually the qualified class or module // name, such that `code.namespace` + some separator + `code.function` form a // unique identifier for the code unit. 
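//
// Illustrative editor-added sketch (not generated content): the code.*
// attributes above might be set together so that a span points at the
// emitting code unit, assuming a trace.Span named span from
// go.opentelemetry.io/otel/trace; the literal values reuse the Examples
// documented above.
//
//	span.SetAttributes(
//		semconv.CodeFunction("serveRequest"),
//		semconv.CodeNamespace("com.example.MyHTTPService"),
//		semconv.CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
//		semconv.CodeLineNumber(42),
//	)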
func CodeNamespace(val string) attribute.KeyValue { return CodeNamespaceKey.String(val) } // CodeFilepath returns an attribute KeyValue conforming to the // "code.filepath" semantic conventions. It represents the source code file // name that identifies the code unit as uniquely as possible (preferably an // absolute file path). func CodeFilepath(val string) attribute.KeyValue { return CodeFilepathKey.String(val) } // CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" // semantic conventions. It represents the line number in `code.filepath` best // representing the operation. It SHOULD point within the code unit named in // `code.function`. func CodeLineNumber(val int) attribute.KeyValue { return CodeLineNumberKey.Int(val) } // CodeColumn returns an attribute KeyValue conforming to the "code.column" // semantic conventions. It represents the column number in `code.filepath` // best representing the operation. It SHOULD point within the code unit named // in `code.function`. func CodeColumn(val int) attribute.KeyValue { return CodeColumnKey.Int(val) } // Semantic Convention for HTTP Client const ( // HTTPURLKey is the attribute Key conforming to the "http.url" semantic // conventions. It represents the full HTTP request URL in the form // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is // not transmitted over HTTP, but if it is known, it should be included // nevertheless. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' // Note: `http.url` MUST NOT contain credentials passed via URL in form of // `https://username:password@www.example.com/`. In such case the // attribute's value should be `https://www.example.com/`. HTTPURLKey = attribute.Key("http.url") // HTTPResendCountKey is the attribute Key conforming to the // "http.resend_count" semantic conventions. It represents the ordinal // number of request resending attempt (for any reason, including // redirects). // // Type: int // RequirementLevel: Recommended (if and only if request was retried.) // Stability: stable // Examples: 3 // Note: The resend count SHOULD be updated each time an HTTP request gets // resent by the client, regardless of what was the cause of the resending // (e.g. redirection, authorization failure, 503 Server Unavailable, // network issues, or any other). HTTPResendCountKey = attribute.Key("http.resend_count") ) // HTTPURL returns an attribute KeyValue conforming to the "http.url" // semantic conventions. It represents the full HTTP request URL in the form // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not // transmitted over HTTP, but if it is known, it should be included // nevertheless. func HTTPURL(val string) attribute.KeyValue { return HTTPURLKey.String(val) } // HTTPResendCount returns an attribute KeyValue conforming to the // "http.resend_count" semantic conventions. It represents the ordinal number // of request resending attempt (for any reason, including redirects). func HTTPResendCount(val int) attribute.KeyValue { return HTTPResendCountKey.Int(val) } // Semantic Convention for HTTP Server const ( // HTTPTargetKey is the attribute Key conforming to the "http.target" // semantic conventions. It represents the full request target as passed in // a HTTP request line or equivalent. 
// // Type: string // RequirementLevel: Required // Stability: stable // Examples: '/users/12314/?q=ddds' HTTPTargetKey = attribute.Key("http.target") // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip" // semantic conventions. It represents the IP address of the original // client behind all proxies, if known (e.g. from // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '83.164.160.102' // Note: This is not necessarily the same as `net.sock.peer.addr`, which // would // identify the network-level peer, which may be a proxy. // // This attribute should be set when a source of information different // from the one used for `net.sock.peer.addr`, is available even if that // other // source just confirms the same value as `net.sock.peer.addr`. // Rationale: For `net.sock.peer.addr`, one typically does not know if it // comes from a proxy, reverse proxy, or the actual client. Setting // `http.client_ip` when it's the same as `net.sock.peer.addr` means that // one is at least somewhat confident that the address is not that of // the closest proxy. HTTPClientIPKey = attribute.Key("http.client_ip") ) // HTTPTarget returns an attribute KeyValue conforming to the "http.target" // semantic conventions. It represents the full request target as passed in a // HTTP request line or equivalent. func HTTPTarget(val string) attribute.KeyValue { return HTTPTargetKey.String(val) } // HTTPClientIP returns an attribute KeyValue conforming to the // "http.client_ip" semantic conventions. It represents the IP address of the // original client behind all proxies, if known (e.g. from // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). func HTTPClientIP(val string) attribute.KeyValue { return HTTPClientIPKey.String(val) } // The `aws` conventions apply to operations using the AWS SDK. They map // request or response parameters in AWS SDK API calls to attributes on a Span. // The conventions have been collected over time based on feedback from AWS // users of tracing and will continue to evolve as new interesting conventions // are found. // Some descriptions are also provided for populating general OpenTelemetry // semantic conventions based on these APIs. const ( // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" // semantic conventions. It represents the AWS request ID as returned in // the response headers `x-amz-request-id` or `x-amz-requestid`. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' AWSRequestIDKey = attribute.Key("aws.request_id") ) // AWSRequestID returns an attribute KeyValue conforming to the // "aws.request_id" semantic conventions. It represents the AWS request ID as // returned in the response headers `x-amz-request-id` or `x-amz-requestid`. func AWSRequestID(val string) attribute.KeyValue { return AWSRequestIDKey.String(val) } // Attributes that exist for multiple DynamoDB request types. const ( // AWSDynamoDBTableNamesKey is the attribute Key conforming to the // "aws.dynamodb.table_names" semantic conventions. It represents the keys // in the `RequestItems` object field. 
// // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'Users', 'Cats' AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the // JSON-serialized value of each item in the `ConsumedCapacity` response // field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : // { "CapacityUnits": number, "ReadCapacityUnits": number, // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": // { "CapacityUnits": number, "ReadCapacityUnits": number, // "WriteCapacityUnits": number }, "TableName": "string", // "WriteCapacityUnits": number }' AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to // the "aws.dynamodb.item_collection_metrics" semantic conventions. It // represents the JSON-serialized value of the `ItemCollectionMetrics` // response field. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, // "SizeEstimateRangeGB": [ number ] } ] }' AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` // request parameter. // // Type: double // RequirementLevel: Optional // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. // It represents the value of the // `ProvisionedThroughput.WriteCapacityUnits` request parameter. // // Type: double // RequirementLevel: Optional // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the // "aws.dynamodb.consistent_read" semantic conventions. It represents the // value of the `ConsistentRead` request parameter. // // Type: boolean // RequirementLevel: Optional // Stability: stable AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") // AWSDynamoDBProjectionKey is the attribute Key conforming to the // "aws.dynamodb.projection" semantic conventions. It represents the value // of the `ProjectionExpression` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Title', 'Title, Price, Color', 'Title, Description, // RelatedItems, ProductReviews' AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") // AWSDynamoDBLimitKey is the attribute Key conforming to the // "aws.dynamodb.limit" semantic conventions. 
It represents the value of // the `Limit` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the // value of the `AttributesToGet` request parameter. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'lives', 'id' AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") // AWSDynamoDBIndexNameKey is the attribute Key conforming to the // "aws.dynamodb.index_name" semantic conventions. It represents the value // of the `IndexName` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'name_to_group' AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") // AWSDynamoDBSelectKey is the attribute Key conforming to the // "aws.dynamodb.select" semantic conventions. It represents the value of // the `Select` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'ALL_ATTRIBUTES', 'COUNT' AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") ) // AWSDynamoDBTableNames returns an attribute KeyValue conforming to the // "aws.dynamodb.table_names" semantic conventions. It represents the keys in // the `RequestItems` object field. func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { return AWSDynamoDBTableNamesKey.StringSlice(val) } // AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to // the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the // JSON-serialized value of each item in the `ConsumedCapacity` response field. func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { return AWSDynamoDBConsumedCapacityKey.StringSlice(val) } // AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming // to the "aws.dynamodb.item_collection_metrics" semantic conventions. It // represents the JSON-serialized value of the `ItemCollectionMetrics` response // field. func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { return AWSDynamoDBItemCollectionMetricsKey.String(val) } // AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue // conforming to the "aws.dynamodb.provisioned_read_capacity" semantic // conventions. It represents the value of the // `ProvisionedThroughput.ReadCapacityUnits` request parameter. func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) } // AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue // conforming to the "aws.dynamodb.provisioned_write_capacity" semantic // conventions. It represents the value of the // `ProvisionedThroughput.WriteCapacityUnits` request parameter. func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) } // AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the // "aws.dynamodb.consistent_read" semantic conventions. It represents the value // of the `ConsistentRead` request parameter. func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { return AWSDynamoDBConsistentReadKey.Bool(val) } // AWSDynamoDBProjection returns an attribute KeyValue conforming to the // "aws.dynamodb.projection" semantic conventions. 
It represents the value of // the `ProjectionExpression` request parameter. func AWSDynamoDBProjection(val string) attribute.KeyValue { return AWSDynamoDBProjectionKey.String(val) } // AWSDynamoDBLimit returns an attribute KeyValue conforming to the // "aws.dynamodb.limit" semantic conventions. It represents the value of the // `Limit` request parameter. func AWSDynamoDBLimit(val int) attribute.KeyValue { return AWSDynamoDBLimitKey.Int(val) } // AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to // the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the // value of the `AttributesToGet` request parameter. func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { return AWSDynamoDBAttributesToGetKey.StringSlice(val) } // AWSDynamoDBIndexName returns an attribute KeyValue conforming to the // "aws.dynamodb.index_name" semantic conventions. It represents the value of // the `IndexName` request parameter. func AWSDynamoDBIndexName(val string) attribute.KeyValue { return AWSDynamoDBIndexNameKey.String(val) } // AWSDynamoDBSelect returns an attribute KeyValue conforming to the // "aws.dynamodb.select" semantic conventions. It represents the value of the // `Select` request parameter. func AWSDynamoDBSelect(val string) attribute.KeyValue { return AWSDynamoDBSelectKey.String(val) } // DynamoDB.CreateTable const ( // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It // represents the JSON-serialized value of each item of the // `GlobalSecondaryIndexes` request field // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It // represents the JSON-serialized value of each item of the // `LocalSecondaryIndexes` request field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "IndexARN": "string", "IndexName": "string", // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { // "AttributeName": "string", "KeyType": "string" } ], "Projection": { // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") ) // AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue // conforming to the "aws.dynamodb.global_secondary_indexes" semantic // conventions. It represents the JSON-serialized value of each item of the // `GlobalSecondaryIndexes` request field func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) } // AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming // to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It // represents the JSON-serialized value of each item of the // `LocalSecondaryIndexes` request field. 
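//
// For illustration only (an editor's sketch, not generated from the
// semantic convention specification): a caller of this package that has
// already started a span named `span` might record the DynamoDB attributes
// defined above as follows, using hypothetical example values.
//
//	span.SetAttributes(
//		semconv.AWSDynamoDBTableNames("Cats"),
//		semconv.AWSDynamoDBGlobalSecondaryIndexes(`{"IndexName": "name_index"}`),
//		semconv.AWSDynamoDBLocalSecondaryIndexes(`{"IndexName": "by_owner"}`),
//	)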
func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) } // DynamoDB.ListTables const ( // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents // the value of the `ExclusiveStartTableName` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Users', 'CatsTable' AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") // AWSDynamoDBTableCountKey is the attribute Key conforming to the // "aws.dynamodb.table_count" semantic conventions. It represents the // number of items in the `TableNames` response parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 20 AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") ) // AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming // to the "aws.dynamodb.exclusive_start_table" semantic conventions. It // represents the value of the `ExclusiveStartTableName` request parameter. func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { return AWSDynamoDBExclusiveStartTableKey.String(val) } // AWSDynamoDBTableCount returns an attribute KeyValue conforming to the // "aws.dynamodb.table_count" semantic conventions. It represents the // number of items in the `TableNames` response parameter. func AWSDynamoDBTableCount(val int) attribute.KeyValue { return AWSDynamoDBTableCountKey.Int(val) } // DynamoDB.Query const ( // AWSDynamoDBScanForwardKey is the attribute Key conforming to the // "aws.dynamodb.scan_forward" semantic conventions. It represents the // value of the `ScanIndexForward` request parameter. // // Type: boolean // RequirementLevel: Optional // Stability: stable AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") ) // AWSDynamoDBScanForward returns an attribute KeyValue conforming to the // "aws.dynamodb.scan_forward" semantic conventions. It represents the value of // the `ScanIndexForward` request parameter. func AWSDynamoDBScanForward(val bool) attribute.KeyValue { return AWSDynamoDBScanForwardKey.Bool(val) } // DynamoDB.Scan const ( // AWSDynamoDBSegmentKey is the attribute Key conforming to the // "aws.dynamodb.segment" semantic conventions. It represents the value of // the `Segment` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the // "aws.dynamodb.total_segments" semantic conventions. It represents the // value of the `TotalSegments` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 100 AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") // AWSDynamoDBCountKey is the attribute Key conforming to the // "aws.dynamodb.count" semantic conventions. It represents the value of // the `Count` response parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") // AWSDynamoDBScannedCountKey is the attribute Key conforming to the // "aws.dynamodb.scanned_count" semantic conventions. It represents the // value of the `ScannedCount` response parameter.
// // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 50 AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") ) // AWSDynamoDBSegment returns an attribute KeyValue conforming to the // "aws.dynamodb.segment" semantic conventions. It represents the value of the // `Segment` request parameter. func AWSDynamoDBSegment(val int) attribute.KeyValue { return AWSDynamoDBSegmentKey.Int(val) } // AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the // "aws.dynamodb.total_segments" semantic conventions. It represents the value // of the `TotalSegments` request parameter. func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { return AWSDynamoDBTotalSegmentsKey.Int(val) } // AWSDynamoDBCount returns an attribute KeyValue conforming to the // "aws.dynamodb.count" semantic conventions. It represents the value of the // `Count` response parameter. func AWSDynamoDBCount(val int) attribute.KeyValue { return AWSDynamoDBCountKey.Int(val) } // AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the // "aws.dynamodb.scanned_count" semantic conventions. It represents the value // of the `ScannedCount` response parameter. func AWSDynamoDBScannedCount(val int) attribute.KeyValue { return AWSDynamoDBScannedCountKey.Int(val) } // DynamoDB.UpdateTable const ( // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to // the "aws.dynamodb.attribute_definitions" semantic conventions. It // represents the JSON-serialized value of each item in the // `AttributeDefinitions` request field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic // conventions. It represents the JSON-serialized value of each item in the // `GlobalSecondaryIndexUpdates` request field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { // "AttributeName": "string", "KeyType": "string" } ], "Projection": { // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, // "ProvisionedThroughput": { "ReadCapacityUnits": number, // "WriteCapacityUnits": number } }' AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") ) // AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming // to the "aws.dynamodb.attribute_definitions" semantic conventions. It // represents the JSON-serialized value of each item in the // `AttributeDefinitions` request field. func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) } // AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic // conventions. It represents the JSON-serialized value of each item in the // `GlobalSecondaryIndexUpdates` request field. func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) } // Attributes that exist for S3 request types. const ( // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" // semantic conventions.
It represents the S3 bucket name the request // refers to. Corresponds to the `--bucket` parameter of the [S3 // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) // operations. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'some-bucket-name' // Note: The `bucket` attribute is applicable to all S3 operations that // reference a bucket, i.e. that require the bucket name as a mandatory // parameter. // This applies to almost all S3 operations except `list-buckets`. AWSS3BucketKey = attribute.Key("aws.s3.bucket") // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic // conventions. It represents the S3 object key the request refers to. // Corresponds to the `--key` parameter of the [S3 // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) // operations. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'someFile.yml' // Note: The `key` attribute is applicable to all object-related S3 // operations, i.e. that require the object key as a mandatory parameter. // This applies in particular to the following operations: // // - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) // - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) // - // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) // - // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) // - // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) // - // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) // - // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) // - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) // - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) // - // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) // - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) // - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) // - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) AWSS3KeyKey = attribute.Key("aws.s3.key") // AWSS3CopySourceKey is the attribute Key conforming to the // "aws.s3.copy_source" semantic conventions. It represents the source // object (in the form `bucket`/`key`) for the copy operation. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'someFile.yml' // Note: The `copy_source` attribute applies to S3 copy operations and // corresponds to the `--copy-source` parameter // of the [copy-object operation within the S3 // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). // This applies in particular to the following operations: // // - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) // - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") // AWSS3UploadIDKey is the attribute Key conforming to the // "aws.s3.upload_id" semantic conventions. 
It represents the upload ID // that identifies the multipart upload. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' // Note: The `upload_id` attribute applies to S3 multipart-upload // operations and corresponds to the `--upload-id` parameter // of the [S3 // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) // multipart operations. // This applies in particular to the following operations: // // - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) // - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) // - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) // - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) // - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" // semantic conventions. It represents the delete request container that // specifies the objects to be deleted. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' // Note: The `delete` attribute is only applicable to the // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) // operation. // The `delete` attribute corresponds to the `--delete` parameter of the // [delete-objects operation within the S3 // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). AWSS3DeleteKey = attribute.Key("aws.s3.delete") // AWSS3PartNumberKey is the attribute Key conforming to the // "aws.s3.part_number" semantic conventions. It represents the part number // of the part being uploaded in a multipart-upload operation. This is a // positive integer between 1 and 10,000. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 3456 // Note: The `part_number` attribute is only applicable to the // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) // and // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) // operations. // The `part_number` attribute corresponds to the `--part-number` parameter // of the // [upload-part operation within the S3 // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") ) // AWSS3Bucket returns an attribute KeyValue conforming to the // "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the // request refers to. Corresponds to the `--bucket` parameter of the [S3 // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) // operations. func AWSS3Bucket(val string) attribute.KeyValue { return AWSS3BucketKey.String(val) } // AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" // semantic conventions. It represents the S3 object key the request refers to. // Corresponds to the `--key` parameter of the [S3 // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) // operations. 
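//
// A hedged usage sketch (editor's addition, not part of the generated
// specification). The function below is hypothetical and only illustrates
// how a consumer of this package could attach the S3 attributes above to a
// span; `tracer` is any trace.Tracer the caller has obtained.
//
//	func recordS3GetObject(ctx context.Context, tracer trace.Tracer) {
//		_, span := tracer.Start(ctx, "S3.GetObject")
//		defer span.End()
//		span.SetAttributes(
//			semconv.AWSS3Bucket("some-bucket-name"),
//			semconv.AWSS3Key("someFile.yml"),
//		)
//	}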
func AWSS3Key(val string) attribute.KeyValue { return AWSS3KeyKey.String(val) } // AWSS3CopySource returns an attribute KeyValue conforming to the // "aws.s3.copy_source" semantic conventions. It represents the source object // (in the form `bucket`/`key`) for the copy operation. func AWSS3CopySource(val string) attribute.KeyValue { return AWSS3CopySourceKey.String(val) } // AWSS3UploadID returns an attribute KeyValue conforming to the // "aws.s3.upload_id" semantic conventions. It represents the upload ID that // identifies the multipart upload. func AWSS3UploadID(val string) attribute.KeyValue { return AWSS3UploadIDKey.String(val) } // AWSS3Delete returns an attribute KeyValue conforming to the // "aws.s3.delete" semantic conventions. It represents the delete request // container that specifies the objects to be deleted. func AWSS3Delete(val string) attribute.KeyValue { return AWSS3DeleteKey.String(val) } // AWSS3PartNumber returns an attribute KeyValue conforming to the // "aws.s3.part_number" semantic conventions. It represents the part number of // the part being uploaded in a multipart-upload operation. This is a positive // integer between 1 and 10,000. func AWSS3PartNumber(val int) attribute.KeyValue { return AWSS3PartNumberKey.Int(val) } // Semantic conventions to apply when instrumenting the GraphQL implementation. // They map GraphQL operations to attributes on a Span. const ( // GraphqlOperationNameKey is the attribute Key conforming to the // "graphql.operation.name" semantic conventions. It represents the name of // the operation being executed. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'findBookByID' GraphqlOperationNameKey = attribute.Key("graphql.operation.name") // GraphqlOperationTypeKey is the attribute Key conforming to the // "graphql.operation.type" semantic conventions. It represents the type of // the operation being executed. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'query', 'mutation', 'subscription' GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") // GraphqlDocumentKey is the attribute Key conforming to the // "graphql.document" semantic conventions. It represents the GraphQL // document being executed. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'query findBookByID { bookByID(id: ?) { name } }' // Note: The value may be sanitized to exclude sensitive information. GraphqlDocumentKey = attribute.Key("graphql.document") ) var ( // GraphQL query GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") // GraphQL mutation GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") // GraphQL subscription GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") ) // GraphqlOperationName returns an attribute KeyValue conforming to the // "graphql.operation.name" semantic conventions. It represents the name of the // operation being executed. func GraphqlOperationName(val string) attribute.KeyValue { return GraphqlOperationNameKey.String(val) } // GraphqlDocument returns an attribute KeyValue conforming to the // "graphql.document" semantic conventions. It represents the GraphQL document // being executed. func GraphqlDocument(val string) attribute.KeyValue { return GraphqlDocumentKey.String(val) } // General attributes used in messaging systems. const ( // MessagingSystemKey is the attribute Key conforming to the // "messaging.system" semantic conventions. 
It represents a string // identifying the messaging system. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' MessagingSystemKey = attribute.Key("messaging.system") // MessagingOperationKey is the attribute Key conforming to the // "messaging.operation" semantic conventions. It represents a string // identifying the kind of messaging operation as defined in the [Operation // names](#operation-names) section above. // // Type: Enum // RequirementLevel: Required // Stability: stable // Note: If a custom value is used, it MUST be of low cardinality. MessagingOperationKey = attribute.Key("messaging.operation") // MessagingBatchMessageCountKey is the attribute Key conforming to the // "messaging.batch.message_count" semantic conventions. It represents the // number of messages sent, received, or processed in the scope of the // batching operation. // // Type: int // RequirementLevel: ConditionallyRequired (If the span describes an // operation on a batch of messages.) // Stability: stable // Examples: 0, 1, 2 // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on // spans that operate with a single message. When a messaging client // library supports both batch and single-message API for the same // operation, instrumentations SHOULD use `messaging.batch.message_count` // for batching APIs and SHOULD NOT use it for single-message APIs. MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") ) var ( // publish MessagingOperationPublish = MessagingOperationKey.String("publish") // receive MessagingOperationReceive = MessagingOperationKey.String("receive") // process MessagingOperationProcess = MessagingOperationKey.String("process") ) // MessagingSystem returns an attribute KeyValue conforming to the // "messaging.system" semantic conventions. It represents a string identifying // the messaging system. func MessagingSystem(val string) attribute.KeyValue { return MessagingSystemKey.String(val) } // MessagingBatchMessageCount returns an attribute KeyValue conforming to // the "messaging.batch.message_count" semantic conventions. It represents the // number of messages sent, received, or processed in the scope of the batching // operation. func MessagingBatchMessageCount(val int) attribute.KeyValue { return MessagingBatchMessageCountKey.Int(val) } // Semantic convention for a consumer of messages received from a messaging // system const ( // MessagingConsumerIDKey is the attribute Key conforming to the // "messaging.consumer.id" semantic conventions. It represents the // identifier for the consumer receiving a message. For Kafka, set it to // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if // both are present, or only `messaging.kafka.consumer.group`. For brokers, // such as RabbitMQ and Artemis, set it to the `client_id` of the client // consuming the message. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'mygroup - client-6' MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") ) // MessagingConsumerID returns an attribute KeyValue conforming to the // "messaging.consumer.id" semantic conventions. It represents the identifier // for the consumer receiving a message. For Kafka, set it to // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both // are present, or only `messaging.kafka.consumer.group`. 
For brokers, such as // RabbitMQ and Artemis, set it to the `client_id` of the client consuming the // message. func MessagingConsumerID(val string) attribute.KeyValue { return MessagingConsumerIDKey.String(val) } // Semantic conventions for remote procedure calls. const ( // RPCSystemKey is the attribute Key conforming to the "rpc.system" // semantic conventions. It represents a string identifying the remoting // system. See below for a list of well-known identifiers. // // Type: Enum // RequirementLevel: Required // Stability: stable RPCSystemKey = attribute.Key("rpc.system") // RPCServiceKey is the attribute Key conforming to the "rpc.service" // semantic conventions. It represents the full (logical) name of the // service being called, including its package name, if applicable. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'myservice.EchoService' // Note: This is the logical name of the service from the RPC interface // perspective, which can be different from the name of any implementing // class. The `code.namespace` attribute may be used to store the latter // (despite the attribute name, it may include a class name; e.g., class // with method actually executing the call on the server side, RPC client // stub class on the client side). RPCServiceKey = attribute.Key("rpc.service") // RPCMethodKey is the attribute Key conforming to the "rpc.method" // semantic conventions. It represents the name of the (logical) method // being called, must be equal to the $method part in the span name. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'exampleMethod' // Note: This is the logical name of the method from the RPC interface // perspective, which can be different from the name of any implementing // method/function. The `code.function` attribute may be used to store the // latter (e.g., method actually executing the call on the server side, RPC // client stub method on the client side). RPCMethodKey = attribute.Key("rpc.method") ) var ( // gRPC RPCSystemGRPC = RPCSystemKey.String("grpc") // Java RMI RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") // .NET WCF RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") // Apache Dubbo RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") // Connect RPC RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") ) // RPCService returns an attribute KeyValue conforming to the "rpc.service" // semantic conventions. It represents the full (logical) name of the service // being called, including its package name, if applicable. func RPCService(val string) attribute.KeyValue { return RPCServiceKey.String(val) } // RPCMethod returns an attribute KeyValue conforming to the "rpc.method" // semantic conventions. It represents the name of the (logical) method being // called, must be equal to the $method part in the span name. func RPCMethod(val string) attribute.KeyValue { return RPCMethodKey.String(val) } // Tech-specific attributes for gRPC. const ( // RPCGRPCStatusCodeKey is the attribute Key conforming to the // "rpc.grpc.status_code" semantic conventions. It represents the [numeric // status // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of // the gRPC request. 
// // Type: Enum // RequirementLevel: Required // Stability: stable RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ) var ( // OK RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) // CANCELLED RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) // UNKNOWN RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) // INVALID_ARGUMENT RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) // DEADLINE_EXCEEDED RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) // NOT_FOUND RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) // ALREADY_EXISTS RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) // PERMISSION_DENIED RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) // RESOURCE_EXHAUSTED RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) // FAILED_PRECONDITION RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) // ABORTED RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) // OUT_OF_RANGE RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) // UNIMPLEMENTED RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) // INTERNAL RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) // UNAVAILABLE RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) // DATA_LOSS RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) // UNAUTHENTICATED RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) ) // Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). const ( // RPCJsonrpcVersionKey is the attribute Key conforming to the // "rpc.jsonrpc.version" semantic conventions. It represents the protocol // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 // does not specify this, the value can be omitted. // // Type: string // RequirementLevel: ConditionallyRequired (If other than the default // version (`1.0`)) // Stability: stable // Examples: '2.0', '1.0' RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") // RPCJsonrpcRequestIDKey is the attribute Key conforming to the // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` // property of request or response. Since protocol allows id to be int, // string, `null` or missing (for notifications), value is expected to be // cast to string for simplicity. Use empty string in case of `null` value. // Omit entirely if this is a notification. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '10', 'request-7', '' RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the // "rpc.jsonrpc.error_code" semantic conventions. It represents the // `error.code` property of response if it is an error response. // // Type: int // RequirementLevel: ConditionallyRequired (If response is not successful.) // Stability: stable // Examples: -32700, 100 RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the // "rpc.jsonrpc.error_message" semantic conventions. It represents the // `error.message` property of response if it is an error response. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Parse error', 'User already exists' RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") ) // RPCJsonrpcVersion returns an attribute KeyValue conforming to the // "rpc.jsonrpc.version" semantic conventions. 
It represents the protocol // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 // does not specify this, the value can be omitted. func RPCJsonrpcVersion(val string) attribute.KeyValue { return RPCJsonrpcVersionKey.String(val) } // RPCJsonrpcRequestID returns an attribute KeyValue conforming to the // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` // property of request or response. Since protocol allows id to be int, string, // `null` or missing (for notifications), value is expected to be cast to // string for simplicity. Use empty string in case of `null` value. Omit // entirely if this is a notification. func RPCJsonrpcRequestID(val string) attribute.KeyValue { return RPCJsonrpcRequestIDKey.String(val) } // RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the // "rpc.jsonrpc.error_code" semantic conventions. It represents the // `error.code` property of response if it is an error response. func RPCJsonrpcErrorCode(val int) attribute.KeyValue { return RPCJsonrpcErrorCodeKey.Int(val) } // RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the // "rpc.jsonrpc.error_message" semantic conventions. It represents the // `error.message` property of response if it is an error response. func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { return RPCJsonrpcErrorMessageKey.String(val) } // Tech-specific attributes for Connect RPC. const ( // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the // "rpc.connect_rpc.error_code" semantic conventions. It represents the // [error codes](https://connect.build/docs/protocol/#error-codes) of the // Connect request. Error codes are always string values. // // Type: Enum // RequirementLevel: ConditionallyRequired (If response is not successful // and if error code available.) 
// Stability: stable RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") ) var ( // cancelled RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") // unknown RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") // invalid_argument RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") // deadline_exceeded RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") // not_found RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") // already_exists RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") // permission_denied RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") // resource_exhausted RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") // failed_precondition RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") // aborted RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") // out_of_range RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") // unimplemented RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") // internal RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") // unavailable RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") // data_loss RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") // unauthenticated RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") ) opentelemetry-go-1.21.0/semconv/v1.21.0/000077500000000000000000000000001452547353200174635ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.21.0/attribute_group.go000066400000000000000000002212751452547353200232420ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" import "go.opentelemetry.io/otel/attribute" // These attributes may be used to describe the client in a connection-based // network interaction where there is one side that initiates the connection // (the client is the side that initiates the connection). This covers all TCP // network interactions since TCP is connection-based and one side initiates // the connection (an exception is made for peer-to-peer communication over TCP // where the "user-facing" surface of the protocol / API does not expose a // clear notion of client and server). This also covers UDP network // interactions where one side initiates the interaction, e.g. QUIC (HTTP/3) // and DNS. const ( // ClientAddressKey is the attribute Key conforming to the "client.address" // semantic conventions. 
It represents the client address - unix domain // socket name, IPv4 or IPv6 address. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '/tmp/my.sock', '10.1.2.80' // Note: When observed from the server side, and when communicating through // an intermediary, `client.address` SHOULD represent client address behind // any intermediaries (e.g. proxies) if it's available. ClientAddressKey = attribute.Key("client.address") // ClientPortKey is the attribute Key conforming to the "client.port" // semantic conventions. It represents the client port number // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 65123 // Note: When observed from the server side, and when communicating through // an intermediary, `client.port` SHOULD represent client port behind any // intermediaries (e.g. proxies) if it's available. ClientPortKey = attribute.Key("client.port") // ClientSocketAddressKey is the attribute Key conforming to the // "client.socket.address" semantic conventions. It represents the // immediate client peer address - unix domain socket name, IPv4 or IPv6 // address. // // Type: string // RequirementLevel: Recommended (If different than `client.address`.) // Stability: stable // Examples: '/tmp/my.sock', '127.0.0.1' ClientSocketAddressKey = attribute.Key("client.socket.address") // ClientSocketPortKey is the attribute Key conforming to the // "client.socket.port" semantic conventions. It represents the immediate // client peer port number // // Type: int // RequirementLevel: Recommended (If different than `client.port`.) // Stability: stable // Examples: 35555 ClientSocketPortKey = attribute.Key("client.socket.port") ) // ClientAddress returns an attribute KeyValue conforming to the // "client.address" semantic conventions. It represents the client address - // unix domain socket name, IPv4 or IPv6 address. func ClientAddress(val string) attribute.KeyValue { return ClientAddressKey.String(val) } // ClientPort returns an attribute KeyValue conforming to the "client.port" // semantic conventions. It represents the client port number func ClientPort(val int) attribute.KeyValue { return ClientPortKey.Int(val) } // ClientSocketAddress returns an attribute KeyValue conforming to the // "client.socket.address" semantic conventions. It represents the immediate // client peer address - unix domain socket name, IPv4 or IPv6 address. func ClientSocketAddress(val string) attribute.KeyValue { return ClientSocketAddressKey.String(val) } // ClientSocketPort returns an attribute KeyValue conforming to the // "client.socket.port" semantic conventions. It represents the immediate // client peer port number func ClientSocketPort(val int) attribute.KeyValue { return ClientSocketPortKey.Int(val) } // Describes deprecated HTTP attributes. const ( // HTTPMethodKey is the attribute Key conforming to the "http.method" // semantic conventions. It represents the deprecated, use // `http.request.method` instead. // // Type: string // RequirementLevel: Optional // Stability: deprecated // Examples: 'GET', 'POST', 'HEAD' HTTPMethodKey = attribute.Key("http.method") // HTTPStatusCodeKey is the attribute Key conforming to the // "http.status_code" semantic conventions. It represents the deprecated, // use `http.response.status_code` instead. 
// // Type: int // RequirementLevel: Optional // Stability: deprecated // Examples: 200 HTTPStatusCodeKey = attribute.Key("http.status_code") // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" // semantic conventions. It represents the deprecated, use `url.scheme` // instead. // // Type: string // RequirementLevel: Optional // Stability: deprecated // Examples: 'http', 'https' HTTPSchemeKey = attribute.Key("http.scheme") // HTTPURLKey is the attribute Key conforming to the "http.url" semantic // conventions. It represents the deprecated, use `url.full` instead. // // Type: string // RequirementLevel: Optional // Stability: deprecated // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' HTTPURLKey = attribute.Key("http.url") // HTTPTargetKey is the attribute Key conforming to the "http.target" // semantic conventions. It represents the deprecated, use `url.path` and // `url.query` instead. // // Type: string // RequirementLevel: Optional // Stability: deprecated // Examples: '/search?q=OpenTelemetry#SemConv' HTTPTargetKey = attribute.Key("http.target") // HTTPRequestContentLengthKey is the attribute Key conforming to the // "http.request_content_length" semantic conventions. It represents the // deprecated, use `http.request.body.size` instead. // // Type: int // RequirementLevel: Optional // Stability: deprecated // Examples: 3495 HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") // HTTPResponseContentLengthKey is the attribute Key conforming to the // "http.response_content_length" semantic conventions. It represents the // deprecated, use `http.response.body.size` instead. // // Type: int // RequirementLevel: Optional // Stability: deprecated // Examples: 3495 HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") ) // HTTPMethod returns an attribute KeyValue conforming to the "http.method" // semantic conventions. It represents the deprecated, use // `http.request.method` instead. func HTTPMethod(val string) attribute.KeyValue { return HTTPMethodKey.String(val) } // HTTPStatusCode returns an attribute KeyValue conforming to the // "http.status_code" semantic conventions. It represents the deprecated, use // `http.response.status_code` instead. func HTTPStatusCode(val int) attribute.KeyValue { return HTTPStatusCodeKey.Int(val) } // HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" // semantic conventions. It represents the deprecated, use `url.scheme` // instead. func HTTPScheme(val string) attribute.KeyValue { return HTTPSchemeKey.String(val) } // HTTPURL returns an attribute KeyValue conforming to the "http.url" // semantic conventions. It represents the deprecated, use `url.full` instead. func HTTPURL(val string) attribute.KeyValue { return HTTPURLKey.String(val) } // HTTPTarget returns an attribute KeyValue conforming to the "http.target" // semantic conventions. It represents the deprecated, use `url.path` and // `url.query` instead. func HTTPTarget(val string) attribute.KeyValue { return HTTPTargetKey.String(val) } // HTTPRequestContentLength returns an attribute KeyValue conforming to the // "http.request_content_length" semantic conventions. It represents the // deprecated, use `http.request.body.size` instead. func HTTPRequestContentLength(val int) attribute.KeyValue { return HTTPRequestContentLengthKey.Int(val) } // HTTPResponseContentLength returns an attribute KeyValue conforming to the // "http.response_content_length" semantic conventions. 
It represents the // deprecated, use `http.response.body.size` instead. func HTTPResponseContentLength(val int) attribute.KeyValue { return HTTPResponseContentLengthKey.Int(val) } // These attributes may be used for any network related operation. const ( // NetSockPeerNameKey is the attribute Key conforming to the // "net.sock.peer.name" semantic conventions. It represents the deprecated, // use `server.socket.domain` on client spans. // // Type: string // RequirementLevel: Optional // Stability: deprecated // Examples: '/var/my.sock' NetSockPeerNameKey = attribute.Key("net.sock.peer.name") // NetSockPeerAddrKey is the attribute Key conforming to the // "net.sock.peer.addr" semantic conventions. It represents the deprecated, // use `server.socket.address` on client spans and `client.socket.address` // on server spans. // // Type: string // RequirementLevel: Optional // Stability: deprecated // Examples: '192.168.0.1' NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") // NetSockPeerPortKey is the attribute Key conforming to the // "net.sock.peer.port" semantic conventions. It represents the deprecated, // use `server.socket.port` on client spans and `client.socket.port` on // server spans. // // Type: int // RequirementLevel: Optional // Stability: deprecated // Examples: 65531 NetSockPeerPortKey = attribute.Key("net.sock.peer.port") // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" // semantic conventions. It represents the deprecated, use `server.address` // on client spans and `client.address` on server spans. // // Type: string // RequirementLevel: Optional // Stability: deprecated // Examples: 'example.com' NetPeerNameKey = attribute.Key("net.peer.name") // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" // semantic conventions. It represents the deprecated, use `server.port` on // client spans and `client.port` on server spans. // // Type: int // RequirementLevel: Optional // Stability: deprecated // Examples: 8080 NetPeerPortKey = attribute.Key("net.peer.port") // NetHostNameKey is the attribute Key conforming to the "net.host.name" // semantic conventions. It represents the deprecated, use // `server.address`. // // Type: string // RequirementLevel: Optional // Stability: deprecated // Examples: 'example.com' NetHostNameKey = attribute.Key("net.host.name") // NetHostPortKey is the attribute Key conforming to the "net.host.port" // semantic conventions. It represents the deprecated, use `server.port`. // // Type: int // RequirementLevel: Optional // Stability: deprecated // Examples: 8080 NetHostPortKey = attribute.Key("net.host.port") // NetSockHostAddrKey is the attribute Key conforming to the // "net.sock.host.addr" semantic conventions. It represents the deprecated, // use `server.socket.address`. // // Type: string // RequirementLevel: Optional // Stability: deprecated // Examples: '/var/my.sock' NetSockHostAddrKey = attribute.Key("net.sock.host.addr") // NetSockHostPortKey is the attribute Key conforming to the // "net.sock.host.port" semantic conventions. It represents the deprecated, // use `server.socket.port`. // // Type: int // RequirementLevel: Optional // Stability: deprecated // Examples: 8080 NetSockHostPortKey = attribute.Key("net.sock.host.port") // NetTransportKey is the attribute Key conforming to the "net.transport" // semantic conventions. It represents the deprecated, use // `network.transport`. 
// // Type: Enum // RequirementLevel: Optional // Stability: deprecated NetTransportKey = attribute.Key("net.transport") // NetProtocolNameKey is the attribute Key conforming to the // "net.protocol.name" semantic conventions. It represents the deprecated, // use `network.protocol.name`. // // Type: string // RequirementLevel: Optional // Stability: deprecated // Examples: 'amqp', 'http', 'mqtt' NetProtocolNameKey = attribute.Key("net.protocol.name") // NetProtocolVersionKey is the attribute Key conforming to the // "net.protocol.version" semantic conventions. It represents the // deprecated, use `network.protocol.version`. // // Type: string // RequirementLevel: Optional // Stability: deprecated // Examples: '3.1.1' NetProtocolVersionKey = attribute.Key("net.protocol.version") // NetSockFamilyKey is the attribute Key conforming to the // "net.sock.family" semantic conventions. It represents the deprecated, // use `network.transport` and `network.type`. // // Type: Enum // RequirementLevel: Optional // Stability: deprecated NetSockFamilyKey = attribute.Key("net.sock.family") ) var ( // ip_tcp NetTransportTCP = NetTransportKey.String("ip_tcp") // ip_udp NetTransportUDP = NetTransportKey.String("ip_udp") // Named or anonymous pipe NetTransportPipe = NetTransportKey.String("pipe") // In-process communication NetTransportInProc = NetTransportKey.String("inproc") // Something else (non IP-based) NetTransportOther = NetTransportKey.String("other") ) var ( // IPv4 address NetSockFamilyInet = NetSockFamilyKey.String("inet") // IPv6 address NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") // Unix domain socket path NetSockFamilyUnix = NetSockFamilyKey.String("unix") ) // NetSockPeerName returns an attribute KeyValue conforming to the // "net.sock.peer.name" semantic conventions. It represents the deprecated, use // `server.socket.domain` on client spans. func NetSockPeerName(val string) attribute.KeyValue { return NetSockPeerNameKey.String(val) } // NetSockPeerAddr returns an attribute KeyValue conforming to the // "net.sock.peer.addr" semantic conventions. It represents the deprecated, use // `server.socket.address` on client spans and `client.socket.address` on // server spans. func NetSockPeerAddr(val string) attribute.KeyValue { return NetSockPeerAddrKey.String(val) } // NetSockPeerPort returns an attribute KeyValue conforming to the // "net.sock.peer.port" semantic conventions. It represents the deprecated, use // `server.socket.port` on client spans and `client.socket.port` on server // spans. func NetSockPeerPort(val int) attribute.KeyValue { return NetSockPeerPortKey.Int(val) } // NetPeerName returns an attribute KeyValue conforming to the // "net.peer.name" semantic conventions. It represents the deprecated, use // `server.address` on client spans and `client.address` on server spans. func NetPeerName(val string) attribute.KeyValue { return NetPeerNameKey.String(val) } // NetPeerPort returns an attribute KeyValue conforming to the // "net.peer.port" semantic conventions. It represents the deprecated, use // `server.port` on client spans and `client.port` on server spans. func NetPeerPort(val int) attribute.KeyValue { return NetPeerPortKey.Int(val) } // NetHostName returns an attribute KeyValue conforming to the // "net.host.name" semantic conventions. It represents the deprecated, use // `server.address`. 
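//
// Editor's note (illustrative only, not generated from the specification):
// this helper remains for instrumentation that still emits the deprecated
// `net.host.name` attribute; new code is expected to emit `server.address`
// instead, per the deprecation notice above. An existing caller might still
// write, with `span` assumed to be an already started trace.Span:
//
//	span.SetAttributes(semconv.NetHostName("example.com")) // deprecated attribute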
func NetHostName(val string) attribute.KeyValue { return NetHostNameKey.String(val) } // NetHostPort returns an attribute KeyValue conforming to the // "net.host.port" semantic conventions. It represents the deprecated, use // `server.port`. func NetHostPort(val int) attribute.KeyValue { return NetHostPortKey.Int(val) } // NetSockHostAddr returns an attribute KeyValue conforming to the // "net.sock.host.addr" semantic conventions. It represents the deprecated, use // `server.socket.address`. func NetSockHostAddr(val string) attribute.KeyValue { return NetSockHostAddrKey.String(val) } // NetSockHostPort returns an attribute KeyValue conforming to the // "net.sock.host.port" semantic conventions. It represents the deprecated, use // `server.socket.port`. func NetSockHostPort(val int) attribute.KeyValue { return NetSockHostPortKey.Int(val) } // NetProtocolName returns an attribute KeyValue conforming to the // "net.protocol.name" semantic conventions. It represents the deprecated, use // `network.protocol.name`. func NetProtocolName(val string) attribute.KeyValue { return NetProtocolNameKey.String(val) } // NetProtocolVersion returns an attribute KeyValue conforming to the // "net.protocol.version" semantic conventions. It represents the deprecated, // use `network.protocol.version`. func NetProtocolVersion(val string) attribute.KeyValue { return NetProtocolVersionKey.String(val) } // These attributes may be used to describe the receiver of a network // exchange/packet. These should be used when there is no client/server // relationship between the two sides, or when that relationship is unknown. // This covers low-level network interactions (e.g. packet tracing) where you // don't know if there was a connection or which side initiated it. This also // covers unidirectional UDP flows and peer-to-peer communication where the // "user-facing" surface of the protocol / API does not expose a clear notion // of client and server. const ( // DestinationDomainKey is the attribute Key conforming to the // "destination.domain" semantic conventions. It represents the domain name // of the destination system. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'foo.example.com' // Note: This value may be a host name, a fully qualified domain name, or // another host naming format. DestinationDomainKey = attribute.Key("destination.domain") // DestinationAddressKey is the attribute Key conforming to the // "destination.address" semantic conventions. It represents the peer // address, for example IP address or UNIX socket name. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '10.5.3.2' DestinationAddressKey = attribute.Key("destination.address") // DestinationPortKey is the attribute Key conforming to the // "destination.port" semantic conventions. It represents the peer port // number // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 3389, 2888 DestinationPortKey = attribute.Key("destination.port") ) // DestinationDomain returns an attribute KeyValue conforming to the // "destination.domain" semantic conventions. It represents the domain name of // the destination system. func DestinationDomain(val string) attribute.KeyValue { return DestinationDomainKey.String(val) } // DestinationAddress returns an attribute KeyValue conforming to the // "destination.address" semantic conventions. It represents the peer address, // for example IP address or UNIX socket name. 
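//
// For illustration only (an editor's sketch, not generated from the
// specification): a packet-level or peer-to-peer instrumentation that has
// already started a span named `span` might record the destination
// attributes above as follows, using the example values from their docs.
//
//	span.SetAttributes(
//		semconv.DestinationAddress("10.5.3.2"),
//		semconv.DestinationPort(3389),
//	)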
func DestinationAddress(val string) attribute.KeyValue {
	return DestinationAddressKey.String(val)
}

// DestinationPort returns an attribute KeyValue conforming to the
// "destination.port" semantic conventions. It represents the peer port number.
func DestinationPort(val int) attribute.KeyValue {
	return DestinationPortKey.Int(val)
}

// Describes HTTP attributes.
const (
	// HTTPRequestMethodKey is the attribute Key conforming to the
	// "http.request.method" semantic conventions. It represents the HTTP
	// request method.
	//
	// Type: Enum
	// RequirementLevel: Required
	// Stability: stable
	// Examples: 'GET', 'POST', 'HEAD'
	// Note: HTTP request method value SHOULD be "known" to the
	// instrumentation.
	// By default, this convention defines "known" methods as the ones listed
	// in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
	// and the PATCH method defined in
	// [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
	//
	// If the HTTP request method is not known to instrumentation, it MUST set
	// the `http.request.method` attribute to `_OTHER` and, except if reporting
	// a metric, MUST
	// set the exact method received in the request line as the value of the
	// `http.request.method_original` attribute.
	//
	// If the HTTP instrumentation could end up converting valid HTTP request
	// methods to `_OTHER`, then it MUST provide a way to override
	// the list of known HTTP methods. If this override is done via environment
	// variable, then the environment variable MUST be named
	// OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
	// list of case-sensitive known HTTP methods
	// (this list MUST be a full override of the default known methods, it is
	// not a list of known methods in addition to the defaults).
	//
	// HTTP method names are case-sensitive and the `http.request.method`
	// attribute value MUST match a known HTTP method name exactly.
	// Instrumentations for specific web frameworks that consider HTTP methods
	// to be case-insensitive SHOULD populate a canonical equivalent.
	// Tracing instrumentations that do so MUST also set
	// `http.request.method_original` to the original value.
	HTTPRequestMethodKey = attribute.Key("http.request.method")

	// HTTPResponseStatusCodeKey is the attribute Key conforming to the
	// "http.response.status_code" semantic conventions. It represents the
	// [HTTP response status
	// code](https://tools.ietf.org/html/rfc7231#section-6).
	//
	// Type: int
	// RequirementLevel: ConditionallyRequired (If and only if one was
	// received/sent.)
// Stability: stable // Examples: 200 HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") ) var ( // CONNECT method HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") // DELETE method HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") // GET method HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") // HEAD method HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") // OPTIONS method HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") // PATCH method HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") // POST method HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") // PUT method HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") // TRACE method HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") // Any HTTP method that the instrumentation has no prior knowledge of HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") ) // HTTPResponseStatusCode returns an attribute KeyValue conforming to the // "http.response.status_code" semantic conventions. It represents the [HTTP // response status code](https://tools.ietf.org/html/rfc7231#section-6). func HTTPResponseStatusCode(val int) attribute.KeyValue { return HTTPResponseStatusCodeKey.Int(val) } // HTTP Server attributes const ( // HTTPRouteKey is the attribute Key conforming to the "http.route" // semantic conventions. It represents the matched route (path template in // the format used by the respective server framework). See note below // // Type: string // RequirementLevel: ConditionallyRequired (If and only if it's available) // Stability: stable // Examples: '/users/:userID?', '{controller}/{action}/{id?}' // Note: MUST NOT be populated when this is not supported by the HTTP // server framework as the route attribute should have low-cardinality and // the URI path can NOT substitute it. // SHOULD include the [application // root](/docs/http/http-spans.md#http-server-definitions) if there is one. HTTPRouteKey = attribute.Key("http.route") ) // HTTPRoute returns an attribute KeyValue conforming to the "http.route" // semantic conventions. It represents the matched route (path template in the // format used by the respective server framework). See note below func HTTPRoute(val string) attribute.KeyValue { return HTTPRouteKey.String(val) } // Attributes for Events represented using Log Records. const ( // EventNameKey is the attribute Key conforming to the "event.name" // semantic conventions. It represents the name identifies the event. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'click', 'exception' EventNameKey = attribute.Key("event.name") // EventDomainKey is the attribute Key conforming to the "event.domain" // semantic conventions. It represents the domain identifies the business // context for the events. // // Type: Enum // RequirementLevel: Required // Stability: stable // Note: Events across different domains may have same `event.name`, yet be // unrelated events. EventDomainKey = attribute.Key("event.domain") ) var ( // Events from browser apps EventDomainBrowser = EventDomainKey.String("browser") // Events from mobile apps EventDomainDevice = EventDomainKey.String("device") // Events from Kubernetes EventDomainK8S = EventDomainKey.String("k8s") ) // EventName returns an attribute KeyValue conforming to the "event.name" // semantic conventions. It represents the name identifies the event. 
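//
// Illustrative sketch only (example values): a hypothetical browser
// instrumentation emitting an event as a Log Record might attach the event
// attributes above as:
//
//	attrs := []attribute.KeyValue{
//		semconv.EventName("click"),
//		semconv.EventDomainBrowser,
//	}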
func EventName(val string) attribute.KeyValue { return EventNameKey.String(val) } // The attributes described in this section are rather generic. They may be // used in any Log Record they apply to. const ( // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" // semantic conventions. It represents a unique identifier for the Log // Record. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' // Note: If an id is provided, other log records with the same id will be // considered duplicates and can be removed safely. This means, that two // distinguishable log records MUST have different values. // The id MAY be an [Universally Unique Lexicographically Sortable // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers // (e.g. UUID) may be used as needed. LogRecordUIDKey = attribute.Key("log.record.uid") ) // LogRecordUID returns an attribute KeyValue conforming to the // "log.record.uid" semantic conventions. It represents a unique identifier for // the Log Record. func LogRecordUID(val string) attribute.KeyValue { return LogRecordUIDKey.String(val) } // Describes Log attributes const ( // LogIostreamKey is the attribute Key conforming to the "log.iostream" // semantic conventions. It represents the stream associated with the log. // See below for a list of well-known values. // // Type: Enum // RequirementLevel: Optional // Stability: stable LogIostreamKey = attribute.Key("log.iostream") ) var ( // Logs from stdout stream LogIostreamStdout = LogIostreamKey.String("stdout") // Events from stderr stream LogIostreamStderr = LogIostreamKey.String("stderr") ) // A file to which log was emitted. const ( // LogFileNameKey is the attribute Key conforming to the "log.file.name" // semantic conventions. It represents the basename of the file. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'audit.log' LogFileNameKey = attribute.Key("log.file.name") // LogFilePathKey is the attribute Key conforming to the "log.file.path" // semantic conventions. It represents the full path to the file. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '/var/log/mysql/audit.log' LogFilePathKey = attribute.Key("log.file.path") // LogFileNameResolvedKey is the attribute Key conforming to the // "log.file.name_resolved" semantic conventions. It represents the // basename of the file, with symlinks resolved. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'uuid.log' LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") // LogFilePathResolvedKey is the attribute Key conforming to the // "log.file.path_resolved" semantic conventions. It represents the full // path to the file, with symlinks resolved. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '/var/lib/docker/uuid.log' LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") ) // LogFileName returns an attribute KeyValue conforming to the // "log.file.name" semantic conventions. It represents the basename of the // file. func LogFileName(val string) attribute.KeyValue { return LogFileNameKey.String(val) } // LogFilePath returns an attribute KeyValue conforming to the // "log.file.path" semantic conventions. It represents the full path to the // file. 
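//
// Illustrative sketch only (example values): a hypothetical file-tailing
// log receiver might describe the source of a log line with the log file
// attributes above:
//
//	attrs := []attribute.KeyValue{
//		semconv.LogFileName("audit.log"),
//		semconv.LogFilePath("/var/log/mysql/audit.log"),
//	}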
func LogFilePath(val string) attribute.KeyValue { return LogFilePathKey.String(val) } // LogFileNameResolved returns an attribute KeyValue conforming to the // "log.file.name_resolved" semantic conventions. It represents the basename of // the file, with symlinks resolved. func LogFileNameResolved(val string) attribute.KeyValue { return LogFileNameResolvedKey.String(val) } // LogFilePathResolved returns an attribute KeyValue conforming to the // "log.file.path_resolved" semantic conventions. It represents the full path // to the file, with symlinks resolved. func LogFilePathResolved(val string) attribute.KeyValue { return LogFilePathResolvedKey.String(val) } // Describes JVM memory metric attributes. const ( // TypeKey is the attribute Key conforming to the "type" semantic // conventions. It represents the type of memory. // // Type: Enum // RequirementLevel: Recommended // Stability: stable // Examples: 'heap', 'non_heap' TypeKey = attribute.Key("type") // PoolKey is the attribute Key conforming to the "pool" semantic // conventions. It represents the name of the memory pool. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' // Note: Pool names are generally obtained via // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). PoolKey = attribute.Key("pool") ) var ( // Heap memory TypeHeap = TypeKey.String("heap") // Non-heap memory TypeNonHeap = TypeKey.String("non_heap") ) // Pool returns an attribute KeyValue conforming to the "pool" semantic // conventions. It represents the name of the memory pool. func Pool(val string) attribute.KeyValue { return PoolKey.String(val) } // These attributes may be used to describe the server in a connection-based // network interaction where there is one side that initiates the connection // (the client is the side that initiates the connection). This covers all TCP // network interactions since TCP is connection-based and one side initiates // the connection (an exception is made for peer-to-peer communication over TCP // where the "user-facing" surface of the protocol / API does not expose a // clear notion of client and server). This also covers UDP network // interactions where one side initiates the interaction, e.g. QUIC (HTTP/3) // and DNS. const ( // ServerAddressKey is the attribute Key conforming to the "server.address" // semantic conventions. It represents the logical server hostname, matches // server FQDN if available, and IP or socket address if FQDN is not known. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'example.com' ServerAddressKey = attribute.Key("server.address") // ServerPortKey is the attribute Key conforming to the "server.port" // semantic conventions. It represents the logical server port number // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 80, 8080, 443 ServerPortKey = attribute.Key("server.port") // ServerSocketDomainKey is the attribute Key conforming to the // "server.socket.domain" semantic conventions. It represents the domain // name of an immediate peer. // // Type: string // RequirementLevel: Recommended (If different than `server.address`.) // Stability: stable // Examples: 'proxy.example.com' // Note: Typically observed from the client side, and represents a proxy or // other intermediary domain name. 
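//
// Illustrative sketch only (example values): a hypothetical client
// instrumentation connecting to a server through a proxy might record:
//
//	attrs := []attribute.KeyValue{
//		semconv.ServerAddress("example.com"),
//		semconv.ServerPort(443),
//		semconv.ServerSocketDomain("proxy.example.com"),
//	}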
ServerSocketDomainKey = attribute.Key("server.socket.domain") // ServerSocketAddressKey is the attribute Key conforming to the // "server.socket.address" semantic conventions. It represents the physical // server IP address or Unix socket address. If set from the client, should // simply use the socket's peer address, and not attempt to find any actual // server IP (i.e., if set from client, this may represent some proxy // server instead of the logical server). // // Type: string // RequirementLevel: Recommended (If different than `server.address`.) // Stability: stable // Examples: '10.5.3.2' ServerSocketAddressKey = attribute.Key("server.socket.address") // ServerSocketPortKey is the attribute Key conforming to the // "server.socket.port" semantic conventions. It represents the physical // server port. // // Type: int // RequirementLevel: Recommended (If different than `server.port`.) // Stability: stable // Examples: 16456 ServerSocketPortKey = attribute.Key("server.socket.port") ) // ServerAddress returns an attribute KeyValue conforming to the // "server.address" semantic conventions. It represents the logical server // hostname, matches server FQDN if available, and IP or socket address if FQDN // is not known. func ServerAddress(val string) attribute.KeyValue { return ServerAddressKey.String(val) } // ServerPort returns an attribute KeyValue conforming to the "server.port" // semantic conventions. It represents the logical server port number func ServerPort(val int) attribute.KeyValue { return ServerPortKey.Int(val) } // ServerSocketDomain returns an attribute KeyValue conforming to the // "server.socket.domain" semantic conventions. It represents the domain name // of an immediate peer. func ServerSocketDomain(val string) attribute.KeyValue { return ServerSocketDomainKey.String(val) } // ServerSocketAddress returns an attribute KeyValue conforming to the // "server.socket.address" semantic conventions. It represents the physical // server IP address or Unix socket address. If set from the client, should // simply use the socket's peer address, and not attempt to find any actual // server IP (i.e., if set from client, this may represent some proxy server // instead of the logical server). func ServerSocketAddress(val string) attribute.KeyValue { return ServerSocketAddressKey.String(val) } // ServerSocketPort returns an attribute KeyValue conforming to the // "server.socket.port" semantic conventions. It represents the physical server // port. func ServerSocketPort(val int) attribute.KeyValue { return ServerSocketPortKey.Int(val) } // These attributes may be used to describe the sender of a network // exchange/packet. These should be used when there is no client/server // relationship between the two sides, or when that relationship is unknown. // This covers low-level network interactions (e.g. packet tracing) where you // don't know if there was a connection or which side initiated it. This also // covers unidirectional UDP flows and peer-to-peer communication where the // "user-facing" surface of the protocol / API does not expose a clear notion // of client and server. const ( // SourceDomainKey is the attribute Key conforming to the "source.domain" // semantic conventions. It represents the domain name of the source // system. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'foo.example.com' // Note: This value may be a host name, a fully qualified domain name, or // another host naming format. 
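//
// Illustrative sketch only (example values): a hypothetical unidirectional
// UDP or packet-tracing instrumentation might describe the sending side with
// the source attributes defined below:
//
//	attrs := []attribute.KeyValue{
//		semconv.SourceDomain("foo.example.com"),
//		semconv.SourceAddress("10.5.3.2"),
//		semconv.SourcePort(2888),
//	}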
SourceDomainKey = attribute.Key("source.domain") // SourceAddressKey is the attribute Key conforming to the "source.address" // semantic conventions. It represents the source address, for example IP // address or Unix socket name. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '10.5.3.2' SourceAddressKey = attribute.Key("source.address") // SourcePortKey is the attribute Key conforming to the "source.port" // semantic conventions. It represents the source port number // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 3389, 2888 SourcePortKey = attribute.Key("source.port") ) // SourceDomain returns an attribute KeyValue conforming to the // "source.domain" semantic conventions. It represents the domain name of the // source system. func SourceDomain(val string) attribute.KeyValue { return SourceDomainKey.String(val) } // SourceAddress returns an attribute KeyValue conforming to the // "source.address" semantic conventions. It represents the source address, for // example IP address or Unix socket name. func SourceAddress(val string) attribute.KeyValue { return SourceAddressKey.String(val) } // SourcePort returns an attribute KeyValue conforming to the "source.port" // semantic conventions. It represents the source port number func SourcePort(val int) attribute.KeyValue { return SourcePortKey.Int(val) } // These attributes may be used for any network related operation. const ( // NetworkTransportKey is the attribute Key conforming to the // "network.transport" semantic conventions. It represents the [OSI // Transport Layer](https://osi-model.com/transport-layer/) or // [Inter-process Communication // method](https://en.wikipedia.org/wiki/Inter-process_communication). The // value SHOULD be normalized to lowercase. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'tcp', 'udp' NetworkTransportKey = attribute.Key("network.transport") // NetworkTypeKey is the attribute Key conforming to the "network.type" // semantic conventions. It represents the [OSI Network // Layer](https://osi-model.com/network-layer/) or non-OSI equivalent. The // value SHOULD be normalized to lowercase. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'ipv4', 'ipv6' NetworkTypeKey = attribute.Key("network.type") // NetworkProtocolNameKey is the attribute Key conforming to the // "network.protocol.name" semantic conventions. It represents the [OSI // Application Layer](https://osi-model.com/application-layer/) or non-OSI // equivalent. The value SHOULD be normalized to lowercase. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'amqp', 'http', 'mqtt' NetworkProtocolNameKey = attribute.Key("network.protocol.name") // NetworkProtocolVersionKey is the attribute Key conforming to the // "network.protocol.version" semantic conventions. It represents the // version of the application layer protocol used. See note below. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '3.1.1' // Note: `network.protocol.version` refers to the version of the protocol // used and might be different from the protocol client's version. If the // HTTP client used has a version of `0.27.2`, but sends HTTP version // `1.1`, this attribute should be set to `1.1`. 
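//
// Illustrative sketch only (example values): a hypothetical HTTP/1.1 client
// instrumentation might combine the network attributes in this group as:
//
//	attrs := []attribute.KeyValue{
//		semconv.NetworkTransportTCP,
//		semconv.NetworkProtocolName("http"),
//		semconv.NetworkProtocolVersion("1.1"),
//	}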
NetworkProtocolVersionKey = attribute.Key("network.protocol.version") ) var ( // TCP NetworkTransportTCP = NetworkTransportKey.String("tcp") // UDP NetworkTransportUDP = NetworkTransportKey.String("udp") // Named or anonymous pipe. See note below NetworkTransportPipe = NetworkTransportKey.String("pipe") // Unix domain socket NetworkTransportUnix = NetworkTransportKey.String("unix") ) var ( // IPv4 NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") // IPv6 NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") ) // NetworkProtocolName returns an attribute KeyValue conforming to the // "network.protocol.name" semantic conventions. It represents the [OSI // Application Layer](https://osi-model.com/application-layer/) or non-OSI // equivalent. The value SHOULD be normalized to lowercase. func NetworkProtocolName(val string) attribute.KeyValue { return NetworkProtocolNameKey.String(val) } // NetworkProtocolVersion returns an attribute KeyValue conforming to the // "network.protocol.version" semantic conventions. It represents the version // of the application layer protocol used. See note below. func NetworkProtocolVersion(val string) attribute.KeyValue { return NetworkProtocolVersionKey.String(val) } // These attributes may be used for any network related operation. const ( // NetworkConnectionTypeKey is the attribute Key conforming to the // "network.connection.type" semantic conventions. It represents the // internet connection type. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'wifi' NetworkConnectionTypeKey = attribute.Key("network.connection.type") // NetworkConnectionSubtypeKey is the attribute Key conforming to the // "network.connection.subtype" semantic conventions. It represents the // this describes more details regarding the connection.type. It may be the // type of cell technology connection, but it could be used for describing // details about a wifi connection. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'LTE' NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") // NetworkCarrierNameKey is the attribute Key conforming to the // "network.carrier.name" semantic conventions. It represents the name of // the mobile carrier. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'sprint' NetworkCarrierNameKey = attribute.Key("network.carrier.name") // NetworkCarrierMccKey is the attribute Key conforming to the // "network.carrier.mcc" semantic conventions. It represents the mobile // carrier country code. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '310' NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") // NetworkCarrierMncKey is the attribute Key conforming to the // "network.carrier.mnc" semantic conventions. It represents the mobile // carrier network code. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '001' NetworkCarrierMncKey = attribute.Key("network.carrier.mnc") // NetworkCarrierIccKey is the attribute Key conforming to the // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 // alpha-2 2-character country code associated with the mobile carrier // network. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'DE' NetworkCarrierIccKey = attribute.Key("network.carrier.icc") ) var ( // wifi NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") // wired NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") // cell NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") // unavailable NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") // unknown NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") ) var ( // GPRS NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") // EDGE NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") // UMTS NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") // CDMA NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") // EVDO Rel. 0 NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") // EVDO Rev. A NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") // CDMA2000 1XRTT NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") // HSDPA NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") // HSUPA NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") // HSPA NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") // IDEN NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") // EVDO Rev. B NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") // LTE NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") // EHRPD NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") // HSPAP NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") // GSM NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") // TD-SCDMA NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") // IWLAN NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") // 5G NR (New Radio) NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") // 5G NRNSA (New Radio Non-Standalone) NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") // LTE CA NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") ) // NetworkCarrierName returns an attribute KeyValue conforming to the // "network.carrier.name" semantic conventions. It represents the name of the // mobile carrier. func NetworkCarrierName(val string) attribute.KeyValue { return NetworkCarrierNameKey.String(val) } // NetworkCarrierMcc returns an attribute KeyValue conforming to the // "network.carrier.mcc" semantic conventions. It represents the mobile carrier // country code. func NetworkCarrierMcc(val string) attribute.KeyValue { return NetworkCarrierMccKey.String(val) } // NetworkCarrierMnc returns an attribute KeyValue conforming to the // "network.carrier.mnc" semantic conventions. It represents the mobile carrier // network code. func NetworkCarrierMnc(val string) attribute.KeyValue { return NetworkCarrierMncKey.String(val) } // NetworkCarrierIcc returns an attribute KeyValue conforming to the // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 // alpha-2 2-character country code associated with the mobile carrier network. 
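//
// Illustrative sketch only (example values): a hypothetical mobile-client
// instrumentation might describe the connection and carrier as:
//
//	attrs := []attribute.KeyValue{
//		semconv.NetworkConnectionTypeCell,
//		semconv.NetworkConnectionSubtypeLte,
//		semconv.NetworkCarrierName("sprint"),
//		semconv.NetworkCarrierMcc("310"),
//		semconv.NetworkCarrierMnc("001"),
//		semconv.NetworkCarrierIcc("DE"),
//	}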
func NetworkCarrierIcc(val string) attribute.KeyValue { return NetworkCarrierIccKey.String(val) } // Semantic conventions for HTTP client and server Spans. const ( // HTTPRequestMethodOriginalKey is the attribute Key conforming to the // "http.request.method_original" semantic conventions. It represents the // original HTTP method sent by the client in the request line. // // Type: string // RequirementLevel: ConditionallyRequired (If and only if it's different // than `http.request.method`.) // Stability: stable // Examples: 'GeT', 'ACL', 'foo' HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") // HTTPRequestBodySizeKey is the attribute Key conforming to the // "http.request.body.size" semantic conventions. It represents the size of // the request payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as // the // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) // header. For requests using transport encoding, this should be the // compressed size. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 3495 HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") // HTTPResponseBodySizeKey is the attribute Key conforming to the // "http.response.body.size" semantic conventions. It represents the size // of the response payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as // the // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) // header. For requests using transport encoding, this should be the // compressed size. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 3495 HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") ) // HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the // "http.request.method_original" semantic conventions. It represents the // original HTTP method sent by the client in the request line. func HTTPRequestMethodOriginal(val string) attribute.KeyValue { return HTTPRequestMethodOriginalKey.String(val) } // HTTPRequestBodySize returns an attribute KeyValue conforming to the // "http.request.body.size" semantic conventions. It represents the size of the // request payload body in bytes. This is the number of bytes transferred // excluding headers and is often, but not always, present as the // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) // header. For requests using transport encoding, this should be the compressed // size. func HTTPRequestBodySize(val int) attribute.KeyValue { return HTTPRequestBodySizeKey.Int(val) } // HTTPResponseBodySize returns an attribute KeyValue conforming to the // "http.response.body.size" semantic conventions. It represents the size of // the response payload body in bytes. This is the number of bytes transferred // excluding headers and is often, but not always, present as the // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) // header. For requests using transport encoding, this should be the compressed // size. func HTTPResponseBodySize(val int) attribute.KeyValue { return HTTPResponseBodySizeKey.Int(val) } // Semantic convention describing per-message attributes populated on messaging // spans or links. const ( // MessagingMessageIDKey is the attribute Key conforming to the // "messaging.message.id" semantic conventions. 
It represents a value used // by the messaging system as an identifier for the message, represented as // a string. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '452a7c7c7c7048c2f887f61572b18fc2' MessagingMessageIDKey = attribute.Key("messaging.message.id") // MessagingMessageConversationIDKey is the attribute Key conforming to the // "messaging.message.conversation_id" semantic conventions. It represents // the [conversation ID](#conversations) identifying the conversation to // which the message belongs, represented as a string. Sometimes called // "Correlation ID". // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MyConversationID' MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to // the "messaging.message.payload_size_bytes" semantic conventions. It // represents the (uncompressed) size of the message payload in bytes. Also // use this attribute if it is unknown whether the compressed or // uncompressed payload size is reported. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2738 MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key // conforming to the "messaging.message.payload_compressed_size_bytes" // semantic conventions. It represents the compressed size of the message // payload in bytes. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2048 MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") ) // MessagingMessageID returns an attribute KeyValue conforming to the // "messaging.message.id" semantic conventions. It represents a value used by // the messaging system as an identifier for the message, represented as a // string. func MessagingMessageID(val string) attribute.KeyValue { return MessagingMessageIDKey.String(val) } // MessagingMessageConversationID returns an attribute KeyValue conforming // to the "messaging.message.conversation_id" semantic conventions. It // represents the [conversation ID](#conversations) identifying the // conversation to which the message belongs, represented as a string. // Sometimes called "Correlation ID". func MessagingMessageConversationID(val string) attribute.KeyValue { return MessagingMessageConversationIDKey.String(val) } // MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming // to the "messaging.message.payload_size_bytes" semantic conventions. It // represents the (uncompressed) size of the message payload in bytes. Also use // this attribute if it is unknown whether the compressed or uncompressed // payload size is reported. func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { return MessagingMessagePayloadSizeBytesKey.Int(val) } // MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue // conforming to the "messaging.message.payload_compressed_size_bytes" semantic // conventions. It represents the compressed size of the message payload in // bytes. 
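//
// Illustrative sketch only (example values): a hypothetical messaging
// producer instrumentation might record the per-message attributes above on
// a publish span as:
//
//	attrs := []attribute.KeyValue{
//		semconv.MessagingMessageID("452a7c7c7c7048c2f887f61572b18fc2"),
//		semconv.MessagingMessagePayloadSizeBytes(2738),
//		semconv.MessagingMessagePayloadCompressedSizeBytes(2048),
//	}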
func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) } // Semantic convention for attributes that describe messaging destination on // broker const ( // MessagingDestinationNameKey is the attribute Key conforming to the // "messaging.destination.name" semantic conventions. It represents the // message destination name // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MyQueue', 'MyTopic' // Note: Destination name SHOULD uniquely identify a specific queue, topic // or other entity within the broker. If // the broker does not have such notion, the destination name SHOULD // uniquely identify the broker. MessagingDestinationNameKey = attribute.Key("messaging.destination.name") // MessagingDestinationTemplateKey is the attribute Key conforming to the // "messaging.destination.template" semantic conventions. It represents the // low cardinality representation of the messaging destination name // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '/customers/{customerID}' // Note: Destination names could be constructed from templates. An example // would be a destination name involving a user name or product id. // Although the destination name in this case is of high cardinality, the // underlying template is of low cardinality and can be effectively used // for grouping and aggregation. MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") // MessagingDestinationTemporaryKey is the attribute Key conforming to the // "messaging.destination.temporary" semantic conventions. It represents a // boolean that is true if the message destination is temporary and might // not exist anymore after messages are processed. // // Type: boolean // RequirementLevel: Optional // Stability: stable MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") // MessagingDestinationAnonymousKey is the attribute Key conforming to the // "messaging.destination.anonymous" semantic conventions. It represents a // boolean that is true if the message destination is anonymous (could be // unnamed or have auto-generated name). // // Type: boolean // RequirementLevel: Optional // Stability: stable MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") ) // MessagingDestinationName returns an attribute KeyValue conforming to the // "messaging.destination.name" semantic conventions. It represents the message // destination name func MessagingDestinationName(val string) attribute.KeyValue { return MessagingDestinationNameKey.String(val) } // MessagingDestinationTemplate returns an attribute KeyValue conforming to // the "messaging.destination.template" semantic conventions. It represents the // low cardinality representation of the messaging destination name func MessagingDestinationTemplate(val string) attribute.KeyValue { return MessagingDestinationTemplateKey.String(val) } // MessagingDestinationTemporary returns an attribute KeyValue conforming to // the "messaging.destination.temporary" semantic conventions. It represents a // boolean that is true if the message destination is temporary and might not // exist anymore after messages are processed. 
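//
// Illustrative sketch only (example values): a hypothetical broker
// instrumentation might describe a templated, temporary destination as:
//
//	attrs := []attribute.KeyValue{
//		semconv.MessagingDestinationName("MyQueue"),
//		semconv.MessagingDestinationTemplate("/customers/{customerID}"),
//		semconv.MessagingDestinationTemporary(true),
//	}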
func MessagingDestinationTemporary(val bool) attribute.KeyValue { return MessagingDestinationTemporaryKey.Bool(val) } // MessagingDestinationAnonymous returns an attribute KeyValue conforming to // the "messaging.destination.anonymous" semantic conventions. It represents a // boolean that is true if the message destination is anonymous (could be // unnamed or have auto-generated name). func MessagingDestinationAnonymous(val bool) attribute.KeyValue { return MessagingDestinationAnonymousKey.Bool(val) } // Attributes for RabbitMQ const ( // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key // conforming to the "messaging.rabbitmq.destination.routing_key" semantic // conventions. It represents the rabbitMQ message routing key. // // Type: string // RequirementLevel: ConditionallyRequired (If not empty.) // Stability: stable // Examples: 'myKey' MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") ) // MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue // conforming to the "messaging.rabbitmq.destination.routing_key" semantic // conventions. It represents the rabbitMQ message routing key. func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { return MessagingRabbitmqDestinationRoutingKeyKey.String(val) } // Attributes for Apache Kafka const ( // MessagingKafkaMessageKeyKey is the attribute Key conforming to the // "messaging.kafka.message.key" semantic conventions. It represents the // message keys in Kafka are used for grouping alike messages to ensure // they're processed on the same partition. They differ from // `messaging.message.id` in that they're not unique. If the key is `null`, // the attribute MUST NOT be set. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'myKey' // Note: If the key type is not string, it's string representation has to // be supplied for the attribute. If the key has no unambiguous, canonical // string form, don't include its value. MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the // "messaging.kafka.consumer.group" semantic conventions. It represents the // name of the Kafka Consumer Group that is handling the message. Only // applies to consumers, not producers. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'my-group' MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to // the "messaging.kafka.destination.partition" semantic conventions. It // represents the partition the message is sent to. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 2 MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the // "messaging.kafka.message.offset" semantic conventions. It represents the // offset of a record in the corresponding Kafka partition. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 42 MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the // "messaging.kafka.message.tombstone" semantic conventions. It represents // a boolean that is true if the message is a tombstone. 
// // Type: boolean // RequirementLevel: ConditionallyRequired (If value is `true`. When // missing, the value is assumed to be `false`.) // Stability: stable MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") ) // MessagingKafkaMessageKey returns an attribute KeyValue conforming to the // "messaging.kafka.message.key" semantic conventions. It represents the // message keys in Kafka are used for grouping alike messages to ensure they're // processed on the same partition. They differ from `messaging.message.id` in // that they're not unique. If the key is `null`, the attribute MUST NOT be // set. func MessagingKafkaMessageKey(val string) attribute.KeyValue { return MessagingKafkaMessageKeyKey.String(val) } // MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to // the "messaging.kafka.consumer.group" semantic conventions. It represents the // name of the Kafka Consumer Group that is handling the message. Only applies // to consumers, not producers. func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { return MessagingKafkaConsumerGroupKey.String(val) } // MessagingKafkaDestinationPartition returns an attribute KeyValue // conforming to the "messaging.kafka.destination.partition" semantic // conventions. It represents the partition the message is sent to. func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { return MessagingKafkaDestinationPartitionKey.Int(val) } // MessagingKafkaMessageOffset returns an attribute KeyValue conforming to // the "messaging.kafka.message.offset" semantic conventions. It represents the // offset of a record in the corresponding Kafka partition. func MessagingKafkaMessageOffset(val int) attribute.KeyValue { return MessagingKafkaMessageOffsetKey.Int(val) } // MessagingKafkaMessageTombstone returns an attribute KeyValue conforming // to the "messaging.kafka.message.tombstone" semantic conventions. It // represents a boolean that is true if the message is a tombstone. func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { return MessagingKafkaMessageTombstoneKey.Bool(val) } // Attributes for Apache RocketMQ const ( // MessagingRocketmqNamespaceKey is the attribute Key conforming to the // "messaging.rocketmq.namespace" semantic conventions. It represents the // namespace of RocketMQ resources, resources in different namespaces are // individual. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myNamespace' MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") // MessagingRocketmqClientGroupKey is the attribute Key conforming to the // "messaging.rocketmq.client_group" semantic conventions. It represents // the name of the RocketMQ producer/consumer group that is handling the // message. The client type is identified by the SpanKind. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myConsumerGroup' MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key // conforming to the "messaging.rocketmq.message.delivery_timestamp" // semantic conventions. It represents the timestamp in milliseconds that // the delay message is expected to be delivered to consumer. // // Type: int // RequirementLevel: ConditionallyRequired (If the message type is delay // and delay time level is not specified.) 
// Stability: stable // Examples: 1665987217045 MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key // conforming to the "messaging.rocketmq.message.delay_time_level" semantic // conventions. It represents the delay time level for delay message, which // determines the message delay time. // // Type: int // RequirementLevel: ConditionallyRequired (If the message type is delay // and delivery timestamp is not specified.) // Stability: stable // Examples: 3 MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the // "messaging.rocketmq.message.group" semantic conventions. It represents // the it is essential for FIFO message. Messages that belong to the same // message group are always processed one by one within the same consumer // group. // // Type: string // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) // Stability: stable // Examples: 'myMessageGroup' MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the // "messaging.rocketmq.message.type" semantic conventions. It represents // the type of message. // // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") // MessagingRocketmqMessageTagKey is the attribute Key conforming to the // "messaging.rocketmq.message.tag" semantic conventions. It represents the // secondary classifier of message besides topic. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'tagA' MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the // "messaging.rocketmq.message.keys" semantic conventions. It represents // the key(s) of message, another way to mark message besides message id. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'keyA', 'keyB' MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to // the "messaging.rocketmq.consumption_model" semantic conventions. It // represents the model of message consumption. This only applies to // consumer spans. 
// // Type: Enum // RequirementLevel: Optional // Stability: stable MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") ) var ( // Normal message MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") // FIFO message MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") // Delay message MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") // Transaction message MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") ) var ( // Clustering consumption model MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") // Broadcasting consumption model MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") ) // MessagingRocketmqNamespace returns an attribute KeyValue conforming to // the "messaging.rocketmq.namespace" semantic conventions. It represents the // namespace of RocketMQ resources, resources in different namespaces are // individual. func MessagingRocketmqNamespace(val string) attribute.KeyValue { return MessagingRocketmqNamespaceKey.String(val) } // MessagingRocketmqClientGroup returns an attribute KeyValue conforming to // the "messaging.rocketmq.client_group" semantic conventions. It represents // the name of the RocketMQ producer/consumer group that is handling the // message. The client type is identified by the SpanKind. func MessagingRocketmqClientGroup(val string) attribute.KeyValue { return MessagingRocketmqClientGroupKey.String(val) } // MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue // conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic // conventions. It represents the timestamp in milliseconds that the delay // message is expected to be delivered to consumer. func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) } // MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue // conforming to the "messaging.rocketmq.message.delay_time_level" semantic // conventions. It represents the delay time level for delay message, which // determines the message delay time. func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) } // MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to // the "messaging.rocketmq.message.group" semantic conventions. It represents // the it is essential for FIFO message. Messages that belong to the same // message group are always processed one by one within the same consumer // group. func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { return MessagingRocketmqMessageGroupKey.String(val) } // MessagingRocketmqMessageTag returns an attribute KeyValue conforming to // the "messaging.rocketmq.message.tag" semantic conventions. It represents the // secondary classifier of message besides topic. func MessagingRocketmqMessageTag(val string) attribute.KeyValue { return MessagingRocketmqMessageTagKey.String(val) } // MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to // the "messaging.rocketmq.message.keys" semantic conventions. It represents // the key(s) of message, another way to mark message besides message id. 
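//
// Illustrative sketch only (example values): a hypothetical RocketMQ
// producer instrumentation might combine these attributes, including the
// variadic string-slice helper below, as:
//
//	attrs := []attribute.KeyValue{
//		semconv.MessagingRocketmqNamespace("myNamespace"),
//		semconv.MessagingRocketmqClientGroup("myConsumerGroup"),
//		semconv.MessagingRocketmqMessageTypeFifo,
//		semconv.MessagingRocketmqMessageKeys("keyA", "keyB"),
//	}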
func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { return MessagingRocketmqMessageKeysKey.StringSlice(val) } // Attributes describing URL. const ( // URLSchemeKey is the attribute Key conforming to the "url.scheme" // semantic conventions. It represents the [URI // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component // identifying the used protocol. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'https', 'ftp', 'telnet' URLSchemeKey = attribute.Key("url.scheme") // URLFullKey is the attribute Key conforming to the "url.full" semantic // conventions. It represents the absolute URL describing a network // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', // '//localhost' // Note: For network calls, URL usually has // `scheme://host[:port][path][?query][#fragment]` format, where the // fragment is not transmitted over HTTP, but if it is known, it should be // included nevertheless. // `url.full` MUST NOT contain credentials passed via URL in form of // `https://username:password@www.example.com/`. In such case username and // password should be redacted and attribute's value should be // `https://REDACTED:REDACTED@www.example.com/`. // `url.full` SHOULD capture the absolute URL when it is available (or can // be reconstructed) and SHOULD NOT be validated or modified except for // sanitizing purposes. URLFullKey = attribute.Key("url.full") // URLPathKey is the attribute Key conforming to the "url.path" semantic // conventions. It represents the [URI // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '/search' // Note: When missing, the value is assumed to be `/` URLPathKey = attribute.Key("url.path") // URLQueryKey is the attribute Key conforming to the "url.query" semantic // conventions. It represents the [URI // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'q=OpenTelemetry' // Note: Sensitive content provided in query string SHOULD be scrubbed when // instrumentations can identify it. URLQueryKey = attribute.Key("url.query") // URLFragmentKey is the attribute Key conforming to the "url.fragment" // semantic conventions. It represents the [URI // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'SemConv' URLFragmentKey = attribute.Key("url.fragment") ) // URLScheme returns an attribute KeyValue conforming to the "url.scheme" // semantic conventions. It represents the [URI // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component // identifying the used protocol. func URLScheme(val string) attribute.KeyValue { return URLSchemeKey.String(val) } // URLFull returns an attribute KeyValue conforming to the "url.full" // semantic conventions. It represents the absolute URL describing a network // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) func URLFull(val string) attribute.KeyValue { return URLFullKey.String(val) } // URLPath returns an attribute KeyValue conforming to the "url.path" // semantic conventions. 
It represents the [URI // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component func URLPath(val string) attribute.KeyValue { return URLPathKey.String(val) } // URLQuery returns an attribute KeyValue conforming to the "url.query" // semantic conventions. It represents the [URI // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component func URLQuery(val string) attribute.KeyValue { return URLQueryKey.String(val) } // URLFragment returns an attribute KeyValue conforming to the // "url.fragment" semantic conventions. It represents the [URI // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component func URLFragment(val string) attribute.KeyValue { return URLFragmentKey.String(val) } // Describes user-agent attributes. const ( // UserAgentOriginalKey is the attribute Key conforming to the // "user_agent.original" semantic conventions. It represents the value of // the [HTTP // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) // header sent by the client. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' UserAgentOriginalKey = attribute.Key("user_agent.original") ) // UserAgentOriginal returns an attribute KeyValue conforming to the // "user_agent.original" semantic conventions. It represents the value of the // [HTTP // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) // header sent by the client. func UserAgentOriginal(val string) attribute.KeyValue { return UserAgentOriginalKey.String(val) } opentelemetry-go-1.21.0/semconv/v1.21.0/doc.go000066400000000000000000000016641452547353200205660ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package semconv implements OpenTelemetry semantic conventions. // // OpenTelemetry semantic conventions are agreed standardized naming // patterns for OpenTelemetry things. This package represents the conventions // as of the v1.21.0 version of the OpenTelemetry specification. package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" opentelemetry-go-1.21.0/semconv/v1.21.0/event.go000066400000000000000000000173141452547353200211410ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. 
package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" import "go.opentelemetry.io/otel/attribute" // This semantic convention defines the attributes used to represent a feature // flag evaluation as an event. const ( // FeatureFlagKeyKey is the attribute Key conforming to the // "feature_flag.key" semantic conventions. It represents the unique // identifier of the feature flag. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'logo-color' FeatureFlagKeyKey = attribute.Key("feature_flag.key") // FeatureFlagProviderNameKey is the attribute Key conforming to the // "feature_flag.provider_name" semantic conventions. It represents the // name of the service provider that performs the flag evaluation. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'Flag Manager' FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") // FeatureFlagVariantKey is the attribute Key conforming to the // "feature_flag.variant" semantic conventions. It represents the sHOULD be // a semantic identifier for a value. If one is unavailable, a stringified // version of the value can be used. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'red', 'true', 'on' // Note: A semantic identifier, commonly referred to as a variant, provides // a means // for referring to a value without including the value itself. This can // provide additional context for understanding the meaning behind a value. // For example, the variant `red` maybe be used for the value `#c05543`. // // A stringified version of the value can be used in situations where a // semantic identifier is unavailable. String representation of the value // should be determined by the implementer. FeatureFlagVariantKey = attribute.Key("feature_flag.variant") ) // FeatureFlagKey returns an attribute KeyValue conforming to the // "feature_flag.key" semantic conventions. It represents the unique identifier // of the feature flag. func FeatureFlagKey(val string) attribute.KeyValue { return FeatureFlagKeyKey.String(val) } // FeatureFlagProviderName returns an attribute KeyValue conforming to the // "feature_flag.provider_name" semantic conventions. It represents the name of // the service provider that performs the flag evaluation. func FeatureFlagProviderName(val string) attribute.KeyValue { return FeatureFlagProviderNameKey.String(val) } // FeatureFlagVariant returns an attribute KeyValue conforming to the // "feature_flag.variant" semantic conventions. It represents the sHOULD be a // semantic identifier for a value. If one is unavailable, a stringified // version of the value can be used. func FeatureFlagVariant(val string) attribute.KeyValue { return FeatureFlagVariantKey.String(val) } // RPC received/sent message. const ( // MessageTypeKey is the attribute Key conforming to the "message.type" // semantic conventions. It represents the whether this is a received or // sent message. // // Type: Enum // RequirementLevel: Optional // Stability: stable MessageTypeKey = attribute.Key("message.type") // MessageIDKey is the attribute Key conforming to the "message.id" // semantic conventions. It represents the mUST be calculated as two // different counters starting from `1` one for sent messages and one for // received message. // // Type: int // RequirementLevel: Optional // Stability: stable // Note: This way we guarantee that the values will be consistent between // different implementations. 
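//
// Illustrative sketch only (example values): a hypothetical RPC client
// instrumentation might annotate a sent-message span event with the message
// attributes defined below, assuming span is a trace.Span from
// go.opentelemetry.io/otel/trace:
//
//	span.AddEvent("message", trace.WithAttributes(
//		semconv.MessageTypeSent,
//		semconv.MessageID(1),
//		semconv.MessageUncompressedSize(1024),
//	))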
MessageIDKey = attribute.Key("message.id") // MessageCompressedSizeKey is the attribute Key conforming to the // "message.compressed_size" semantic conventions. It represents the // compressed size of the message in bytes. // // Type: int // RequirementLevel: Optional // Stability: stable MessageCompressedSizeKey = attribute.Key("message.compressed_size") // MessageUncompressedSizeKey is the attribute Key conforming to the // "message.uncompressed_size" semantic conventions. It represents the // uncompressed size of the message in bytes. // // Type: int // RequirementLevel: Optional // Stability: stable MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") ) var ( // sent MessageTypeSent = MessageTypeKey.String("SENT") // received MessageTypeReceived = MessageTypeKey.String("RECEIVED") ) // MessageID returns an attribute KeyValue conforming to the "message.id" // semantic conventions. It represents the mUST be calculated as two different // counters starting from `1` one for sent messages and one for received // message. func MessageID(val int) attribute.KeyValue { return MessageIDKey.Int(val) } // MessageCompressedSize returns an attribute KeyValue conforming to the // "message.compressed_size" semantic conventions. It represents the compressed // size of the message in bytes. func MessageCompressedSize(val int) attribute.KeyValue { return MessageCompressedSizeKey.Int(val) } // MessageUncompressedSize returns an attribute KeyValue conforming to the // "message.uncompressed_size" semantic conventions. It represents the // uncompressed size of the message in bytes. func MessageUncompressedSize(val int) attribute.KeyValue { return MessageUncompressedSizeKey.Int(val) } // The attributes used to report a single exception associated with a span. const ( // ExceptionEscapedKey is the attribute Key conforming to the // "exception.escaped" semantic conventions. It represents the sHOULD be // set to true if the exception event is recorded at a point where it is // known that the exception is escaping the scope of the span. // // Type: boolean // RequirementLevel: Optional // Stability: stable // Note: An exception is considered to have escaped (or left) the scope of // a span, // if that span is ended while the exception is still logically "in // flight". // This may be actually "in flight" in some languages (e.g. if the // exception // is passed to a Context manager's `__exit__` method in Python) but will // usually be caught at the point of recording the exception in most // languages. // // It is usually not possible to determine at the point where an exception // is thrown // whether it will escape the scope of a span. // However, it is trivial to know that an exception // will escape, if one checks for an active exception just before ending // the span, // as done in the [example above](#recording-an-exception). // // It follows that an exception may still escape the scope of the span // even if the `exception.escaped` attribute was not set or set to false, // since the event might have been recorded at a time where it was not // clear whether the exception will escape. ExceptionEscapedKey = attribute.Key("exception.escaped") ) // ExceptionEscaped returns an attribute KeyValue conforming to the // "exception.escaped" semantic conventions. It represents the sHOULD be set to // true if the exception event is recorded at a point where it is known that // the exception is escaping the scope of the span. 
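// Illustrative, non-normative sketch of recording an escaping exception just
// before ending a span (the span and err variables are assumed to be provided
// by the surrounding instrumentation; trace refers to
// go.opentelemetry.io/otel/trace):
//
//	span.RecordError(err, trace.WithAttributes(semconv.ExceptionEscaped(true)))
//	span.End()
//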
func ExceptionEscaped(val bool) attribute.KeyValue { return ExceptionEscapedKey.Bool(val) } opentelemetry-go-1.21.0/semconv/v1.21.0/exception.go000066400000000000000000000014301452547353200220060ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" const ( // ExceptionEventName is the name of the Span event representing an exception. ExceptionEventName = "exception" ) opentelemetry-go-1.21.0/semconv/v1.21.0/resource.go000066400000000000000000002627051452547353200216550ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" import "go.opentelemetry.io/otel/attribute" // The web browser in which the application represented by the resource is // running. The `browser.*` attributes MUST be used only for resources that // represent applications running in a web browser (regardless of whether // running on a mobile or desktop device). const ( // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" // semantic conventions. It represents the array of brand name and version // separated by a space // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (`navigator.userAgentData.brands`). BrowserBrandsKey = attribute.Key("browser.brands") // BrowserPlatformKey is the attribute Key conforming to the // "browser.platform" semantic conventions. It represents the platform on // which the browser is running // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Windows', 'macOS', 'Android' // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (`navigator.userAgentData.platform`). If unavailable, the legacy // `navigator.platform` API SHOULD NOT be used instead and this attribute // SHOULD be left unset in order for the values to be consistent. // The list of possible values is defined in the [W3C User-Agent Client // Hints // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). 
// Note that some (but not all) of these values can overlap with values in // the [`os.type` and `os.name` attributes](./os.md). However, for // consistency, the values in the `browser.platform` attribute should // capture the exact value that the user agent provides. BrowserPlatformKey = attribute.Key("browser.platform") // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" // semantic conventions. It represents a boolean that is true if the // browser is running on a mobile device // // Type: boolean // RequirementLevel: Optional // Stability: stable // Note: This value is intended to be taken from the [UA client hints // API](https://wicg.github.io/ua-client-hints/#interface) // (`navigator.userAgentData.mobile`). If unavailable, this attribute // SHOULD be left unset. BrowserMobileKey = attribute.Key("browser.mobile") // BrowserLanguageKey is the attribute Key conforming to the // "browser.language" semantic conventions. It represents the preferred // language of the user using the browser // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'en', 'en-US', 'fr', 'fr-FR' // Note: This value is intended to be taken from the Navigator API // `navigator.language`. BrowserLanguageKey = attribute.Key("browser.language") ) // BrowserBrands returns an attribute KeyValue conforming to the // "browser.brands" semantic conventions. It represents the array of brand name // and version separated by a space func BrowserBrands(val ...string) attribute.KeyValue { return BrowserBrandsKey.StringSlice(val) } // BrowserPlatform returns an attribute KeyValue conforming to the // "browser.platform" semantic conventions. It represents the platform on which // the browser is running func BrowserPlatform(val string) attribute.KeyValue { return BrowserPlatformKey.String(val) } // BrowserMobile returns an attribute KeyValue conforming to the // "browser.mobile" semantic conventions. It represents a boolean that is true // if the browser is running on a mobile device func BrowserMobile(val bool) attribute.KeyValue { return BrowserMobileKey.Bool(val) } // BrowserLanguage returns an attribute KeyValue conforming to the // "browser.language" semantic conventions. It represents the preferred // language of the user using the browser func BrowserLanguage(val string) attribute.KeyValue { return BrowserLanguageKey.String(val) } // A cloud environment (e.g. GCP, Azure, AWS) const ( // CloudProviderKey is the attribute Key conforming to the "cloud.provider" // semantic conventions. It represents the name of the cloud provider. // // Type: Enum // RequirementLevel: Optional // Stability: stable CloudProviderKey = attribute.Key("cloud.provider") // CloudAccountIDKey is the attribute Key conforming to the // "cloud.account.id" semantic conventions. It represents the cloud account // ID the resource is assigned to. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '111111111111', 'opentelemetry' CloudAccountIDKey = attribute.Key("cloud.account.id") // CloudRegionKey is the attribute Key conforming to the "cloud.region" // semantic conventions. It represents the geographical region the resource // is running. 
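// Illustrative, non-normative sketch of attaching this attribute to an SDK
// resource (resource refers to go.opentelemetry.io/otel/sdk/resource; the
// region value is a placeholder):
//
//	res := resource.NewWithAttributes(
//		semconv.SchemaURL,
//		semconv.CloudProviderAWS,
//		semconv.CloudRegion("us-east-1"),
//	)
//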
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-central1', 'us-east-1' // Note: Refer to your provider's docs to see the available regions, for // example [Alibaba Cloud // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), // [Azure // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), // [Google Cloud regions](https://cloud.google.com/about/locations), or // [Tencent Cloud // regions](https://www.tencentcloud.com/document/product/213/6091). CloudRegionKey = attribute.Key("cloud.region") // CloudResourceIDKey is the attribute Key conforming to the // "cloud.resource_id" semantic conventions. It represents the cloud // provider-specific native identifier of the monitored cloud resource // (e.g. an // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // on AWS, a [fully qualified resource // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) // on Azure, a [full resource // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) // on GCP) // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' // Note: On some cloud providers, it may not be possible to determine the // full ID at startup, // so it may be necessary to set `cloud.resource_id` as a span attribute // instead. // // The exact value to use for `cloud.resource_id` depends on the cloud // provider. // The following well-known definitions MUST be used if you set this // attribute and they apply: // // * **AWS Lambda:** The function // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). // Take care not to use the "invoked ARN" directly but replace any // [alias // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) // with the resolved function version, as the same runtime instance may // be invokable with // multiple different aliases. // * **GCP:** The [URI of the // resource](https://cloud.google.com/iam/docs/full-resource-names) // * **Azure:** The [Fully Qualified Resource // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) // of the invoked function, // *not* the function app, having the form // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. // This means that a span attribute MUST be used, as an Azure function // app can host multiple functions that would usually share // a TracerProvider. CloudResourceIDKey = attribute.Key("cloud.resource_id") // CloudAvailabilityZoneKey is the attribute Key conforming to the // "cloud.availability_zone" semantic conventions. It represents the cloud // regions often have multiple, isolated locations known as zones to // increase availability. Availability zone represents the zone where the // resource is running. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-east-1c' // Note: Availability zones are called "zones" on Alibaba Cloud and Google // Cloud. CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" // semantic conventions. 
It represents the cloud platform in use. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: The prefix of the service SHOULD match the one specified in // `cloud.provider`. CloudPlatformKey = attribute.Key("cloud.platform") ) var ( // Alibaba Cloud CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") // Amazon Web Services CloudProviderAWS = CloudProviderKey.String("aws") // Microsoft Azure CloudProviderAzure = CloudProviderKey.String("azure") // Google Cloud Platform CloudProviderGCP = CloudProviderKey.String("gcp") // Heroku Platform as a Service CloudProviderHeroku = CloudProviderKey.String("heroku") // IBM Cloud CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") // Tencent Cloud CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") ) var ( // Alibaba Cloud Elastic Compute Service CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") // Alibaba Cloud Function Compute CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") // Red Hat OpenShift on Alibaba Cloud CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") // AWS Elastic Compute Cloud CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") // AWS Elastic Container Service CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") // AWS Elastic Kubernetes Service CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") // AWS Lambda CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") // AWS Elastic Beanstalk CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") // AWS App Runner CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") // Red Hat OpenShift on AWS (ROSA) CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") // Azure Virtual Machines CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") // Azure Container Instances CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") // Azure Kubernetes Service CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") // Azure Functions CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") // Azure App Service CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") // Azure Red Hat OpenShift CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") // Google Bare Metal Solution (BMS) CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") // Google Cloud Compute Engine (GCE) CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") // Google Cloud Run CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") // Google Cloud Kubernetes Engine (GKE) CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") // Google Cloud Functions (GCF) CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") // Google Cloud App Engine (GAE) CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") // Red Hat OpenShift on Google Cloud CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") // Red Hat OpenShift on IBM Cloud CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") // Tencent Cloud Cloud Virtual Machine (CVM) CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") // Tencent Cloud Elastic Kubernetes Service (EKS) CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") // Tencent Cloud 
Serverless Cloud Function (SCF) CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") ) // CloudAccountID returns an attribute KeyValue conforming to the // "cloud.account.id" semantic conventions. It represents the cloud account ID // the resource is assigned to. func CloudAccountID(val string) attribute.KeyValue { return CloudAccountIDKey.String(val) } // CloudRegion returns an attribute KeyValue conforming to the // "cloud.region" semantic conventions. It represents the geographical region // the resource is running. func CloudRegion(val string) attribute.KeyValue { return CloudRegionKey.String(val) } // CloudResourceID returns an attribute KeyValue conforming to the // "cloud.resource_id" semantic conventions. It represents the cloud // provider-specific native identifier of the monitored cloud resource (e.g. an // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // on AWS, a [fully qualified resource // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) // on Azure, a [full resource // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) // on GCP) func CloudResourceID(val string) attribute.KeyValue { return CloudResourceIDKey.String(val) } // CloudAvailabilityZone returns an attribute KeyValue conforming to the // "cloud.availability_zone" semantic conventions. It represents the cloud // regions often have multiple, isolated locations known as zones to increase // availability. Availability zone represents the zone where the resource is // running. func CloudAvailabilityZone(val string) attribute.KeyValue { return CloudAvailabilityZoneKey.String(val) } // Resources used by AWS Elastic Container Service (ECS). const ( // AWSECSContainerARNKey is the attribute Key conforming to the // "aws.ecs.container.arn" semantic conventions. It represents the Amazon // Resource Name (ARN) of an [ECS container // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") // AWSECSClusterARNKey is the attribute Key conforming to the // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an // [ECS // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") // AWSECSLaunchtypeKey is the attribute Key conforming to the // "aws.ecs.launchtype" semantic conventions. It represents the [launch // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) // for an ECS task. // // Type: Enum // RequirementLevel: Optional // Stability: stable AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") // AWSECSTaskARNKey is the attribute Key conforming to the // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an // [ECS task // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") // AWSECSTaskFamilyKey is the attribute Key conforming to the // "aws.ecs.task.family" semantic conventions. It represents the task // definition family this task definition is a member of. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-family' AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") // AWSECSTaskRevisionKey is the attribute Key conforming to the // "aws.ecs.task.revision" semantic conventions. It represents the revision // for this task definition. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '8', '26' AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") ) var ( // ec2 AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") // fargate AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") ) // AWSECSContainerARN returns an attribute KeyValue conforming to the // "aws.ecs.container.arn" semantic conventions. It represents the Amazon // Resource Name (ARN) of an [ECS container // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). func AWSECSContainerARN(val string) attribute.KeyValue { return AWSECSContainerARNKey.String(val) } // AWSECSClusterARN returns an attribute KeyValue conforming to the // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). func AWSECSClusterARN(val string) attribute.KeyValue { return AWSECSClusterARNKey.String(val) } // AWSECSTaskARN returns an attribute KeyValue conforming to the // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS // task // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). func AWSECSTaskARN(val string) attribute.KeyValue { return AWSECSTaskARNKey.String(val) } // AWSECSTaskFamily returns an attribute KeyValue conforming to the // "aws.ecs.task.family" semantic conventions. It represents the task // definition family this task definition is a member of. func AWSECSTaskFamily(val string) attribute.KeyValue { return AWSECSTaskFamilyKey.String(val) } // AWSECSTaskRevision returns an attribute KeyValue conforming to the // "aws.ecs.task.revision" semantic conventions. It represents the revision for // this task definition. func AWSECSTaskRevision(val string) attribute.KeyValue { return AWSECSTaskRevisionKey.String(val) } // Resources used by AWS Elastic Kubernetes Service (EKS). const ( // AWSEKSClusterARNKey is the attribute Key conforming to the // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an // EKS cluster. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") ) // AWSEKSClusterARN returns an attribute KeyValue conforming to the // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS // cluster. func AWSEKSClusterARN(val string) attribute.KeyValue { return AWSEKSClusterARNKey.String(val) } // Resources specific to Amazon Web Services. const ( // AWSLogGroupNamesKey is the attribute Key conforming to the // "aws.log.group.names" semantic conventions. 
It represents the name(s) of // the AWS log group(s) an application is writing to. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '/aws/lambda/my-function', 'opentelemetry-service' // Note: Multiple log groups must be supported for cases like // multi-container applications, where a single application has sidecar // containers, and each write to their own log group. AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") // AWSLogGroupARNsKey is the attribute Key conforming to the // "aws.log.group.arns" semantic conventions. It represents the Amazon // Resource Name(s) (ARN) of the AWS log group(s). // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' // Note: See the [log group ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") // AWSLogStreamNamesKey is the attribute Key conforming to the // "aws.log.stream.names" semantic conventions. It represents the name(s) // of the AWS log stream(s) an application is writing to. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") // AWSLogStreamARNsKey is the attribute Key conforming to the // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of // the AWS log stream(s). // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' // Note: See the [log stream ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). // One log group can contain several log streams, so these ARNs necessarily // identify both a log group and a log stream. AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") ) // AWSLogGroupNames returns an attribute KeyValue conforming to the // "aws.log.group.names" semantic conventions. It represents the name(s) of the // AWS log group(s) an application is writing to. func AWSLogGroupNames(val ...string) attribute.KeyValue { return AWSLogGroupNamesKey.StringSlice(val) } // AWSLogGroupARNs returns an attribute KeyValue conforming to the // "aws.log.group.arns" semantic conventions. It represents the Amazon Resource // Name(s) (ARN) of the AWS log group(s). func AWSLogGroupARNs(val ...string) attribute.KeyValue { return AWSLogGroupARNsKey.StringSlice(val) } // AWSLogStreamNames returns an attribute KeyValue conforming to the // "aws.log.stream.names" semantic conventions. It represents the name(s) of // the AWS log stream(s) an application is writing to. func AWSLogStreamNames(val ...string) attribute.KeyValue { return AWSLogStreamNamesKey.StringSlice(val) } // AWSLogStreamARNs returns an attribute KeyValue conforming to the // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the // AWS log stream(s). func AWSLogStreamARNs(val ...string) attribute.KeyValue { return AWSLogStreamARNsKey.StringSlice(val) } // Resource used by Google Cloud Run. const ( // GCPCloudRunJobExecutionKey is the attribute Key conforming to the // "gcp.cloud_run.job.execution" semantic conventions. 
It represents the // name of the Cloud Run // [execution](https://cloud.google.com/run/docs/managing/job-executions) // being run for the Job, as set by the // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) // environment variable. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'job-name-xxxx', 'sample-job-mdw84' GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the // "gcp.cloud_run.job.task_index" semantic conventions. It represents the // index for a task within an execution as provided by the // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) // environment variable. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 0, 1 GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") ) // GCPCloudRunJobExecution returns an attribute KeyValue conforming to the // "gcp.cloud_run.job.execution" semantic conventions. It represents the name // of the Cloud Run // [execution](https://cloud.google.com/run/docs/managing/job-executions) being // run for the Job, as set by the // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) // environment variable. func GCPCloudRunJobExecution(val string) attribute.KeyValue { return GCPCloudRunJobExecutionKey.String(val) } // GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the // "gcp.cloud_run.job.task_index" semantic conventions. It represents the index // for a task within an execution as provided by the // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) // environment variable. func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { return GCPCloudRunJobTaskIndexKey.Int(val) } // Resources used by Google Compute Engine (GCE). const ( // GCPGceInstanceNameKey is the attribute Key conforming to the // "gcp.gce.instance.name" semantic conventions. It represents the instance // name of a GCE instance. This is the value provided by `host.name`, the // visible name of the instance in the Cloud Console UI, and the prefix for // the default hostname of the instance as defined by the [default internal // DNS // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'instance-1', 'my-vm-name' GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") // GCPGceInstanceHostnameKey is the attribute Key conforming to the // "gcp.gce.instance.hostname" semantic conventions. It represents the // hostname of a GCE instance. This is the full value of the default or // [custom // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'my-host1234.example.com', // 'sample-vm.us-west1-b.c.my-project.internal' GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") ) // GCPGceInstanceName returns an attribute KeyValue conforming to the // "gcp.gce.instance.name" semantic conventions. It represents the instance // name of a GCE instance. 
This is the value provided by `host.name`, the // visible name of the instance in the Cloud Console UI, and the prefix for the // default hostname of the instance as defined by the [default internal DNS // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). func GCPGceInstanceName(val string) attribute.KeyValue { return GCPGceInstanceNameKey.String(val) } // GCPGceInstanceHostname returns an attribute KeyValue conforming to the // "gcp.gce.instance.hostname" semantic conventions. It represents the hostname // of a GCE instance. This is the full value of the default or [custom // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). func GCPGceInstanceHostname(val string) attribute.KeyValue { return GCPGceInstanceHostnameKey.String(val) } // Heroku dyno metadata const ( // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the // "heroku.release.creation_timestamp" semantic conventions. It represents // the time and date the release was created // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2022-10-23T18:00:42Z' HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") // HerokuReleaseCommitKey is the attribute Key conforming to the // "heroku.release.commit" semantic conventions. It represents the commit // hash for the current release // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" // semantic conventions. It represents the unique identifier for the // application // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' HerokuAppIDKey = attribute.Key("heroku.app.id") ) // HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming // to the "heroku.release.creation_timestamp" semantic conventions. It // represents the time and date the release was created func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { return HerokuReleaseCreationTimestampKey.String(val) } // HerokuReleaseCommit returns an attribute KeyValue conforming to the // "heroku.release.commit" semantic conventions. It represents the commit hash // for the current release func HerokuReleaseCommit(val string) attribute.KeyValue { return HerokuReleaseCommitKey.String(val) } // HerokuAppID returns an attribute KeyValue conforming to the // "heroku.app.id" semantic conventions. It represents the unique identifier // for the application func HerokuAppID(val string) attribute.KeyValue { return HerokuAppIDKey.String(val) } // A container instance. const ( // ContainerNameKey is the attribute Key conforming to the "container.name" // semantic conventions. It represents the container name used by container // runtime. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-autoconf' ContainerNameKey = attribute.Key("container.name") // ContainerIDKey is the attribute Key conforming to the "container.id" // semantic conventions. It represents the container ID. Usually a UUID, as // for example used to [identify Docker // containers](https://docs.docker.com/engine/reference/run/#container-identification). // The UUID might be abbreviated. 
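// Illustrative, non-normative sketch of populating container identity on an
// SDK resource (resource refers to go.opentelemetry.io/otel/sdk/resource; the
// values mirror the examples documented in this section):
//
//	res := resource.NewWithAttributes(
//		semconv.SchemaURL,
//		semconv.ContainerName("opentelemetry-autoconf"),
//		semconv.ContainerID("a3bf90e006b2"),
//	)
//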
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'a3bf90e006b2' ContainerIDKey = attribute.Key("container.id") // ContainerRuntimeKey is the attribute Key conforming to the // "container.runtime" semantic conventions. It represents the container // runtime managing this container. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'docker', 'containerd', 'rkt' ContainerRuntimeKey = attribute.Key("container.runtime") // ContainerImageNameKey is the attribute Key conforming to the // "container.image.name" semantic conventions. It represents the name of // the image the container was built on. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'gcr.io/opentelemetry/operator' ContainerImageNameKey = attribute.Key("container.image.name") // ContainerImageTagKey is the attribute Key conforming to the // "container.image.tag" semantic conventions. It represents the container // image tag. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0.1' ContainerImageTagKey = attribute.Key("container.image.tag") // ContainerImageIDKey is the attribute Key conforming to the // "container.image.id" semantic conventions. It represents the runtime // specific image identifier. Usually a hash algorithm followed by a UUID. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' // Note: Docker defines a sha256 of the image id; `container.image.id` // corresponds to the `Image` field from the Docker container inspect // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) // endpoint. // K8S defines a link to the container registry repository with digest // `"imageID": "registry.azurecr.io // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. // OCI defines a digest of manifest. ContainerImageIDKey = attribute.Key("container.image.id") // ContainerCommandKey is the attribute Key conforming to the // "container.command" semantic conventions. It represents the command used // to run the container (i.e. the command name). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'otelcontribcol' // Note: If using embedded credentials or sensitive data, it is recommended // to remove them to prevent potential leakage. ContainerCommandKey = attribute.Key("container.command") // ContainerCommandLineKey is the attribute Key conforming to the // "container.command_line" semantic conventions. It represents the full // command run by the container as a single string representing the full // command. [2] // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'otelcontribcol --config config.yaml' ContainerCommandLineKey = attribute.Key("container.command_line") // ContainerCommandArgsKey is the attribute Key conforming to the // "container.command_args" semantic conventions. It represents the all the // command arguments (including the command/executable itself) run by the // container. [2] // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'otelcontribcol, --config, config.yaml' ContainerCommandArgsKey = attribute.Key("container.command_args") ) // ContainerName returns an attribute KeyValue conforming to the // "container.name" semantic conventions. It represents the container name used // by container runtime. 
func ContainerName(val string) attribute.KeyValue { return ContainerNameKey.String(val) } // ContainerID returns an attribute KeyValue conforming to the // "container.id" semantic conventions. It represents the container ID. Usually // a UUID, as for example used to [identify Docker // containers](https://docs.docker.com/engine/reference/run/#container-identification). // The UUID might be abbreviated. func ContainerID(val string) attribute.KeyValue { return ContainerIDKey.String(val) } // ContainerRuntime returns an attribute KeyValue conforming to the // "container.runtime" semantic conventions. It represents the container // runtime managing this container. func ContainerRuntime(val string) attribute.KeyValue { return ContainerRuntimeKey.String(val) } // ContainerImageName returns an attribute KeyValue conforming to the // "container.image.name" semantic conventions. It represents the name of the // image the container was built on. func ContainerImageName(val string) attribute.KeyValue { return ContainerImageNameKey.String(val) } // ContainerImageTag returns an attribute KeyValue conforming to the // "container.image.tag" semantic conventions. It represents the container // image tag. func ContainerImageTag(val string) attribute.KeyValue { return ContainerImageTagKey.String(val) } // ContainerImageID returns an attribute KeyValue conforming to the // "container.image.id" semantic conventions. It represents the runtime // specific image identifier. Usually a hash algorithm followed by a UUID. func ContainerImageID(val string) attribute.KeyValue { return ContainerImageIDKey.String(val) } // ContainerCommand returns an attribute KeyValue conforming to the // "container.command" semantic conventions. It represents the command used to // run the container (i.e. the command name). func ContainerCommand(val string) attribute.KeyValue { return ContainerCommandKey.String(val) } // ContainerCommandLine returns an attribute KeyValue conforming to the // "container.command_line" semantic conventions. It represents the full // command run by the container as a single string representing the full // command. [2] func ContainerCommandLine(val string) attribute.KeyValue { return ContainerCommandLineKey.String(val) } // ContainerCommandArgs returns an attribute KeyValue conforming to the // "container.command_args" semantic conventions. It represents the all the // command arguments (including the command/executable itself) run by the // container. [2] func ContainerCommandArgs(val ...string) attribute.KeyValue { return ContainerCommandArgsKey.StringSlice(val) } // The software deployment. const ( // DeploymentEnvironmentKey is the attribute Key conforming to the // "deployment.environment" semantic conventions. It represents the name of // the [deployment // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka // deployment tier). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'staging', 'production' DeploymentEnvironmentKey = attribute.Key("deployment.environment") ) // DeploymentEnvironment returns an attribute KeyValue conforming to the // "deployment.environment" semantic conventions. It represents the name of the // [deployment // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka // deployment tier). func DeploymentEnvironment(val string) attribute.KeyValue { return DeploymentEnvironmentKey.String(val) } // The device on which the process represented by this resource is running. 
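// Illustrative, non-normative sketch of populating these device attributes on
// an SDK resource (resource refers to go.opentelemetry.io/otel/sdk/resource;
// the identifier value is a placeholder):
//
//	res := resource.NewWithAttributes(
//		semconv.SchemaURL,
//		semconv.DeviceID("2ab2916d-a51f-4ac8-80ee-45ac31a28092"),
//		semconv.DeviceManufacturer("Apple"),
//	)
//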
const ( // DeviceIDKey is the attribute Key conforming to the "device.id" semantic // conventions. It represents a unique identifier representing the device // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' // Note: The device identifier MUST only be defined using the values // outlined below. This value is not an advertising identifier and MUST NOT // be used as such. On iOS (Swift or Objective-C), this value MUST be equal // to the [vendor // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). // On Android (Java or Kotlin), this value MUST be equal to the Firebase // Installation ID or a globally unique UUID which is persisted across // sessions in your application. More information can be found // [here](https://developer.android.com/training/articles/user-data-ids) on // best practices and exact implementation details. Caution should be taken // when storing personal data or anything which can identify a user. GDPR // and data protection laws may apply, ensure you do your own due // diligence. DeviceIDKey = attribute.Key("device.id") // DeviceModelIdentifierKey is the attribute Key conforming to the // "device.model.identifier" semantic conventions. It represents the model // identifier for the device // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iPhone3,4', 'SM-G920F' // Note: It's recommended this value represents a machine readable version // of the model identifier rather than the market or consumer-friendly name // of the device. DeviceModelIdentifierKey = attribute.Key("device.model.identifier") // DeviceModelNameKey is the attribute Key conforming to the // "device.model.name" semantic conventions. It represents the marketing // name for the device model // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' // Note: It's recommended this value represents a human readable version of // the device model rather than a machine readable alternative. DeviceModelNameKey = attribute.Key("device.model.name") // DeviceManufacturerKey is the attribute Key conforming to the // "device.manufacturer" semantic conventions. It represents the name of // the device manufacturer // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Apple', 'Samsung' // Note: The Android OS provides this field via // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). // iOS apps SHOULD hardcode the value `Apple`. DeviceManufacturerKey = attribute.Key("device.manufacturer") ) // DeviceID returns an attribute KeyValue conforming to the "device.id" // semantic conventions. It represents a unique identifier representing the // device func DeviceID(val string) attribute.KeyValue { return DeviceIDKey.String(val) } // DeviceModelIdentifier returns an attribute KeyValue conforming to the // "device.model.identifier" semantic conventions. It represents the model // identifier for the device func DeviceModelIdentifier(val string) attribute.KeyValue { return DeviceModelIdentifierKey.String(val) } // DeviceModelName returns an attribute KeyValue conforming to the // "device.model.name" semantic conventions. 
It represents the marketing name // for the device model func DeviceModelName(val string) attribute.KeyValue { return DeviceModelNameKey.String(val) } // DeviceManufacturer returns an attribute KeyValue conforming to the // "device.manufacturer" semantic conventions. It represents the name of the // device manufacturer func DeviceManufacturer(val string) attribute.KeyValue { return DeviceManufacturerKey.String(val) } // A serverless instance. const ( // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic // conventions. It represents the name of the single function that this // runtime instance executes. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'my-function', 'myazurefunctionapp/some-function-name' // Note: This is the name of the function as configured/deployed on the // FaaS // platform and is usually different from the name of the callback // function (which may be stored in the // [`code.namespace`/`code.function`](/docs/general/general-attributes.md#source-code-attributes) // span attributes). // // For some cloud providers, the above definition is ambiguous. The // following // definition of function name MUST be used for this attribute // (and consequently the span name) for the listed cloud // providers/products: // // * **Azure:** The full name `/`, i.e., function app name // followed by a forward slash followed by the function name (this form // can also be seen in the resource JSON for the function). // This means that a span attribute MUST be used, as an Azure function // app can host multiple functions that would usually share // a TracerProvider (see also the `cloud.resource_id` attribute). FaaSNameKey = attribute.Key("faas.name") // FaaSVersionKey is the attribute Key conforming to the "faas.version" // semantic conventions. It represents the immutable version of the // function being executed. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '26', 'pinkfroid-00002' // Note: Depending on the cloud provider and platform, use: // // * **AWS Lambda:** The [function // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) // (an integer represented as a decimal string). // * **Google Cloud Run (Services):** The // [revision](https://cloud.google.com/run/docs/managing/revisions) // (i.e., the function name plus the revision suffix). // * **Google Cloud Functions:** The value of the // [`K_REVISION` environment // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). // * **Azure Functions:** Not applicable. Do not set this attribute. FaaSVersionKey = attribute.Key("faas.version") // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" // semantic conventions. It represents the execution environment ID as a // string, that will be potentially reused for other invocations to the // same function/function version. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' // Note: * **AWS Lambda:** Use the (full) log stream name. FaaSInstanceKey = attribute.Key("faas.instance") // FaaSMaxMemoryKey is the attribute Key conforming to the // "faas.max_memory" semantic conventions. It represents the amount of // memory available to the serverless function converted to Bytes. 
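// Illustrative, non-normative sketch of deriving the value on AWS Lambda, per
// the note below (strconv and os are from the Go standard library; error
// handling is omitted for brevity):
//
//	mb, _ := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE"))
//	attr := semconv.FaaSMaxMemory(mb * 1048576) // env var is in MiB, attribute is in bytes
//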
// // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 134217728 // Note: It's recommended to set this attribute since e.g. too little // memory can easily stop a Java AWS Lambda function from working // correctly. On AWS Lambda, the environment variable // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must // be multiplied by 1,048,576). FaaSMaxMemoryKey = attribute.Key("faas.max_memory") ) // FaaSName returns an attribute KeyValue conforming to the "faas.name" // semantic conventions. It represents the name of the single function that // this runtime instance executes. func FaaSName(val string) attribute.KeyValue { return FaaSNameKey.String(val) } // FaaSVersion returns an attribute KeyValue conforming to the // "faas.version" semantic conventions. It represents the immutable version of // the function being executed. func FaaSVersion(val string) attribute.KeyValue { return FaaSVersionKey.String(val) } // FaaSInstance returns an attribute KeyValue conforming to the // "faas.instance" semantic conventions. It represents the execution // environment ID as a string, that will be potentially reused for other // invocations to the same function/function version. func FaaSInstance(val string) attribute.KeyValue { return FaaSInstanceKey.String(val) } // FaaSMaxMemory returns an attribute KeyValue conforming to the // "faas.max_memory" semantic conventions. It represents the amount of memory // available to the serverless function converted to Bytes. func FaaSMaxMemory(val int) attribute.KeyValue { return FaaSMaxMemoryKey.Int(val) } // A host is defined as a computing instance. For example, physical servers, // virtual machines, switches or disk array. const ( // HostIDKey is the attribute Key conforming to the "host.id" semantic // conventions. It represents the unique host ID. For Cloud, this must be // the instance_id assigned by the cloud provider. For non-containerized // systems, this should be the `machine-id`. See the table below for the // sources to use to determine the `machine-id` based on operating system. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'fdbf79e8af94cb7f9e8df36789187052' HostIDKey = attribute.Key("host.id") // HostNameKey is the attribute Key conforming to the "host.name" semantic // conventions. It represents the name of the host. On Unix systems, it may // contain what the hostname command returns, or the fully qualified // hostname, or another name specified by the user. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-test' HostNameKey = attribute.Key("host.name") // HostTypeKey is the attribute Key conforming to the "host.type" semantic // conventions. It represents the type of host. For Cloud, this must be the // machine type. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'n1-standard-1' HostTypeKey = attribute.Key("host.type") // HostArchKey is the attribute Key conforming to the "host.arch" semantic // conventions. It represents the CPU architecture the host system is // running on. // // Type: Enum // RequirementLevel: Optional // Stability: stable HostArchKey = attribute.Key("host.arch") // HostImageNameKey is the attribute Key conforming to the // "host.image.name" semantic conventions. It represents the name of the VM // image or OS install the host was instantiated from. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' HostImageNameKey = attribute.Key("host.image.name") // HostImageIDKey is the attribute Key conforming to the "host.image.id" // semantic conventions. It represents the vM image ID or host OS image ID. // For Cloud, this value is from the provider. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'ami-07b06b442921831e5' HostImageIDKey = attribute.Key("host.image.id") // HostImageVersionKey is the attribute Key conforming to the // "host.image.version" semantic conventions. It represents the version // string of the VM image or host OS as defined in [Version // Attributes](README.md#version-attributes). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0.1' HostImageVersionKey = attribute.Key("host.image.version") ) var ( // AMD64 HostArchAMD64 = HostArchKey.String("amd64") // ARM32 HostArchARM32 = HostArchKey.String("arm32") // ARM64 HostArchARM64 = HostArchKey.String("arm64") // Itanium HostArchIA64 = HostArchKey.String("ia64") // 32-bit PowerPC HostArchPPC32 = HostArchKey.String("ppc32") // 64-bit PowerPC HostArchPPC64 = HostArchKey.String("ppc64") // IBM z/Architecture HostArchS390x = HostArchKey.String("s390x") // 32-bit x86 HostArchX86 = HostArchKey.String("x86") ) // HostID returns an attribute KeyValue conforming to the "host.id" semantic // conventions. It represents the unique host ID. For Cloud, this must be the // instance_id assigned by the cloud provider. For non-containerized systems, // this should be the `machine-id`. See the table below for the sources to use // to determine the `machine-id` based on operating system. func HostID(val string) attribute.KeyValue { return HostIDKey.String(val) } // HostName returns an attribute KeyValue conforming to the "host.name" // semantic conventions. It represents the name of the host. On Unix systems, // it may contain what the hostname command returns, or the fully qualified // hostname, or another name specified by the user. func HostName(val string) attribute.KeyValue { return HostNameKey.String(val) } // HostType returns an attribute KeyValue conforming to the "host.type" // semantic conventions. It represents the type of host. For Cloud, this must // be the machine type. func HostType(val string) attribute.KeyValue { return HostTypeKey.String(val) } // HostImageName returns an attribute KeyValue conforming to the // "host.image.name" semantic conventions. It represents the name of the VM // image or OS install the host was instantiated from. func HostImageName(val string) attribute.KeyValue { return HostImageNameKey.String(val) } // HostImageID returns an attribute KeyValue conforming to the // "host.image.id" semantic conventions. It represents the vM image ID or host // OS image ID. For Cloud, this value is from the provider. func HostImageID(val string) attribute.KeyValue { return HostImageIDKey.String(val) } // HostImageVersion returns an attribute KeyValue conforming to the // "host.image.version" semantic conventions. It represents the version string // of the VM image or host OS as defined in [Version // Attributes](README.md#version-attributes). func HostImageVersion(val string) attribute.KeyValue { return HostImageVersionKey.String(val) } // A Kubernetes Cluster. const ( // K8SClusterNameKey is the attribute Key conforming to the // "k8s.cluster.name" semantic conventions. 
It represents the name of the // cluster. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-cluster' K8SClusterNameKey = attribute.Key("k8s.cluster.name") // K8SClusterUIDKey is the attribute Key conforming to the // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for // the cluster, set to the UID of the `kube-system` namespace. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' // Note: K8S does not have support for obtaining a cluster ID. If this is // ever // added, we will recommend collecting the `k8s.cluster.uid` through the // official APIs. In the meantime, we are able to use the `uid` of the // `kube-system` namespace as a proxy for cluster ID. Read on for the // rationale. // // Every object created in a K8S cluster is assigned a distinct UID. The // `kube-system` namespace is used by Kubernetes itself and will exist // for the lifetime of the cluster. Using the `uid` of the `kube-system` // namespace is a reasonable proxy for the K8S ClusterID as it will only // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are // UUIDs as standardized by // [ISO/IEC 9834-8 and ITU-T // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). // Which states: // // > If generated according to one of the mechanisms defined in Rec. // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be // different from all other UUIDs generated before 3603 A.D., or is // extremely likely to be different (depending on the mechanism chosen). // // Therefore, UIDs between clusters should be extremely unlikely to // conflict. K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") ) // K8SClusterName returns an attribute KeyValue conforming to the // "k8s.cluster.name" semantic conventions. It represents the name of the // cluster. func K8SClusterName(val string) attribute.KeyValue { return K8SClusterNameKey.String(val) } // K8SClusterUID returns an attribute KeyValue conforming to the // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the // cluster, set to the UID of the `kube-system` namespace. func K8SClusterUID(val string) attribute.KeyValue { return K8SClusterUIDKey.String(val) } // A Kubernetes Node object. const ( // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" // semantic conventions. It represents the name of the Node. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'node-1' K8SNodeNameKey = attribute.Key("k8s.node.name") // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" // semantic conventions. It represents the UID of the Node. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' K8SNodeUIDKey = attribute.Key("k8s.node.uid") ) // K8SNodeName returns an attribute KeyValue conforming to the // "k8s.node.name" semantic conventions. It represents the name of the Node. func K8SNodeName(val string) attribute.KeyValue { return K8SNodeNameKey.String(val) } // K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" // semantic conventions. It represents the UID of the Node. func K8SNodeUID(val string) attribute.KeyValue { return K8SNodeUIDKey.String(val) } // A Kubernetes Namespace. const ( // K8SNamespaceNameKey is the attribute Key conforming to the // "k8s.namespace.name" semantic conventions. 
It represents the name of the // namespace that the pod is running in. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'default' K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") ) // K8SNamespaceName returns an attribute KeyValue conforming to the // "k8s.namespace.name" semantic conventions. It represents the name of the // namespace that the pod is running in. func K8SNamespaceName(val string) attribute.KeyValue { return K8SNamespaceNameKey.String(val) } // A Kubernetes Pod object. const ( // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" // semantic conventions. It represents the UID of the Pod. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SPodUIDKey = attribute.Key("k8s.pod.uid") // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" // semantic conventions. It represents the name of the Pod. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry-pod-autoconf' K8SPodNameKey = attribute.Key("k8s.pod.name") ) // K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" // semantic conventions. It represents the UID of the Pod. func K8SPodUID(val string) attribute.KeyValue { return K8SPodUIDKey.String(val) } // K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" // semantic conventions. It represents the name of the Pod. func K8SPodName(val string) attribute.KeyValue { return K8SPodNameKey.String(val) } // A container in a // [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). const ( // K8SContainerNameKey is the attribute Key conforming to the // "k8s.container.name" semantic conventions. It represents the name of the // Container from Pod specification, must be unique within a Pod. Container // runtime usually uses different globally unique name (`container.name`). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'redis' K8SContainerNameKey = attribute.Key("k8s.container.name") // K8SContainerRestartCountKey is the attribute Key conforming to the // "k8s.container.restart_count" semantic conventions. It represents the // number of times the container was restarted. This attribute can be used // to identify a particular container (running or stopped) within a // container spec. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 0, 2 K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") ) // K8SContainerName returns an attribute KeyValue conforming to the // "k8s.container.name" semantic conventions. It represents the name of the // Container from Pod specification, must be unique within a Pod. Container // runtime usually uses different globally unique name (`container.name`). func K8SContainerName(val string) attribute.KeyValue { return K8SContainerNameKey.String(val) } // K8SContainerRestartCount returns an attribute KeyValue conforming to the // "k8s.container.restart_count" semantic conventions. It represents the number // of times the container was restarted. This attribute can be used to identify // a particular container (running or stopped) within a container spec. func K8SContainerRestartCount(val int) attribute.KeyValue { return K8SContainerRestartCountKey.Int(val) } // A Kubernetes ReplicaSet object. const ( // K8SReplicaSetUIDKey is the attribute Key conforming to the // "k8s.replicaset.uid" semantic conventions. 
It represents the UID of the // ReplicaSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") // K8SReplicaSetNameKey is the attribute Key conforming to the // "k8s.replicaset.name" semantic conventions. It represents the name of // the ReplicaSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") ) // K8SReplicaSetUID returns an attribute KeyValue conforming to the // "k8s.replicaset.uid" semantic conventions. It represents the UID of the // ReplicaSet. func K8SReplicaSetUID(val string) attribute.KeyValue { return K8SReplicaSetUIDKey.String(val) } // K8SReplicaSetName returns an attribute KeyValue conforming to the // "k8s.replicaset.name" semantic conventions. It represents the name of the // ReplicaSet. func K8SReplicaSetName(val string) attribute.KeyValue { return K8SReplicaSetNameKey.String(val) } // A Kubernetes Deployment object. const ( // K8SDeploymentUIDKey is the attribute Key conforming to the // "k8s.deployment.uid" semantic conventions. It represents the UID of the // Deployment. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") // K8SDeploymentNameKey is the attribute Key conforming to the // "k8s.deployment.name" semantic conventions. It represents the name of // the Deployment. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") ) // K8SDeploymentUID returns an attribute KeyValue conforming to the // "k8s.deployment.uid" semantic conventions. It represents the UID of the // Deployment. func K8SDeploymentUID(val string) attribute.KeyValue { return K8SDeploymentUIDKey.String(val) } // K8SDeploymentName returns an attribute KeyValue conforming to the // "k8s.deployment.name" semantic conventions. It represents the name of the // Deployment. func K8SDeploymentName(val string) attribute.KeyValue { return K8SDeploymentNameKey.String(val) } // A Kubernetes StatefulSet object. const ( // K8SStatefulSetUIDKey is the attribute Key conforming to the // "k8s.statefulset.uid" semantic conventions. It represents the UID of the // StatefulSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") // K8SStatefulSetNameKey is the attribute Key conforming to the // "k8s.statefulset.name" semantic conventions. It represents the name of // the StatefulSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") ) // K8SStatefulSetUID returns an attribute KeyValue conforming to the // "k8s.statefulset.uid" semantic conventions. It represents the UID of the // StatefulSet. func K8SStatefulSetUID(val string) attribute.KeyValue { return K8SStatefulSetUIDKey.String(val) } // K8SStatefulSetName returns an attribute KeyValue conforming to the // "k8s.statefulset.name" semantic conventions. It represents the name of the // StatefulSet. func K8SStatefulSetName(val string) attribute.KeyValue { return K8SStatefulSetNameKey.String(val) } // A Kubernetes DaemonSet object. 
const ( // K8SDaemonSetUIDKey is the attribute Key conforming to the // "k8s.daemonset.uid" semantic conventions. It represents the UID of the // DaemonSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") // K8SDaemonSetNameKey is the attribute Key conforming to the // "k8s.daemonset.name" semantic conventions. It represents the name of the // DaemonSet. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") ) // K8SDaemonSetUID returns an attribute KeyValue conforming to the // "k8s.daemonset.uid" semantic conventions. It represents the UID of the // DaemonSet. func K8SDaemonSetUID(val string) attribute.KeyValue { return K8SDaemonSetUIDKey.String(val) } // K8SDaemonSetName returns an attribute KeyValue conforming to the // "k8s.daemonset.name" semantic conventions. It represents the name of the // DaemonSet. func K8SDaemonSetName(val string) attribute.KeyValue { return K8SDaemonSetNameKey.String(val) } // A Kubernetes Job object. const ( // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" // semantic conventions. It represents the UID of the Job. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SJobUIDKey = attribute.Key("k8s.job.uid") // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" // semantic conventions. It represents the name of the Job. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SJobNameKey = attribute.Key("k8s.job.name") ) // K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" // semantic conventions. It represents the UID of the Job. func K8SJobUID(val string) attribute.KeyValue { return K8SJobUIDKey.String(val) } // K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" // semantic conventions. It represents the name of the Job. func K8SJobName(val string) attribute.KeyValue { return K8SJobNameKey.String(val) } // A Kubernetes CronJob object. const ( // K8SCronJobUIDKey is the attribute Key conforming to the // "k8s.cronjob.uid" semantic conventions. It represents the UID of the // CronJob. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") // K8SCronJobNameKey is the attribute Key conforming to the // "k8s.cronjob.name" semantic conventions. It represents the name of the // CronJob. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'opentelemetry' K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") ) // K8SCronJobUID returns an attribute KeyValue conforming to the // "k8s.cronjob.uid" semantic conventions. It represents the UID of the // CronJob. func K8SCronJobUID(val string) attribute.KeyValue { return K8SCronJobUIDKey.String(val) } // K8SCronJobName returns an attribute KeyValue conforming to the // "k8s.cronjob.name" semantic conventions. It represents the name of the // CronJob. func K8SCronJobName(val string) attribute.KeyValue { return K8SCronJobNameKey.String(val) } // The operating system (OS) on which the process represented by this resource // is running. const ( // OSTypeKey is the attribute Key conforming to the "os.type" semantic // conventions. 
It represents the operating system type. // // Type: Enum // RequirementLevel: Required // Stability: stable OSTypeKey = attribute.Key("os.type") // OSDescriptionKey is the attribute Key conforming to the "os.description" // semantic conventions. It represents the human readable (not intended to // be parsed) OS version information, like e.g. reported by `ver` or // `lsb_release -a` commands. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 // LTS' OSDescriptionKey = attribute.Key("os.description") // OSNameKey is the attribute Key conforming to the "os.name" semantic // conventions. It represents the human readable operating system name. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'iOS', 'Android', 'Ubuntu' OSNameKey = attribute.Key("os.name") // OSVersionKey is the attribute Key conforming to the "os.version" // semantic conventions. It represents the version string of the operating // system as defined in [Version // Attributes](/docs/resource/README.md#version-attributes). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '14.2.1', '18.04.1' OSVersionKey = attribute.Key("os.version") ) var ( // Microsoft Windows OSTypeWindows = OSTypeKey.String("windows") // Linux OSTypeLinux = OSTypeKey.String("linux") // Apple Darwin OSTypeDarwin = OSTypeKey.String("darwin") // FreeBSD OSTypeFreeBSD = OSTypeKey.String("freebsd") // NetBSD OSTypeNetBSD = OSTypeKey.String("netbsd") // OpenBSD OSTypeOpenBSD = OSTypeKey.String("openbsd") // DragonFly BSD OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") // HP-UX (Hewlett Packard Unix) OSTypeHPUX = OSTypeKey.String("hpux") // AIX (Advanced Interactive eXecutive) OSTypeAIX = OSTypeKey.String("aix") // SunOS, Oracle Solaris OSTypeSolaris = OSTypeKey.String("solaris") // IBM z/OS OSTypeZOS = OSTypeKey.String("z_os") ) // OSDescription returns an attribute KeyValue conforming to the // "os.description" semantic conventions. It represents the human readable (not // intended to be parsed) OS version information, like e.g. reported by `ver` // or `lsb_release -a` commands. func OSDescription(val string) attribute.KeyValue { return OSDescriptionKey.String(val) } // OSName returns an attribute KeyValue conforming to the "os.name" semantic // conventions. It represents the human readable operating system name. func OSName(val string) attribute.KeyValue { return OSNameKey.String(val) } // OSVersion returns an attribute KeyValue conforming to the "os.version" // semantic conventions. It represents the version string of the operating // system as defined in [Version // Attributes](/docs/resource/README.md#version-attributes). func OSVersion(val string) attribute.KeyValue { return OSVersionKey.String(val) } // An operating system process. const ( // ProcessPIDKey is the attribute Key conforming to the "process.pid" // semantic conventions. It represents the process identifier (PID). // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 1234 ProcessPIDKey = attribute.Key("process.pid") // ProcessParentPIDKey is the attribute Key conforming to the // "process.parent_pid" semantic conventions. It represents the parent // Process identifier (PID). 
// // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 111 ProcessParentPIDKey = attribute.Key("process.parent_pid") // ProcessExecutableNameKey is the attribute Key conforming to the // "process.executable.name" semantic conventions. It represents the name // of the process executable. On Linux based systems, can be set to the // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name // of `GetProcessImageFileNameW`. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes // below.) // Stability: stable // Examples: 'otelcol' ProcessExecutableNameKey = attribute.Key("process.executable.name") // ProcessExecutablePathKey is the attribute Key conforming to the // "process.executable.path" semantic conventions. It represents the full // path to the process executable. On Linux based systems, can be set to // the target of `proc/[pid]/exe`. On Windows, can be set to the result of // `GetProcessImageFileNameW`. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes // below.) // Stability: stable // Examples: '/usr/bin/cmd/otelcol' ProcessExecutablePathKey = attribute.Key("process.executable.path") // ProcessCommandKey is the attribute Key conforming to the // "process.command" semantic conventions. It represents the command used // to launch the process (i.e. the command name). On Linux based systems, // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can // be set to the first parameter extracted from `GetCommandLineW`. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes // below.) // Stability: stable // Examples: 'cmd/otelcol' ProcessCommandKey = attribute.Key("process.command") // ProcessCommandLineKey is the attribute Key conforming to the // "process.command_line" semantic conventions. It represents the full // command used to launch the process as a single string representing the // full command. On Windows, can be set to the result of `GetCommandLineW`. // Do not set this if you have to assemble it just for monitoring; use // `process.command_args` instead. // // Type: string // RequirementLevel: ConditionallyRequired (See alternative attributes // below.) // Stability: stable // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' ProcessCommandLineKey = attribute.Key("process.command_line") // ProcessCommandArgsKey is the attribute Key conforming to the // "process.command_args" semantic conventions. It represents all the // command arguments (including the command/executable itself) as received // by the process. On Linux-based systems (and some other Unixoid systems // supporting procfs), can be set according to the list of null-delimited // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, // this would be the full argv vector passed to `main`. // // Type: string[] // RequirementLevel: ConditionallyRequired (See alternative attributes // below.) // Stability: stable // Examples: 'cmd/otecol', '--config=config.yaml' ProcessCommandArgsKey = attribute.Key("process.command_args") // ProcessOwnerKey is the attribute Key conforming to the "process.owner" // semantic conventions. It represents the username of the user that owns // the process. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'root' ProcessOwnerKey = attribute.Key("process.owner") ) // ProcessPID returns an attribute KeyValue conforming to the "process.pid" // semantic conventions.
It represents the process identifier (PID). func ProcessPID(val int) attribute.KeyValue { return ProcessPIDKey.Int(val) } // ProcessParentPID returns an attribute KeyValue conforming to the // "process.parent_pid" semantic conventions. It represents the parent Process // identifier (PID). func ProcessParentPID(val int) attribute.KeyValue { return ProcessParentPIDKey.Int(val) } // ProcessExecutableName returns an attribute KeyValue conforming to the // "process.executable.name" semantic conventions. It represents the name of // the process executable. On Linux based systems, can be set to the `Name` in // `proc/[pid]/status`. On Windows, can be set to the base name of // `GetProcessImageFileNameW`. func ProcessExecutableName(val string) attribute.KeyValue { return ProcessExecutableNameKey.String(val) } // ProcessExecutablePath returns an attribute KeyValue conforming to the // "process.executable.path" semantic conventions. It represents the full path // to the process executable. On Linux based systems, can be set to the target // of `proc/[pid]/exe`. On Windows, can be set to the result of // `GetProcessImageFileNameW`. func ProcessExecutablePath(val string) attribute.KeyValue { return ProcessExecutablePathKey.String(val) } // ProcessCommand returns an attribute KeyValue conforming to the // "process.command" semantic conventions. It represents the command used to // launch the process (i.e. the command name). On Linux based systems, can be // set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to // the first parameter extracted from `GetCommandLineW`. func ProcessCommand(val string) attribute.KeyValue { return ProcessCommandKey.String(val) } // ProcessCommandLine returns an attribute KeyValue conforming to the // "process.command_line" semantic conventions. It represents the full command // used to launch the process as a single string representing the full command. // On Windows, can be set to the result of `GetCommandLineW`. Do not set this // if you have to assemble it just for monitoring; use `process.command_args` // instead. func ProcessCommandLine(val string) attribute.KeyValue { return ProcessCommandLineKey.String(val) } // ProcessCommandArgs returns an attribute KeyValue conforming to the // "process.command_args" semantic conventions. It represents all the // command arguments (including the command/executable itself) as received by // the process. On Linux-based systems (and some other Unixoid systems // supporting procfs), can be set according to the list of null-delimited // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, // this would be the full argv vector passed to `main`. func ProcessCommandArgs(val ...string) attribute.KeyValue { return ProcessCommandArgsKey.StringSlice(val) } // ProcessOwner returns an attribute KeyValue conforming to the // "process.owner" semantic conventions. It represents the username of the user // that owns the process. func ProcessOwner(val string) attribute.KeyValue { return ProcessOwnerKey.String(val) } // The single (language) runtime instance which is monitored. const ( // ProcessRuntimeNameKey is the attribute Key conforming to the // "process.runtime.name" semantic conventions. It represents the name of // the runtime of this process. For compiled native binaries, this SHOULD // be the name of the compiler.
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'OpenJDK Runtime Environment' ProcessRuntimeNameKey = attribute.Key("process.runtime.name") // ProcessRuntimeVersionKey is the attribute Key conforming to the // "process.runtime.version" semantic conventions. It represents the // version of the runtime of this process, as returned by the runtime // without modification. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '14.0.2' ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") // ProcessRuntimeDescriptionKey is the attribute Key conforming to the // "process.runtime.description" semantic conventions. It represents an // additional description about the runtime of the process, for example a // specific vendor customization of the runtime environment. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") ) // ProcessRuntimeName returns an attribute KeyValue conforming to the // "process.runtime.name" semantic conventions. It represents the name of the // runtime of this process. For compiled native binaries, this SHOULD be the // name of the compiler. func ProcessRuntimeName(val string) attribute.KeyValue { return ProcessRuntimeNameKey.String(val) } // ProcessRuntimeVersion returns an attribute KeyValue conforming to the // "process.runtime.version" semantic conventions. It represents the version of // the runtime of this process, as returned by the runtime without // modification. func ProcessRuntimeVersion(val string) attribute.KeyValue { return ProcessRuntimeVersionKey.String(val) } // ProcessRuntimeDescription returns an attribute KeyValue conforming to the // "process.runtime.description" semantic conventions. It represents an // additional description about the runtime of the process, for example a // specific vendor customization of the runtime environment. func ProcessRuntimeDescription(val string) attribute.KeyValue { return ProcessRuntimeDescriptionKey.String(val) } // A service instance. const ( // ServiceNameKey is the attribute Key conforming to the "service.name" // semantic conventions. It represents the logical name of the service. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'shoppingcart' // Note: MUST be the same for all instances of horizontally scaled // services. If the value was not specified, SDKs MUST fallback to // `unknown_service:` concatenated with // [`process.executable.name`](process.md#process), e.g. // `unknown_service:bash`. If `process.executable.name` is not available, // the value MUST be set to `unknown_service`. ServiceNameKey = attribute.Key("service.name") // ServiceVersionKey is the attribute Key conforming to the // "service.version" semantic conventions. It represents the version string // of the service API or implementation. The format is not defined by these // conventions. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2.0.0', 'a01dbef8a' ServiceVersionKey = attribute.Key("service.version") ) // ServiceName returns an attribute KeyValue conforming to the // "service.name" semantic conventions. It represents the logical name of the // service. func ServiceName(val string) attribute.KeyValue { return ServiceNameKey.String(val) } // ServiceVersion returns an attribute KeyValue conforming to the // "service.version" semantic conventions. 
It represents the version string of // the service API or implementation. The format is not defined by these // conventions. func ServiceVersion(val string) attribute.KeyValue { return ServiceVersionKey.String(val) } // A service instance. const ( // ServiceNamespaceKey is the attribute Key conforming to the // "service.namespace" semantic conventions. It represents a namespace for // `service.name`. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Shop' // Note: A string value having a meaning that helps to distinguish a group // of services, for example the team name that owns a group of services. // `service.name` is expected to be unique within the same namespace. If // `service.namespace` is not specified in the Resource then `service.name` // is expected to be unique for all services that have no explicit // namespace defined (so the empty/unspecified namespace is simply one more // valid namespace). Zero-length namespace string is assumed equal to // unspecified namespace. ServiceNamespaceKey = attribute.Key("service.namespace") // ServiceInstanceIDKey is the attribute Key conforming to the // "service.instance.id" semantic conventions. It represents the string ID // of the service instance. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'my-k8s-pod-deployment-1', // '627cc493-f310-47de-96bd-71410b7dec09' // Note: MUST be unique for each instance of the same // `service.namespace,service.name` pair (in other words // `service.namespace,service.name,service.instance.id` triplet MUST be // globally unique). The ID helps to distinguish instances of the same // service that exist at the same time (e.g. instances of a horizontally // scaled service). It is preferable for the ID to be persistent and stay // the same for the lifetime of the service instance, however it is // acceptable that the ID is ephemeral and changes during important // lifetime events for the service (e.g. service restarts). If the service // has no inherent unique ID that can be used as the value of this // attribute it is recommended to generate a random Version 1 or Version 4 // RFC 4122 UUID (services aiming for reproducible UUIDs may also use // Version 5, see RFC 4122 for more recommendations). ServiceInstanceIDKey = attribute.Key("service.instance.id") ) // ServiceNamespace returns an attribute KeyValue conforming to the // "service.namespace" semantic conventions. It represents a namespace for // `service.name`. func ServiceNamespace(val string) attribute.KeyValue { return ServiceNamespaceKey.String(val) } // ServiceInstanceID returns an attribute KeyValue conforming to the // "service.instance.id" semantic conventions. It represents the string ID of // the service instance. func ServiceInstanceID(val string) attribute.KeyValue { return ServiceInstanceIDKey.String(val) } // The telemetry SDK used to capture data recorded by the instrumentation // libraries. const ( // TelemetrySDKNameKey is the attribute Key conforming to the // "telemetry.sdk.name" semantic conventions. It represents the name of the // telemetry SDK as defined above. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'opentelemetry' // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute // to `opentelemetry`. 
// If another SDK, like a fork or a vendor-provided implementation, is // used, this SDK MUST set the // `telemetry.sdk.name` attribute to the fully-qualified class or module // name of this SDK's main entry point // or another suitable identifier depending on the language. // The identifier `opentelemetry` is reserved and MUST NOT be used in this // case. // All custom identifiers SHOULD be stable across different versions of an // implementation. TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") // TelemetrySDKLanguageKey is the attribute Key conforming to the // "telemetry.sdk.language" semantic conventions. It represents the // language of the telemetry SDK. // // Type: Enum // RequirementLevel: Required // Stability: stable TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") // TelemetrySDKVersionKey is the attribute Key conforming to the // "telemetry.sdk.version" semantic conventions. It represents the version // string of the telemetry SDK. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: '1.2.3' TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") ) var ( // cpp TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") // dotnet TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") // erlang TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") // go TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") // java TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") // nodejs TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") // php TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") // python TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") // ruby TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") // rust TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") // swift TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") // webjs TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") ) // TelemetrySDKName returns an attribute KeyValue conforming to the // "telemetry.sdk.name" semantic conventions. It represents the name of the // telemetry SDK as defined above. func TelemetrySDKName(val string) attribute.KeyValue { return TelemetrySDKNameKey.String(val) } // TelemetrySDKVersion returns an attribute KeyValue conforming to the // "telemetry.sdk.version" semantic conventions. It represents the version // string of the telemetry SDK. func TelemetrySDKVersion(val string) attribute.KeyValue { return TelemetrySDKVersionKey.String(val) } // The telemetry SDK used to capture data recorded by the instrumentation // libraries. const ( // TelemetryAutoVersionKey is the attribute Key conforming to the // "telemetry.auto.version" semantic conventions. It represents the version // string of the auto instrumentation agent, if used. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.2.3' TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") ) // TelemetryAutoVersion returns an attribute KeyValue conforming to the // "telemetry.auto.version" semantic conventions. It represents the version // string of the auto instrumentation agent, if used. func TelemetryAutoVersion(val string) attribute.KeyValue { return TelemetryAutoVersionKey.String(val) } // Resource describing the packaged software running the application code. Web // engines are typically executed using process.runtime. 
const ( // WebEngineNameKey is the attribute Key conforming to the "webengine.name" // semantic conventions. It represents the name of the web engine. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'WildFly' WebEngineNameKey = attribute.Key("webengine.name") // WebEngineVersionKey is the attribute Key conforming to the // "webengine.version" semantic conventions. It represents the version of // the web engine. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '21.0.0' WebEngineVersionKey = attribute.Key("webengine.version") // WebEngineDescriptionKey is the attribute Key conforming to the // "webengine.description" semantic conventions. It represents the // additional description of the web engine (e.g. detailed version and // edition information). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - // 2.2.2.Final' WebEngineDescriptionKey = attribute.Key("webengine.description") ) // WebEngineName returns an attribute KeyValue conforming to the // "webengine.name" semantic conventions. It represents the name of the web // engine. func WebEngineName(val string) attribute.KeyValue { return WebEngineNameKey.String(val) } // WebEngineVersion returns an attribute KeyValue conforming to the // "webengine.version" semantic conventions. It represents the version of the // web engine. func WebEngineVersion(val string) attribute.KeyValue { return WebEngineVersionKey.String(val) } // WebEngineDescription returns an attribute KeyValue conforming to the // "webengine.description" semantic conventions. It represents the additional // description of the web engine (e.g. detailed version and edition // information). func WebEngineDescription(val string) attribute.KeyValue { return WebEngineDescriptionKey.String(val) } // Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's // concepts. const ( // OTelScopeNameKey is the attribute Key conforming to the // "otel.scope.name" semantic conventions. It represents the name of the // instrumentation scope - (`InstrumentationScope.Name` in OTLP). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'io.opentelemetry.contrib.mongodb' OTelScopeNameKey = attribute.Key("otel.scope.name") // OTelScopeVersionKey is the attribute Key conforming to the // "otel.scope.version" semantic conventions. It represents the version of // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.0.0' OTelScopeVersionKey = attribute.Key("otel.scope.version") ) // OTelScopeName returns an attribute KeyValue conforming to the // "otel.scope.name" semantic conventions. It represents the name of the // instrumentation scope - (`InstrumentationScope.Name` in OTLP). func OTelScopeName(val string) attribute.KeyValue { return OTelScopeNameKey.String(val) } // OTelScopeVersion returns an attribute KeyValue conforming to the // "otel.scope.version" semantic conventions. It represents the version of the // instrumentation scope - (`InstrumentationScope.Version` in OTLP). func OTelScopeVersion(val string) attribute.KeyValue { return OTelScopeVersionKey.String(val) } // Span attributes used by non-OTLP exporters to represent OpenTelemetry // Scope's concepts. const ( // OTelLibraryNameKey is the attribute Key conforming to the // "otel.library.name" semantic conventions. 
It represents the deprecated, // use the `otel.scope.name` attribute. // // Type: string // RequirementLevel: Optional // Stability: deprecated // Examples: 'io.opentelemetry.contrib.mongodb' OTelLibraryNameKey = attribute.Key("otel.library.name") // OTelLibraryVersionKey is the attribute Key conforming to the // "otel.library.version" semantic conventions. It represents the // deprecated, use the `otel.scope.version` attribute. // // Type: string // RequirementLevel: Optional // Stability: deprecated // Examples: '1.0.0' OTelLibraryVersionKey = attribute.Key("otel.library.version") ) // OTelLibraryName returns an attribute KeyValue conforming to the // "otel.library.name" semantic conventions. It represents the deprecated, use // the `otel.scope.name` attribute. func OTelLibraryName(val string) attribute.KeyValue { return OTelLibraryNameKey.String(val) } // OTelLibraryVersion returns an attribute KeyValue conforming to the // "otel.library.version" semantic conventions. It represents the deprecated, // use the `otel.scope.version` attribute. func OTelLibraryVersion(val string) attribute.KeyValue { return OTelLibraryVersionKey.String(val) } opentelemetry-go-1.21.0/semconv/v1.21.0/schema.go000066400000000000000000000017141452547353200212550ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ const SchemaURL = "https://opentelemetry.io/schemas/1.21.0" opentelemetry-go-1.21.0/semconv/v1.21.0/trace.go000066400000000000000000003070671452547353200211250ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" import "go.opentelemetry.io/otel/attribute" // The shared attributes used to report a single exception associated with a // span or log. const ( // ExceptionTypeKey is the attribute Key conforming to the "exception.type" // semantic conventions. It represents the type of the exception (its // fully-qualified class name, if applicable). The dynamic type of the // exception should be preferred over the static type in languages that // support it. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'java.net.ConnectException', 'OSError' ExceptionTypeKey = attribute.Key("exception.type") // ExceptionMessageKey is the attribute Key conforming to the // "exception.message" semantic conventions. It represents the exception // message. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Division by zero', "Can't convert 'int' object to str // implicitly" ExceptionMessageKey = attribute.Key("exception.message") // ExceptionStacktraceKey is the attribute Key conforming to the // "exception.stacktrace" semantic conventions. It represents a stacktrace // as a string in the natural representation for the language runtime. The // representation is to be determined and documented by each language SIG. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test // exception\\n at ' // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' ExceptionStacktraceKey = attribute.Key("exception.stacktrace") ) // ExceptionType returns an attribute KeyValue conforming to the // "exception.type" semantic conventions. It represents the type of the // exception (its fully-qualified class name, if applicable). The dynamic type // of the exception should be preferred over the static type in languages that // support it. func ExceptionType(val string) attribute.KeyValue { return ExceptionTypeKey.String(val) } // ExceptionMessage returns an attribute KeyValue conforming to the // "exception.message" semantic conventions. It represents the exception // message. func ExceptionMessage(val string) attribute.KeyValue { return ExceptionMessageKey.String(val) } // ExceptionStacktrace returns an attribute KeyValue conforming to the // "exception.stacktrace" semantic conventions. It represents a stacktrace as a // string in the natural representation for the language runtime. The // representation is to be determined and documented by each language SIG. func ExceptionStacktrace(val string) attribute.KeyValue { return ExceptionStacktraceKey.String(val) } // Span attributes used by AWS Lambda (in addition to general `faas` // attributes). const ( // AWSLambdaInvokedARNKey is the attribute Key conforming to the // "aws.lambda.invoked_arn" semantic conventions. It represents the full // invoked ARN as provided on the `Context` passed to the function // (`Lambda-Runtime-Invoked-Function-ARN` header on the // `/runtime/invocation/next` applicable). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' // Note: This may be different from `cloud.resource_id` if an alias is // involved. AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") ) // AWSLambdaInvokedARN returns an attribute KeyValue conforming to the // "aws.lambda.invoked_arn" semantic conventions. It represents the full // invoked ARN as provided on the `Context` passed to the function // (`Lambda-Runtime-Invoked-Function-ARN` header on the // `/runtime/invocation/next` applicable). func AWSLambdaInvokedARN(val string) attribute.KeyValue { return AWSLambdaInvokedARNKey.String(val) } // Attributes for CloudEvents. CloudEvents is a specification on how to define // event data in a standard way. 
These attributes can be attached to spans when // performing operations with CloudEvents, regardless of the protocol being // used. const ( // CloudeventsEventIDKey is the attribute Key conforming to the // "cloudevents.event_id" semantic conventions. It represents the // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) // uniquely identifies the event. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") // CloudeventsEventSourceKey is the attribute Key conforming to the // "cloudevents.event_source" semantic conventions. It represents the // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) // identifies the context in which an event happened. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'https://github.com/cloudevents', // '/cloudevents/spec/pull/123', 'my-service' CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") // CloudeventsEventSpecVersionKey is the attribute Key conforming to the // "cloudevents.event_spec_version" semantic conventions. It represents the // [version of the CloudEvents // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) // which the event uses. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '1.0' CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") // CloudeventsEventTypeKey is the attribute Key conforming to the // "cloudevents.event_type" semantic conventions. It represents the // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) // contains a value describing the type of event related to the originating // occurrence. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'com.github.pull_request.opened', // 'com.example.object.deleted.v2' CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") // CloudeventsEventSubjectKey is the attribute Key conforming to the // "cloudevents.event_subject" semantic conventions. It represents the // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) // of the event in the context of the event producer (identified by // source). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'mynewfile.jpg' CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") ) // CloudeventsEventID returns an attribute KeyValue conforming to the // "cloudevents.event_id" semantic conventions. It represents the // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) // uniquely identifies the event. func CloudeventsEventID(val string) attribute.KeyValue { return CloudeventsEventIDKey.String(val) } // CloudeventsEventSource returns an attribute KeyValue conforming to the // "cloudevents.event_source" semantic conventions. It represents the // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) // identifies the context in which an event happened. func CloudeventsEventSource(val string) attribute.KeyValue { return CloudeventsEventSourceKey.String(val) } // CloudeventsEventSpecVersion returns an attribute KeyValue conforming to // the "cloudevents.event_spec_version" semantic conventions. 
It represents the // [version of the CloudEvents // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) // which the event uses. func CloudeventsEventSpecVersion(val string) attribute.KeyValue { return CloudeventsEventSpecVersionKey.String(val) } // CloudeventsEventType returns an attribute KeyValue conforming to the // "cloudevents.event_type" semantic conventions. It represents the // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) // contains a value describing the type of event related to the originating // occurrence. func CloudeventsEventType(val string) attribute.KeyValue { return CloudeventsEventTypeKey.String(val) } // CloudeventsEventSubject returns an attribute KeyValue conforming to the // "cloudevents.event_subject" semantic conventions. It represents the // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) // of the event in the context of the event producer (identified by source). func CloudeventsEventSubject(val string) attribute.KeyValue { return CloudeventsEventSubjectKey.String(val) } // Semantic conventions for the OpenTracing Shim const ( // OpentracingRefTypeKey is the attribute Key conforming to the // "opentracing.ref_type" semantic conventions. It represents the // parent-child Reference type // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: The causal relationship between a child Span and a parent Span. OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") ) var ( // The parent Span depends on the child Span in some capacity OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") // The parent Span does not depend in any way on the result of the child Span OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") ) // The attributes used to perform database client calls. const ( // DBSystemKey is the attribute Key conforming to the "db.system" semantic // conventions. It represents an identifier for the database management // system (DBMS) product being used. See below for a list of well-known // identifiers. // // Type: Enum // RequirementLevel: Required // Stability: stable DBSystemKey = attribute.Key("db.system") // DBConnectionStringKey is the attribute Key conforming to the // "db.connection_string" semantic conventions. It represents the // connection string used to connect to the database. It is recommended to // remove embedded credentials. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' DBConnectionStringKey = attribute.Key("db.connection_string") // DBUserKey is the attribute Key conforming to the "db.user" semantic // conventions. It represents the username for accessing the database. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'readonly_user', 'reporting_user' DBUserKey = attribute.Key("db.user") // DBJDBCDriverClassnameKey is the attribute Key conforming to the // "db.jdbc.driver_classname" semantic conventions. It represents the // fully-qualified class name of the [Java Database Connectivity // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) // driver used to connect. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'org.postgresql.Driver', // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") // DBNameKey is the attribute Key conforming to the "db.name" semantic // conventions. It represents the name // of the database being accessed. For commands that switch the database, // this should be set to the target database (even if the command fails). // // Type: string // RequirementLevel: ConditionallyRequired (If applicable.) // Stability: stable // Examples: 'customers', 'main' // Note: In some SQL databases, the database name to be used is called // "schema name". In case there are multiple layers that could be // considered for database name (e.g. Oracle instance name and schema // name), the database name to be used is the more specific layer (e.g. // Oracle schema name). DBNameKey = attribute.Key("db.name") // DBStatementKey is the attribute Key conforming to the "db.statement" // semantic conventions. It represents the database statement being // executed. // // Type: string // RequirementLevel: Recommended (Should be collected by default only if // there is sanitization that excludes sensitive information.) // Stability: stable // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' DBStatementKey = attribute.Key("db.statement") // DBOperationKey is the attribute Key conforming to the "db.operation" // semantic conventions. It represents the name of the operation being // executed, e.g. the [MongoDB command // name](https://docs.mongodb.com/manual/reference/command/#database-operations) // such as `findAndModify`, or the SQL keyword. // // Type: string // RequirementLevel: ConditionallyRequired (If `db.statement` is not // applicable.) // Stability: stable // Examples: 'findAndModify', 'HMSET', 'SELECT' // Note: When setting this to an SQL keyword, it is not recommended to // attempt any client-side parsing of `db.statement` just to get this // property, but it should be set if the operation name is provided by the // library being instrumented. If the SQL statement has an ambiguous // operation, or performs more than one operation, this value may be // omitted. DBOperationKey = attribute.Key("db.operation") ) var ( // Some other SQL database. Fallback only.
See notes DBSystemOtherSQL = DBSystemKey.String("other_sql") // Microsoft SQL Server DBSystemMSSQL = DBSystemKey.String("mssql") // Microsoft SQL Server Compact DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") // MySQL DBSystemMySQL = DBSystemKey.String("mysql") // Oracle Database DBSystemOracle = DBSystemKey.String("oracle") // IBM DB2 DBSystemDB2 = DBSystemKey.String("db2") // PostgreSQL DBSystemPostgreSQL = DBSystemKey.String("postgresql") // Amazon Redshift DBSystemRedshift = DBSystemKey.String("redshift") // Apache Hive DBSystemHive = DBSystemKey.String("hive") // Cloudscape DBSystemCloudscape = DBSystemKey.String("cloudscape") // HyperSQL DataBase DBSystemHSQLDB = DBSystemKey.String("hsqldb") // Progress Database DBSystemProgress = DBSystemKey.String("progress") // SAP MaxDB DBSystemMaxDB = DBSystemKey.String("maxdb") // SAP HANA DBSystemHanaDB = DBSystemKey.String("hanadb") // Ingres DBSystemIngres = DBSystemKey.String("ingres") // FirstSQL DBSystemFirstSQL = DBSystemKey.String("firstsql") // EnterpriseDB DBSystemEDB = DBSystemKey.String("edb") // InterSystems Caché DBSystemCache = DBSystemKey.String("cache") // Adabas (Adaptable Database System) DBSystemAdabas = DBSystemKey.String("adabas") // Firebird DBSystemFirebird = DBSystemKey.String("firebird") // Apache Derby DBSystemDerby = DBSystemKey.String("derby") // FileMaker DBSystemFilemaker = DBSystemKey.String("filemaker") // Informix DBSystemInformix = DBSystemKey.String("informix") // InstantDB DBSystemInstantDB = DBSystemKey.String("instantdb") // InterBase DBSystemInterbase = DBSystemKey.String("interbase") // MariaDB DBSystemMariaDB = DBSystemKey.String("mariadb") // Netezza DBSystemNetezza = DBSystemKey.String("netezza") // Pervasive PSQL DBSystemPervasive = DBSystemKey.String("pervasive") // PointBase DBSystemPointbase = DBSystemKey.String("pointbase") // SQLite DBSystemSqlite = DBSystemKey.String("sqlite") // Sybase DBSystemSybase = DBSystemKey.String("sybase") // Teradata DBSystemTeradata = DBSystemKey.String("teradata") // Vertica DBSystemVertica = DBSystemKey.String("vertica") // H2 DBSystemH2 = DBSystemKey.String("h2") // ColdFusion IMQ DBSystemColdfusion = DBSystemKey.String("coldfusion") // Apache Cassandra DBSystemCassandra = DBSystemKey.String("cassandra") // Apache HBase DBSystemHBase = DBSystemKey.String("hbase") // MongoDB DBSystemMongoDB = DBSystemKey.String("mongodb") // Redis DBSystemRedis = DBSystemKey.String("redis") // Couchbase DBSystemCouchbase = DBSystemKey.String("couchbase") // CouchDB DBSystemCouchDB = DBSystemKey.String("couchdb") // Microsoft Azure Cosmos DB DBSystemCosmosDB = DBSystemKey.String("cosmosdb") // Amazon DynamoDB DBSystemDynamoDB = DBSystemKey.String("dynamodb") // Neo4j DBSystemNeo4j = DBSystemKey.String("neo4j") // Apache Geode DBSystemGeode = DBSystemKey.String("geode") // Elasticsearch DBSystemElasticsearch = DBSystemKey.String("elasticsearch") // Memcached DBSystemMemcached = DBSystemKey.String("memcached") // CockroachDB DBSystemCockroachdb = DBSystemKey.String("cockroachdb") // OpenSearch DBSystemOpensearch = DBSystemKey.String("opensearch") // ClickHouse DBSystemClickhouse = DBSystemKey.String("clickhouse") // Cloud Spanner DBSystemSpanner = DBSystemKey.String("spanner") // Trino DBSystemTrino = DBSystemKey.String("trino") ) // DBConnectionString returns an attribute KeyValue conforming to the // "db.connection_string" semantic conventions. It represents the connection // string used to connect to the database. It is recommended to remove embedded // credentials. 
func DBConnectionString(val string) attribute.KeyValue { return DBConnectionStringKey.String(val) } // DBUser returns an attribute KeyValue conforming to the "db.user" semantic // conventions. It represents the username for accessing the database. func DBUser(val string) attribute.KeyValue { return DBUserKey.String(val) } // DBJDBCDriverClassname returns an attribute KeyValue conforming to the // "db.jdbc.driver_classname" semantic conventions. It represents the // fully-qualified class name of the [Java Database Connectivity // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver // used to connect. func DBJDBCDriverClassname(val string) attribute.KeyValue { return DBJDBCDriverClassnameKey.String(val) } // DBName returns an attribute KeyValue conforming to the "db.name" semantic // conventions. It represents the name of // the database being accessed. For commands that switch the database, this // should be set to the target database (even if the command fails). func DBName(val string) attribute.KeyValue { return DBNameKey.String(val) } // DBStatement returns an attribute KeyValue conforming to the // "db.statement" semantic conventions. It represents the database statement // being executed. func DBStatement(val string) attribute.KeyValue { return DBStatementKey.String(val) } // DBOperation returns an attribute KeyValue conforming to the // "db.operation" semantic conventions. It represents the name of the operation // being executed, e.g. the [MongoDB command // name](https://docs.mongodb.com/manual/reference/command/#database-operations) // such as `findAndModify`, or the SQL keyword. func DBOperation(val string) attribute.KeyValue { return DBOperationKey.String(val) } // Connection-level attributes for Microsoft SQL Server const ( // DBMSSQLInstanceNameKey is the attribute Key conforming to the // "db.mssql.instance_name" semantic conventions. It represents the // Microsoft SQL Server [instance // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) // connecting to. This name is used to determine the port of a named // instance. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'MSSQLSERVER' // Note: If setting a `db.mssql.instance_name`, `server.port` is no longer // required (but still recommended if non-standard). DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") ) // DBMSSQLInstanceName returns an attribute KeyValue conforming to the // "db.mssql.instance_name" semantic conventions. It represents the Microsoft // SQL Server [instance // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) // connecting to. This name is used to determine the port of a named instance. func DBMSSQLInstanceName(val string) attribute.KeyValue { return DBMSSQLInstanceNameKey.String(val) } // Call-level attributes for Cassandra const ( // DBCassandraPageSizeKey is the attribute Key conforming to the // "db.cassandra.page_size" semantic conventions. It represents the fetch // size used for paging, i.e. how many rows will be returned at once. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 5000 DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") // DBCassandraConsistencyLevelKey is the attribute Key conforming to the // "db.cassandra.consistency_level" semantic conventions. It represents the // consistency level of the query.
Based on consistency values from // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). // // Type: Enum // RequirementLevel: Optional // Stability: stable DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") // DBCassandraTableKey is the attribute Key conforming to the // "db.cassandra.table" semantic conventions. It represents the name of the // primary table that the operation is acting upon, including the keyspace // name (if applicable). // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'mytable' // Note: This mirrors the db.sql.table attribute but references cassandra // rather than sql. It is not recommended to attempt any client-side // parsing of `db.statement` just to get this property, but it should be // set if it is provided by the library being instrumented. If the // operation is acting upon an anonymous table, or more than one table, // this value MUST NOT be set. DBCassandraTableKey = attribute.Key("db.cassandra.table") // DBCassandraIdempotenceKey is the attribute Key conforming to the // "db.cassandra.idempotence" semantic conventions. It represents the // whether or not the query is idempotent. // // Type: boolean // RequirementLevel: Optional // Stability: stable DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming // to the "db.cassandra.speculative_execution_count" semantic conventions. // It represents the number of times a query was speculatively executed. // Not set or `0` if the query was not executed speculatively. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 0, 2 DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") // DBCassandraCoordinatorIDKey is the attribute Key conforming to the // "db.cassandra.coordinator.id" semantic conventions. It represents the ID // of the coordinating node for a query. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") // DBCassandraCoordinatorDCKey is the attribute Key conforming to the // "db.cassandra.coordinator.dc" semantic conventions. It represents the // data center of the coordinating node for a query. 
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'us-west-2' DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") ) var ( // all DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") // each_quorum DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") // quorum DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") // local_quorum DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") // one DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") // two DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") // three DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") // local_one DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") // any DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") // serial DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") // local_serial DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") ) // DBCassandraPageSize returns an attribute KeyValue conforming to the // "db.cassandra.page_size" semantic conventions. It represents the fetch size // used for paging, i.e. how many rows will be returned at once. func DBCassandraPageSize(val int) attribute.KeyValue { return DBCassandraPageSizeKey.Int(val) } // DBCassandraTable returns an attribute KeyValue conforming to the // "db.cassandra.table" semantic conventions. It represents the name of the // primary table that the operation is acting upon, including the keyspace name // (if applicable). func DBCassandraTable(val string) attribute.KeyValue { return DBCassandraTableKey.String(val) } // DBCassandraIdempotence returns an attribute KeyValue conforming to the // "db.cassandra.idempotence" semantic conventions. It represents the whether // or not the query is idempotent. func DBCassandraIdempotence(val bool) attribute.KeyValue { return DBCassandraIdempotenceKey.Bool(val) } // DBCassandraSpeculativeExecutionCount returns an attribute KeyValue // conforming to the "db.cassandra.speculative_execution_count" semantic // conventions. It represents the number of times a query was speculatively // executed. Not set or `0` if the query was not executed speculatively. func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { return DBCassandraSpeculativeExecutionCountKey.Int(val) } // DBCassandraCoordinatorID returns an attribute KeyValue conforming to the // "db.cassandra.coordinator.id" semantic conventions. It represents the ID of // the coordinating node for a query. func DBCassandraCoordinatorID(val string) attribute.KeyValue { return DBCassandraCoordinatorIDKey.String(val) } // DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the // "db.cassandra.coordinator.dc" semantic conventions. It represents the data // center of the coordinating node for a query. func DBCassandraCoordinatorDC(val string) attribute.KeyValue { return DBCassandraCoordinatorDCKey.String(val) } // Call-level attributes for Redis const ( // DBRedisDBIndexKey is the attribute Key conforming to the // "db.redis.database_index" semantic conventions. It represents the index // of the database being accessed as used in the [`SELECT` // command](https://redis.io/commands/select), provided as an integer. 
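// Editor's illustrative sketch (not part of the generated conventions file):
// the Cassandra call-level attributes defined above applied to a single query
// span. The function name and values are hypothetical; the constructors and
// the consistency-level variables come from this package.
func exampleCassandraQueryAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		DBSystemCassandra,
		DBCassandraConsistencyLevelLocalQuorum,
		DBCassandraPageSize(5000),
		DBCassandraTable("mykeyspace.mytable"),
		DBCassandraIdempotence(true),
		DBCassandraCoordinatorID("be13faa2-8574-4d71-926d-27f16cf8a7af"),
		DBCassandraCoordinatorDC("us-west-2"),
	}
}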
To // be used instead of the generic `db.name` attribute. // // Type: int // RequirementLevel: ConditionallyRequired (If other than the default // database (`0`).) // Stability: stable // Examples: 0, 1, 15 DBRedisDBIndexKey = attribute.Key("db.redis.database_index") ) // DBRedisDBIndex returns an attribute KeyValue conforming to the // "db.redis.database_index" semantic conventions. It represents the index of // the database being accessed as used in the [`SELECT` // command](https://redis.io/commands/select), provided as an integer. To be // used instead of the generic `db.name` attribute. func DBRedisDBIndex(val int) attribute.KeyValue { return DBRedisDBIndexKey.Int(val) } // Call-level attributes for MongoDB const ( // DBMongoDBCollectionKey is the attribute Key conforming to the // "db.mongodb.collection" semantic conventions. It represents the // collection being accessed within the database stated in `db.name`. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'customers', 'products' DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") ) // DBMongoDBCollection returns an attribute KeyValue conforming to the // "db.mongodb.collection" semantic conventions. It represents the collection // being accessed within the database stated in `db.name`. func DBMongoDBCollection(val string) attribute.KeyValue { return DBMongoDBCollectionKey.String(val) } // Call-level attributes for SQL databases const ( // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" // semantic conventions. It represents the name of the primary table that // the operation is acting upon, including the database name (if // applicable). // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'public.users', 'customers' // Note: It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting // upon an anonymous table, or more than one table, this value MUST NOT be // set. DBSQLTableKey = attribute.Key("db.sql.table") ) // DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" // semantic conventions. It represents the name of the primary table that the // operation is acting upon, including the database name (if applicable). func DBSQLTable(val string) attribute.KeyValue { return DBSQLTableKey.String(val) } // Call-level attributes for Cosmos DB. const ( // DBCosmosDBClientIDKey is the attribute Key conforming to the // "db.cosmosdb.client_id" semantic conventions. It represents the unique // Cosmos client instance id. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") // DBCosmosDBOperationTypeKey is the attribute Key conforming to the // "db.cosmosdb.operation_type" semantic conventions. It represents the // cosmosDB Operation Type. // // Type: Enum // RequirementLevel: ConditionallyRequired (when performing one of the // operations in this list) // Stability: stable DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") // DBCosmosDBConnectionModeKey is the attribute Key conforming to the // "db.cosmosdb.connection_mode" semantic conventions. It represents the // cosmos client connection mode. 
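// Editor's illustrative sketch (not part of the generated conventions file):
// call-level attributes for a Redis command and for a MongoDB operation,
// using the constructors defined above. Function names and values are
// hypothetical examples.
func exampleRedisCommandAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		DBSystemRedis,
		DBRedisDBIndex(1), // used instead of the generic db.name
		DBStatement("HMSET myhash field1 'Hello'"),
	}
}

func exampleMongoDBCommandAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		DBSystemMongoDB,
		DBName("shopDB"),
		DBMongoDBCollection("products"),
		DBOperation("findAndModify"),
	}
}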
// // Type: Enum // RequirementLevel: ConditionallyRequired (if not `direct` (or pick gw as // default)) // Stability: stable DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") // DBCosmosDBContainerKey is the attribute Key conforming to the // "db.cosmosdb.container" semantic conventions. It represents the cosmos // DB container name. // // Type: string // RequirementLevel: ConditionallyRequired (if available) // Stability: stable // Examples: 'anystring' DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the // "db.cosmosdb.request_content_length" semantic conventions. It represents // the request payload size in bytes // // Type: int // RequirementLevel: Optional // Stability: stable DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") // DBCosmosDBStatusCodeKey is the attribute Key conforming to the // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos // DB status code. // // Type: int // RequirementLevel: ConditionallyRequired (if response was received) // Stability: stable // Examples: 200, 201 DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the // "db.cosmosdb.sub_status_code" semantic conventions. It represents the // cosmos DB sub status code. // // Type: int // RequirementLevel: ConditionallyRequired (when response was received and // contained sub-code.) // Stability: stable // Examples: 1000, 1002 DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") // DBCosmosDBRequestChargeKey is the attribute Key conforming to the // "db.cosmosdb.request_charge" semantic conventions. 
It represents the rU // consumed for that operation // // Type: double // RequirementLevel: ConditionallyRequired (when available) // Stability: stable // Examples: 46.18, 1.0 DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") ) var ( // invalid DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") // create DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") // patch DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") // read DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") // read_feed DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") // delete DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") // replace DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") // execute DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") // query DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") // head DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") // head_feed DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") // upsert DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") // batch DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") // query_plan DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") // execute_javascript DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") ) var ( // Gateway (HTTP) connections mode DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") // Direct connection DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") ) // DBCosmosDBClientID returns an attribute KeyValue conforming to the // "db.cosmosdb.client_id" semantic conventions. It represents the unique // Cosmos client instance id. func DBCosmosDBClientID(val string) attribute.KeyValue { return DBCosmosDBClientIDKey.String(val) } // DBCosmosDBContainer returns an attribute KeyValue conforming to the // "db.cosmosdb.container" semantic conventions. It represents the cosmos DB // container name. func DBCosmosDBContainer(val string) attribute.KeyValue { return DBCosmosDBContainerKey.String(val) } // DBCosmosDBRequestContentLength returns an attribute KeyValue conforming // to the "db.cosmosdb.request_content_length" semantic conventions. It // represents the request payload size in bytes func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { return DBCosmosDBRequestContentLengthKey.Int(val) } // DBCosmosDBStatusCode returns an attribute KeyValue conforming to the // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB // status code. func DBCosmosDBStatusCode(val int) attribute.KeyValue { return DBCosmosDBStatusCodeKey.Int(val) } // DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the // "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos // DB sub status code. func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { return DBCosmosDBSubStatusCodeKey.Int(val) } // DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the // "db.cosmosdb.request_charge" semantic conventions. 
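// Editor's illustrative sketch (not part of the generated conventions file):
// the Cosmos DB call-level attributes defined above on a query span. The
// function name and values are hypothetical; the constructors and enum
// variables come from this package.
func exampleCosmosDBQueryAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		DBSystemCosmosDB,
		DBCosmosDBOperationTypeQuery,
		DBCosmosDBConnectionModeGateway,
		DBCosmosDBClientID("3ba4827d-4422-483f-b59f-85b74211c11d"),
		DBCosmosDBContainer("orders"),
		DBCosmosDBStatusCode(200),
		DBCosmosDBRequestCharge(2.79),
	}
}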
It represents the rU // consumed for that operation func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { return DBCosmosDBRequestChargeKey.Float64(val) } // Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's // concepts. const ( // OTelStatusCodeKey is the attribute Key conforming to the // "otel.status_code" semantic conventions. It represents the name of the // code, either "OK" or "ERROR". MUST NOT be set if the status code is // UNSET. // // Type: Enum // RequirementLevel: Optional // Stability: stable OTelStatusCodeKey = attribute.Key("otel.status_code") // OTelStatusDescriptionKey is the attribute Key conforming to the // "otel.status_description" semantic conventions. It represents the // description of the Status if it has a value, otherwise not set. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'resource not found' OTelStatusDescriptionKey = attribute.Key("otel.status_description") ) var ( // The operation has been validated by an Application developer or Operator to have completed successfully OTelStatusCodeOk = OTelStatusCodeKey.String("OK") // The operation contains an error OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") ) // OTelStatusDescription returns an attribute KeyValue conforming to the // "otel.status_description" semantic conventions. It represents the // description of the Status if it has a value, otherwise not set. func OTelStatusDescription(val string) attribute.KeyValue { return OTelStatusDescriptionKey.String(val) } // This semantic convention describes an instance of a function that runs // without provisioning or managing of servers (also known as serverless // functions or Function as a Service (FaaS)) with spans. const ( // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" // semantic conventions. It represents the type of the trigger which caused // this function invocation. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Note: For the server/consumer span on the incoming side, // `faas.trigger` MUST be set. // // Clients invoking FaaS instances usually cannot set `faas.trigger`, // since they would typically need to look in the payload to determine // the event type. If clients set it, it should be the same as the // trigger that corresponding incoming would have (i.e., this has // nothing to do with the underlying transport used to make the API // call to invoke the lambda, which is often HTTP). FaaSTriggerKey = attribute.Key("faas.trigger") // FaaSInvocationIDKey is the attribute Key conforming to the // "faas.invocation_id" semantic conventions. It represents the invocation // ID of the current function invocation. 
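// Editor's illustrative sketch (not part of the generated conventions file):
// the span status attributes a non-OTLP exporter might emit for a span that
// ended in error, using the constructors and variables defined above. The
// function name and the description text are hypothetical.
func exampleExporterStatusAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		OTelStatusCodeError,
		OTelStatusDescription("resource not found"),
	}
}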
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' FaaSInvocationIDKey = attribute.Key("faas.invocation_id") ) var ( // A response to some data source operation such as a database or filesystem read/write FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") // To provide an answer to an inbound HTTP request FaaSTriggerHTTP = FaaSTriggerKey.String("http") // A function is set to be executed when messages are sent to a messaging system FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") // A function is scheduled to be executed regularly FaaSTriggerTimer = FaaSTriggerKey.String("timer") // If none of the others apply FaaSTriggerOther = FaaSTriggerKey.String("other") ) // FaaSInvocationID returns an attribute KeyValue conforming to the // "faas.invocation_id" semantic conventions. It represents the invocation ID // of the current function invocation. func FaaSInvocationID(val string) attribute.KeyValue { return FaaSInvocationIDKey.String(val) } // Semantic Convention for FaaS triggered as a response to some data source // operation such as a database or filesystem read/write. const ( // FaaSDocumentCollectionKey is the attribute Key conforming to the // "faas.document.collection" semantic conventions. It represents the name // of the source on which the triggering operation was performed. For // example, in Cloud Storage or S3 corresponds to the bucket name, and in // Cosmos DB to the database name. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'myBucketName', 'myDBName' FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") // FaaSDocumentOperationKey is the attribute Key conforming to the // "faas.document.operation" semantic conventions. It represents the // describes the type of the operation that was performed on the data. // // Type: Enum // RequirementLevel: Required // Stability: stable FaaSDocumentOperationKey = attribute.Key("faas.document.operation") // FaaSDocumentTimeKey is the attribute Key conforming to the // "faas.document.time" semantic conventions. It represents a string // containing the time when the data was accessed in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSDocumentTimeKey = attribute.Key("faas.document.time") // FaaSDocumentNameKey is the attribute Key conforming to the // "faas.document.name" semantic conventions. It represents the document // name/table subjected to the operation. For example, in Cloud Storage or // S3 is the name of the file, and in Cosmos DB the table name. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'myFile.txt', 'myTableName' FaaSDocumentNameKey = attribute.Key("faas.document.name") ) var ( // When a new object is created FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") // When an object is modified FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") // When an object is deleted FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") ) // FaaSDocumentCollection returns an attribute KeyValue conforming to the // "faas.document.collection" semantic conventions. It represents the name of // the source on which the triggering operation was performed. 
For example, in // Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the // database name. func FaaSDocumentCollection(val string) attribute.KeyValue { return FaaSDocumentCollectionKey.String(val) } // FaaSDocumentTime returns an attribute KeyValue conforming to the // "faas.document.time" semantic conventions. It represents a string containing // the time when the data was accessed in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). func FaaSDocumentTime(val string) attribute.KeyValue { return FaaSDocumentTimeKey.String(val) } // FaaSDocumentName returns an attribute KeyValue conforming to the // "faas.document.name" semantic conventions. It represents the document // name/table subjected to the operation. For example, in Cloud Storage or S3 // is the name of the file, and in Cosmos DB the table name. func FaaSDocumentName(val string) attribute.KeyValue { return FaaSDocumentNameKey.String(val) } // Semantic Convention for FaaS scheduled to be executed regularly. const ( // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic // conventions. It represents a string containing the function invocation // time in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSTimeKey = attribute.Key("faas.time") // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic // conventions. It represents a string containing the schedule period as // [Cron // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '0/5 * * * ? *' FaaSCronKey = attribute.Key("faas.cron") ) // FaaSTime returns an attribute KeyValue conforming to the "faas.time" // semantic conventions. It represents a string containing the function // invocation time in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). func FaaSTime(val string) attribute.KeyValue { return FaaSTimeKey.String(val) } // FaaSCron returns an attribute KeyValue conforming to the "faas.cron" // semantic conventions. It represents a string containing the schedule period // as [Cron // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). func FaaSCron(val string) attribute.KeyValue { return FaaSCronKey.String(val) } // Contains additional attributes for incoming FaaS spans. const ( // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" // semantic conventions. It represents a boolean that is true if the // serverless function is executed for the first time (aka cold-start). // // Type: boolean // RequirementLevel: Optional // Stability: stable FaaSColdstartKey = attribute.Key("faas.coldstart") ) // FaaSColdstart returns an attribute KeyValue conforming to the // "faas.coldstart" semantic conventions. It represents a boolean that is true // if the serverless function is executed for the first time (aka cold-start). func FaaSColdstart(val bool) attribute.KeyValue { return FaaSColdstartKey.Bool(val) } // Contains additional attributes for outgoing FaaS spans. 
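// Editor's illustrative sketch (not part of the generated conventions file):
// attributes for an incoming FaaS invocation triggered by a datasource
// operation (for example, an object created in a storage bucket), built from
// the constructors above. The function name and values are hypothetical.
func exampleFaaSDatasourceAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		FaaSTriggerDatasource,
		FaaSInvocationID("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
		FaaSDocumentOperationInsert,
		FaaSDocumentCollection("myBucketName"),
		FaaSDocumentName("myFile.txt"),
		FaaSDocumentTime("2020-01-23T13:47:06Z"),
		FaaSColdstart(true),
	}
}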
const ( // FaaSInvokedNameKey is the attribute Key conforming to the // "faas.invoked_name" semantic conventions. It represents the name of the // invoked function. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'my-function' // Note: SHOULD be equal to the `faas.name` resource attribute of the // invoked function. FaaSInvokedNameKey = attribute.Key("faas.invoked_name") // FaaSInvokedProviderKey is the attribute Key conforming to the // "faas.invoked_provider" semantic conventions. It represents the cloud // provider of the invoked function. // // Type: Enum // RequirementLevel: Required // Stability: stable // Note: SHOULD be equal to the `cloud.provider` resource attribute of the // invoked function. FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") // FaaSInvokedRegionKey is the attribute Key conforming to the // "faas.invoked_region" semantic conventions. It represents the cloud // region of the invoked function. // // Type: string // RequirementLevel: ConditionallyRequired (For some cloud providers, like // AWS or GCP, the region in which a function is hosted is essential to // uniquely identify the function and also part of its endpoint. Since it's // part of the endpoint being called, the region is always known to // clients. In these cases, `faas.invoked_region` MUST be set accordingly. // If the region is unknown to the client or not required for identifying // the invoked function, setting `faas.invoked_region` is optional.) // Stability: stable // Examples: 'eu-central-1' // Note: SHOULD be equal to the `cloud.region` resource attribute of the // invoked function. FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") ) var ( // Alibaba Cloud FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") // Amazon Web Services FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") // Microsoft Azure FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") // Google Cloud Platform FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") // Tencent Cloud FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") ) // FaaSInvokedName returns an attribute KeyValue conforming to the // "faas.invoked_name" semantic conventions. It represents the name of the // invoked function. func FaaSInvokedName(val string) attribute.KeyValue { return FaaSInvokedNameKey.String(val) } // FaaSInvokedRegion returns an attribute KeyValue conforming to the // "faas.invoked_region" semantic conventions. It represents the cloud region // of the invoked function. func FaaSInvokedRegion(val string) attribute.KeyValue { return FaaSInvokedRegionKey.String(val) } // Operations that access some remote service. const ( // PeerServiceKey is the attribute Key conforming to the "peer.service" // semantic conventions. It represents the // [`service.name`](/docs/resource/README.md#service) of the remote // service. SHOULD be equal to the actual `service.name` resource attribute // of the remote service if any. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'AuthTokenCache' PeerServiceKey = attribute.Key("peer.service") ) // PeerService returns an attribute KeyValue conforming to the // "peer.service" semantic conventions. It represents the // [`service.name`](/docs/resource/README.md#service) of the remote service. // SHOULD be equal to the actual `service.name` resource attribute of the // remote service if any. 
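// Editor's illustrative sketch (not part of the generated conventions file):
// attributes on an outgoing span for a call that invokes another FaaS
// function. The function name and values are hypothetical; per the notes
// above, they SHOULD mirror the invoked function's resource attributes.
func exampleFaaSOutgoingAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		FaaSInvokedName("my-function"),
		FaaSInvokedProviderAWS,
		FaaSInvokedRegion("eu-central-1"),
	}
}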
func PeerService(val string) attribute.KeyValue { return PeerServiceKey.String(val) } // These attributes may be used for any operation with an authenticated and/or // authorized enduser. const ( // EnduserIDKey is the attribute Key conforming to the "enduser.id" // semantic conventions. It represents the username or client_id extracted // from the access token or // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header // in the inbound request from outside the system. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'username' EnduserIDKey = attribute.Key("enduser.id") // EnduserRoleKey is the attribute Key conforming to the "enduser.role" // semantic conventions. It represents the actual/assumed role the client // is making the request under extracted from token or application security // context. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'admin' EnduserRoleKey = attribute.Key("enduser.role") // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" // semantic conventions. It represents the scopes or granted authorities // the client currently possesses extracted from token or application // security context. The value would come from the scope associated with an // [OAuth 2.0 Access // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute // value in a [SAML 2.0 // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'read:message, write:files' EnduserScopeKey = attribute.Key("enduser.scope") ) // EnduserID returns an attribute KeyValue conforming to the "enduser.id" // semantic conventions. It represents the username or client_id extracted from // the access token or // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in // the inbound request from outside the system. func EnduserID(val string) attribute.KeyValue { return EnduserIDKey.String(val) } // EnduserRole returns an attribute KeyValue conforming to the // "enduser.role" semantic conventions. It represents the actual/assumed role // the client is making the request under extracted from token or application // security context. func EnduserRole(val string) attribute.KeyValue { return EnduserRoleKey.String(val) } // EnduserScope returns an attribute KeyValue conforming to the // "enduser.scope" semantic conventions. It represents the scopes or granted // authorities the client currently possesses extracted from token or // application security context. The value would come from the scope associated // with an [OAuth 2.0 Access // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute // value in a [SAML 2.0 // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). func EnduserScope(val string) attribute.KeyValue { return EnduserScopeKey.String(val) } // These attributes may be used for any operation to store information about a // thread that started a span. const ( // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic // conventions. It represents the current "managed" thread ID (as opposed // to OS thread ID). // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 42 ThreadIDKey = attribute.Key("thread.id") // ThreadNameKey is the attribute Key conforming to the "thread.name" // semantic conventions. It represents the current thread name. 
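// Editor's illustrative sketch (not part of the generated conventions file):
// peer.service and enduser attributes for a span handling an authenticated
// request to a remote service, using the constructors defined above. The
// function name and values are hypothetical.
func exampleEnduserAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		PeerService("AuthTokenCache"),
		EnduserID("username"),
		EnduserRole("admin"),
		EnduserScope("read:message, write:files"),
	}
}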
// // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'main' ThreadNameKey = attribute.Key("thread.name") ) // ThreadID returns an attribute KeyValue conforming to the "thread.id" // semantic conventions. It represents the current "managed" thread ID (as // opposed to OS thread ID). func ThreadID(val int) attribute.KeyValue { return ThreadIDKey.Int(val) } // ThreadName returns an attribute KeyValue conforming to the "thread.name" // semantic conventions. It represents the current thread name. func ThreadName(val string) attribute.KeyValue { return ThreadNameKey.String(val) } // These attributes allow to report this unit of code and therefore to provide // more context about the span. const ( // CodeFunctionKey is the attribute Key conforming to the "code.function" // semantic conventions. It represents the method or function name, or // equivalent (usually rightmost part of the code unit's name). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'serveRequest' CodeFunctionKey = attribute.Key("code.function") // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" // semantic conventions. It represents the "namespace" within which // `code.function` is defined. Usually the qualified class or module name, // such that `code.namespace` + some separator + `code.function` form a // unique identifier for the code unit. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'com.example.MyHTTPService' CodeNamespaceKey = attribute.Key("code.namespace") // CodeFilepathKey is the attribute Key conforming to the "code.filepath" // semantic conventions. It represents the source code file name that // identifies the code unit as uniquely as possible (preferably an absolute // file path). // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '/usr/local/MyApplication/content_root/app/index.php' CodeFilepathKey = attribute.Key("code.filepath") // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" // semantic conventions. It represents the line number in `code.filepath` // best representing the operation. It SHOULD point within the code unit // named in `code.function`. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 42 CodeLineNumberKey = attribute.Key("code.lineno") // CodeColumnKey is the attribute Key conforming to the "code.column" // semantic conventions. It represents the column number in `code.filepath` // best representing the operation. It SHOULD point within the code unit // named in `code.function`. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 16 CodeColumnKey = attribute.Key("code.column") ) // CodeFunction returns an attribute KeyValue conforming to the // "code.function" semantic conventions. It represents the method or function // name, or equivalent (usually rightmost part of the code unit's name). func CodeFunction(val string) attribute.KeyValue { return CodeFunctionKey.String(val) } // CodeNamespace returns an attribute KeyValue conforming to the // "code.namespace" semantic conventions. It represents the "namespace" within // which `code.function` is defined. Usually the qualified class or module // name, such that `code.namespace` + some separator + `code.function` form a // unique identifier for the code unit. 
func CodeNamespace(val string) attribute.KeyValue { return CodeNamespaceKey.String(val) } // CodeFilepath returns an attribute KeyValue conforming to the // "code.filepath" semantic conventions. It represents the source code file // name that identifies the code unit as uniquely as possible (preferably an // absolute file path). func CodeFilepath(val string) attribute.KeyValue { return CodeFilepathKey.String(val) } // CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" // semantic conventions. It represents the line number in `code.filepath` best // representing the operation. It SHOULD point within the code unit named in // `code.function`. func CodeLineNumber(val int) attribute.KeyValue { return CodeLineNumberKey.Int(val) } // CodeColumn returns an attribute KeyValue conforming to the "code.column" // semantic conventions. It represents the column number in `code.filepath` // best representing the operation. It SHOULD point within the code unit named // in `code.function`. func CodeColumn(val int) attribute.KeyValue { return CodeColumnKey.Int(val) } // Semantic Convention for HTTP Client const ( // HTTPResendCountKey is the attribute Key conforming to the // "http.resend_count" semantic conventions. It represents the ordinal // number of request resending attempt (for any reason, including // redirects). // // Type: int // RequirementLevel: Recommended (if and only if request was retried.) // Stability: stable // Examples: 3 // Note: The resend count SHOULD be updated each time an HTTP request gets // resent by the client, regardless of what was the cause of the resending // (e.g. redirection, authorization failure, 503 Server Unavailable, // network issues, or any other). HTTPResendCountKey = attribute.Key("http.resend_count") ) // HTTPResendCount returns an attribute KeyValue conforming to the // "http.resend_count" semantic conventions. It represents the ordinal number // of request resending attempt (for any reason, including redirects). func HTTPResendCount(val int) attribute.KeyValue { return HTTPResendCountKey.Int(val) } // The `aws` conventions apply to operations using the AWS SDK. They map // request or response parameters in AWS SDK API calls to attributes on a Span. // The conventions have been collected over time based on feedback from AWS // users of tracing and will continue to evolve as new interesting conventions // are found. // Some descriptions are also provided for populating general OpenTelemetry // semantic conventions based on these APIs. const ( // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" // semantic conventions. It represents the AWS request ID as returned in // the response headers `x-amz-request-id` or `x-amz-requestid`. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' AWSRequestIDKey = attribute.Key("aws.request_id") ) // AWSRequestID returns an attribute KeyValue conforming to the // "aws.request_id" semantic conventions. It represents the AWS request ID as // returned in the response headers `x-amz-request-id` or `x-amz-requestid`. func AWSRequestID(val string) attribute.KeyValue { return AWSRequestIDKey.String(val) } // Attributes that exist for multiple DynamoDB request types. const ( // AWSDynamoDBTableNamesKey is the attribute Key conforming to the // "aws.dynamodb.table_names" semantic conventions. It represents the keys // in the `RequestItems` object field. 
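// Editor's illustrative sketch (not part of the generated conventions file):
// source-code attribution attributes for a span, plus the HTTP client resend
// counter defined above. The function name and values are hypothetical.
func exampleCodeLocationAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		CodeNamespace("com.example.MyHTTPService"),
		CodeFunction("serveRequest"),
		CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
		CodeLineNumber(42),
		CodeColumn(16),
		HTTPResendCount(3), // only set if the request was actually resent
	}
}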
// // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'Users', 'Cats' AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the // JSON-serialized value of each item in the `ConsumedCapacity` response // field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : // { "CapacityUnits": number, "ReadCapacityUnits": number, // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": // { "CapacityUnits": number, "ReadCapacityUnits": number, // "WriteCapacityUnits": number }, "TableName": "string", // "WriteCapacityUnits": number }' AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to // the "aws.dynamodb.item_collection_metrics" semantic conventions. It // represents the JSON-serialized value of the `ItemCollectionMetrics` // response field. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, // "SizeEstimateRangeGB": [ number ] } ] }' AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` // request parameter. // // Type: double // RequirementLevel: Optional // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. // It represents the value of the // `ProvisionedThroughput.WriteCapacityUnits` request parameter. // // Type: double // RequirementLevel: Optional // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the // "aws.dynamodb.consistent_read" semantic conventions. It represents the // value of the `ConsistentRead` request parameter. // // Type: boolean // RequirementLevel: Optional // Stability: stable AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") // AWSDynamoDBProjectionKey is the attribute Key conforming to the // "aws.dynamodb.projection" semantic conventions. It represents the value // of the `ProjectionExpression` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Title', 'Title, Price, Color', 'Title, Description, // RelatedItems, ProductReviews' AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") // AWSDynamoDBLimitKey is the attribute Key conforming to the // "aws.dynamodb.limit" semantic conventions. 
It represents the value of // the `Limit` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the // value of the `AttributesToGet` request parameter. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: 'lives', 'id' AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") // AWSDynamoDBIndexNameKey is the attribute Key conforming to the // "aws.dynamodb.index_name" semantic conventions. It represents the value // of the `IndexName` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'name_to_group' AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") // AWSDynamoDBSelectKey is the attribute Key conforming to the // "aws.dynamodb.select" semantic conventions. It represents the value of // the `Select` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'ALL_ATTRIBUTES', 'COUNT' AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") ) // AWSDynamoDBTableNames returns an attribute KeyValue conforming to the // "aws.dynamodb.table_names" semantic conventions. It represents the keys in // the `RequestItems` object field. func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { return AWSDynamoDBTableNamesKey.StringSlice(val) } // AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to // the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the // JSON-serialized value of each item in the `ConsumedCapacity` response field. func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { return AWSDynamoDBConsumedCapacityKey.StringSlice(val) } // AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming // to the "aws.dynamodb.item_collection_metrics" semantic conventions. It // represents the JSON-serialized value of the `ItemCollectionMetrics` response // field. func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { return AWSDynamoDBItemCollectionMetricsKey.String(val) } // AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue // conforming to the "aws.dynamodb.provisioned_read_capacity" semantic // conventions. It represents the value of the // `ProvisionedThroughput.ReadCapacityUnits` request parameter. func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) } // AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue // conforming to the "aws.dynamodb.provisioned_write_capacity" semantic // conventions. It represents the value of the // `ProvisionedThroughput.WriteCapacityUnits` request parameter. func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) } // AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the // "aws.dynamodb.consistent_read" semantic conventions. It represents the value // of the `ConsistentRead` request parameter. func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { return AWSDynamoDBConsistentReadKey.Bool(val) } // AWSDynamoDBProjection returns an attribute KeyValue conforming to the // "aws.dynamodb.projection" semantic conventions. 
It represents the value of // the `ProjectionExpression` request parameter. func AWSDynamoDBProjection(val string) attribute.KeyValue { return AWSDynamoDBProjectionKey.String(val) } // AWSDynamoDBLimit returns an attribute KeyValue conforming to the // "aws.dynamodb.limit" semantic conventions. It represents the value of the // `Limit` request parameter. func AWSDynamoDBLimit(val int) attribute.KeyValue { return AWSDynamoDBLimitKey.Int(val) } // AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to // the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the // value of the `AttributesToGet` request parameter. func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { return AWSDynamoDBAttributesToGetKey.StringSlice(val) } // AWSDynamoDBIndexName returns an attribute KeyValue conforming to the // "aws.dynamodb.index_name" semantic conventions. It represents the value of // the `IndexName` request parameter. func AWSDynamoDBIndexName(val string) attribute.KeyValue { return AWSDynamoDBIndexNameKey.String(val) } // AWSDynamoDBSelect returns an attribute KeyValue conforming to the // "aws.dynamodb.select" semantic conventions. It represents the value of the // `Select` request parameter. func AWSDynamoDBSelect(val string) attribute.KeyValue { return AWSDynamoDBSelectKey.String(val) } // DynamoDB.CreateTable const ( // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It // represents the JSON-serialized value of each item of the // `GlobalSecondaryIndexes` request field // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It // represents the JSON-serialized value of each item of the // `LocalSecondaryIndexes` request field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "IndexARN": "string", "IndexName": "string", // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { // "AttributeName": "string", "KeyType": "string" } ], "Projection": { // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") ) // AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue // conforming to the "aws.dynamodb.global_secondary_indexes" semantic // conventions. It represents the JSON-serialized value of each item of the // `GlobalSecondaryIndexes` request field func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) } // AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming // to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It // represents the JSON-serialized value of each item of the // `LocalSecondaryIndexes` request field. 
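// Editor's illustrative sketch (not part of the generated conventions file):
// the common DynamoDB request attributes defined above for a Query-style
// call, together with the general AWS request ID. The function name and
// values are hypothetical.
func exampleDynamoDBRequestAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		DBSystemDynamoDB,
		AWSRequestID("79b9da39-b7ae-508a-a6bc-864b2829c622"),
		AWSDynamoDBTableNames("Users"),
		AWSDynamoDBIndexName("name_to_group"),
		AWSDynamoDBSelect("ALL_ATTRIBUTES"),
		AWSDynamoDBLimit(10),
		AWSDynamoDBConsistentRead(true),
		AWSDynamoDBProjection("Title, Price, Color"),
	}
}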
func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) } // DynamoDB.ListTables const ( // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents // the value of the `ExclusiveStartTableName` request parameter. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Users', 'CatsTable' AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") // AWSDynamoDBTableCountKey is the attribute Key conforming to the // "aws.dynamodb.table_count" semantic conventions. It represents the the // number of items in the `TableNames` response parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 20 AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") ) // AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming // to the "aws.dynamodb.exclusive_start_table" semantic conventions. It // represents the value of the `ExclusiveStartTableName` request parameter. func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { return AWSDynamoDBExclusiveStartTableKey.String(val) } // AWSDynamoDBTableCount returns an attribute KeyValue conforming to the // "aws.dynamodb.table_count" semantic conventions. It represents the the // number of items in the `TableNames` response parameter. func AWSDynamoDBTableCount(val int) attribute.KeyValue { return AWSDynamoDBTableCountKey.Int(val) } // DynamoDB.Query const ( // AWSDynamoDBScanForwardKey is the attribute Key conforming to the // "aws.dynamodb.scan_forward" semantic conventions. It represents the // value of the `ScanIndexForward` request parameter. // // Type: boolean // RequirementLevel: Optional // Stability: stable AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") ) // AWSDynamoDBScanForward returns an attribute KeyValue conforming to the // "aws.dynamodb.scan_forward" semantic conventions. It represents the value of // the `ScanIndexForward` request parameter. func AWSDynamoDBScanForward(val bool) attribute.KeyValue { return AWSDynamoDBScanForwardKey.Bool(val) } // DynamoDB.Scan const ( // AWSDynamoDBSegmentKey is the attribute Key conforming to the // "aws.dynamodb.segment" semantic conventions. It represents the value of // the `Segment` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the // "aws.dynamodb.total_segments" semantic conventions. It represents the // value of the `TotalSegments` request parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 100 AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") // AWSDynamoDBCountKey is the attribute Key conforming to the // "aws.dynamodb.count" semantic conventions. It represents the value of // the `Count` response parameter. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 10 AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") // AWSDynamoDBScannedCountKey is the attribute Key conforming to the // "aws.dynamodb.scanned_count" semantic conventions. It represents the // value of the `ScannedCount` response parameter. 
// // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 50 AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") ) // AWSDynamoDBSegment returns an attribute KeyValue conforming to the // "aws.dynamodb.segment" semantic conventions. It represents the value of the // `Segment` request parameter. func AWSDynamoDBSegment(val int) attribute.KeyValue { return AWSDynamoDBSegmentKey.Int(val) } // AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the // "aws.dynamodb.total_segments" semantic conventions. It represents the value // of the `TotalSegments` request parameter. func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { return AWSDynamoDBTotalSegmentsKey.Int(val) } // AWSDynamoDBCount returns an attribute KeyValue conforming to the // "aws.dynamodb.count" semantic conventions. It represents the value of the // `Count` response parameter. func AWSDynamoDBCount(val int) attribute.KeyValue { return AWSDynamoDBCountKey.Int(val) } // AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the // "aws.dynamodb.scanned_count" semantic conventions. It represents the value // of the `ScannedCount` response parameter. func AWSDynamoDBScannedCount(val int) attribute.KeyValue { return AWSDynamoDBScannedCountKey.Int(val) } // DynamoDB.UpdateTable const ( // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to // the "aws.dynamodb.attribute_definitions" semantic conventions. It // represents the JSON-serialized value of each item in the // `AttributeDefinitions` request field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic // conventions. It represents the JSON-serialized value of each item in the // the `GlobalSecondaryIndexUpdates` request field. // // Type: string[] // RequirementLevel: Optional // Stability: stable // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { // "AttributeName": "string", "KeyType": "string" } ], "Projection": { // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, // "ProvisionedThroughput": { "ReadCapacityUnits": number, // "WriteCapacityUnits": number } }' AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") ) // AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming // to the "aws.dynamodb.attribute_definitions" semantic conventions. It // represents the JSON-serialized value of each item in the // `AttributeDefinitions` request field. func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) } // AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic // conventions. It represents the JSON-serialized value of each item in the the // `GlobalSecondaryIndexUpdates` request field. func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) } // Attributes that exist for S3 request types. const ( // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" // semantic conventions. 
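// Editor's illustrative sketch (not part of the generated conventions file):
// the Scan-specific DynamoDB attributes defined above for one segment of a
// parallel scan; AWSDynamoDBScanForward is the corresponding Query-level
// attribute. The function name and values are hypothetical.
func exampleDynamoDBScanAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		AWSDynamoDBSegment(10),
		AWSDynamoDBTotalSegments(100),
		AWSDynamoDBCount(10),
		AWSDynamoDBScannedCount(50),
	}
}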
It represents the S3 bucket name the request // refers to. Corresponds to the `--bucket` parameter of the [S3 // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) // operations. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'some-bucket-name' // Note: The `bucket` attribute is applicable to all S3 operations that // reference a bucket, i.e. that require the bucket name as a mandatory // parameter. // This applies to almost all S3 operations except `list-buckets`. AWSS3BucketKey = attribute.Key("aws.s3.bucket") // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic // conventions. It represents the S3 object key the request refers to. // Corresponds to the `--key` parameter of the [S3 // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) // operations. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'someFile.yml' // Note: The `key` attribute is applicable to all object-related S3 // operations, i.e. that require the object key as a mandatory parameter. // This applies in particular to the following operations: // // - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) // - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) // - // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) // - // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) // - // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) // - // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) // - // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) // - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) // - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) // - // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) // - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) // - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) // - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) AWSS3KeyKey = attribute.Key("aws.s3.key") // AWSS3CopySourceKey is the attribute Key conforming to the // "aws.s3.copy_source" semantic conventions. It represents the source // object (in the form `bucket`/`key`) for the copy operation. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'someFile.yml' // Note: The `copy_source` attribute applies to S3 copy operations and // corresponds to the `--copy-source` parameter // of the [copy-object operation within the S3 // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). // This applies in particular to the following operations: // // - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) // - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") // AWSS3UploadIDKey is the attribute Key conforming to the // "aws.s3.upload_id" semantic conventions. 
It represents the upload ID // that identifies the multipart upload. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' // Note: The `upload_id` attribute applies to S3 multipart-upload // operations and corresponds to the `--upload-id` parameter // of the [S3 // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) // multipart operations. // This applies in particular to the following operations: // // - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) // - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) // - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) // - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) // - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" // semantic conventions. It represents the delete request container that // specifies the objects to be deleted. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' // Note: The `delete` attribute is only applicable to the // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) // operation. // The `delete` attribute corresponds to the `--delete` parameter of the // [delete-objects operation within the S3 // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). AWSS3DeleteKey = attribute.Key("aws.s3.delete") // AWSS3PartNumberKey is the attribute Key conforming to the // "aws.s3.part_number" semantic conventions. It represents the part number // of the part being uploaded in a multipart-upload operation. This is a // positive integer between 1 and 10,000. // // Type: int // RequirementLevel: Optional // Stability: stable // Examples: 3456 // Note: The `part_number` attribute is only applicable to the // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) // and // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) // operations. // The `part_number` attribute corresponds to the `--part-number` parameter // of the // [upload-part operation within the S3 // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") ) // AWSS3Bucket returns an attribute KeyValue conforming to the // "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the // request refers to. Corresponds to the `--bucket` parameter of the [S3 // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) // operations. func AWSS3Bucket(val string) attribute.KeyValue { return AWSS3BucketKey.String(val) } // AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" // semantic conventions. It represents the S3 object key the request refers to. // Corresponds to the `--key` parameter of the [S3 // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) // operations. 
func AWSS3Key(val string) attribute.KeyValue { return AWSS3KeyKey.String(val) } // AWSS3CopySource returns an attribute KeyValue conforming to the // "aws.s3.copy_source" semantic conventions. It represents the source object // (in the form `bucket`/`key`) for the copy operation. func AWSS3CopySource(val string) attribute.KeyValue { return AWSS3CopySourceKey.String(val) } // AWSS3UploadID returns an attribute KeyValue conforming to the // "aws.s3.upload_id" semantic conventions. It represents the upload ID that // identifies the multipart upload. func AWSS3UploadID(val string) attribute.KeyValue { return AWSS3UploadIDKey.String(val) } // AWSS3Delete returns an attribute KeyValue conforming to the // "aws.s3.delete" semantic conventions. It represents the delete request // container that specifies the objects to be deleted. func AWSS3Delete(val string) attribute.KeyValue { return AWSS3DeleteKey.String(val) } // AWSS3PartNumber returns an attribute KeyValue conforming to the // "aws.s3.part_number" semantic conventions. It represents the part number of // the part being uploaded in a multipart-upload operation. This is a positive // integer between 1 and 10,000. func AWSS3PartNumber(val int) attribute.KeyValue { return AWSS3PartNumberKey.Int(val) } // Semantic conventions to apply when instrumenting the GraphQL implementation. // They map GraphQL operations to attributes on a Span. const ( // GraphqlOperationNameKey is the attribute Key conforming to the // "graphql.operation.name" semantic conventions. It represents the name of // the operation being executed. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'findBookByID' GraphqlOperationNameKey = attribute.Key("graphql.operation.name") // GraphqlOperationTypeKey is the attribute Key conforming to the // "graphql.operation.type" semantic conventions. It represents the type of // the operation being executed. // // Type: Enum // RequirementLevel: Optional // Stability: stable // Examples: 'query', 'mutation', 'subscription' GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") // GraphqlDocumentKey is the attribute Key conforming to the // "graphql.document" semantic conventions. It represents the GraphQL // document being executed. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'query findBookByID { bookByID(id: ?) { name } }' // Note: The value may be sanitized to exclude sensitive information. GraphqlDocumentKey = attribute.Key("graphql.document") ) var ( // GraphQL query GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") // GraphQL mutation GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") // GraphQL subscription GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") ) // GraphqlOperationName returns an attribute KeyValue conforming to the // "graphql.operation.name" semantic conventions. It represents the name of the // operation being executed. func GraphqlOperationName(val string) attribute.KeyValue { return GraphqlOperationNameKey.String(val) } // GraphqlDocument returns an attribute KeyValue conforming to the // "graphql.document" semantic conventions. It represents the GraphQL document // being executed. func GraphqlDocument(val string) attribute.KeyValue { return GraphqlDocumentKey.String(val) } // General attributes used in messaging systems. const ( // MessagingSystemKey is the attribute Key conforming to the // "messaging.system" semantic conventions. 
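// Illustrative usage sketch (not part of the generated conventions): a GraphQL
// server instrumentation could describe an executed query with the three
// GraphQL attributes defined above. The operation name and document string are
// the examples quoted above; attaching the slice to a span is assumed to
// happen in the caller.
//
//	func graphqlQueryAttributes() []attribute.KeyValue {
//		return []attribute.KeyValue{
//			GraphqlOperationName("findBookByID"),
//			GraphqlOperationTypeQuery,
//			GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"),
//		}
//	}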
It represents a string // identifying the messaging system. // // Type: string // RequirementLevel: Required // Stability: stable // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' MessagingSystemKey = attribute.Key("messaging.system") // MessagingOperationKey is the attribute Key conforming to the // "messaging.operation" semantic conventions. It represents a string // identifying the kind of messaging operation as defined in the [Operation // names](#operation-names) section above. // // Type: Enum // RequirementLevel: Required // Stability: stable // Note: If a custom value is used, it MUST be of low cardinality. MessagingOperationKey = attribute.Key("messaging.operation") // MessagingBatchMessageCountKey is the attribute Key conforming to the // "messaging.batch.message_count" semantic conventions. It represents the // number of messages sent, received, or processed in the scope of the // batching operation. // // Type: int // RequirementLevel: ConditionallyRequired (If the span describes an // operation on a batch of messages.) // Stability: stable // Examples: 0, 1, 2 // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on // spans that operate with a single message. When a messaging client // library supports both batch and single-message API for the same // operation, instrumentations SHOULD use `messaging.batch.message_count` // for batching APIs and SHOULD NOT use it for single-message APIs. MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") // MessagingClientIDKey is the attribute Key conforming to the // "messaging.client_id" semantic conventions. It represents a unique // identifier for the client that consumes or produces a message. // // Type: string // RequirementLevel: Recommended (If a client id is available) // Stability: stable // Examples: 'client-5', 'myhost@8742@s8083jm' MessagingClientIDKey = attribute.Key("messaging.client_id") ) var ( // publish MessagingOperationPublish = MessagingOperationKey.String("publish") // receive MessagingOperationReceive = MessagingOperationKey.String("receive") // process MessagingOperationProcess = MessagingOperationKey.String("process") ) // MessagingSystem returns an attribute KeyValue conforming to the // "messaging.system" semantic conventions. It represents a string identifying // the messaging system. func MessagingSystem(val string) attribute.KeyValue { return MessagingSystemKey.String(val) } // MessagingBatchMessageCount returns an attribute KeyValue conforming to // the "messaging.batch.message_count" semantic conventions. It represents the // number of messages sent, received, or processed in the scope of the batching // operation. func MessagingBatchMessageCount(val int) attribute.KeyValue { return MessagingBatchMessageCountKey.Int(val) } // MessagingClientID returns an attribute KeyValue conforming to the // "messaging.client_id" semantic conventions. It represents a unique // identifier for the client that consumes or produces a message. func MessagingClientID(val string) attribute.KeyValue { return MessagingClientIDKey.String(val) } // Semantic conventions for remote procedure calls. const ( // RPCSystemKey is the attribute Key conforming to the "rpc.system" // semantic conventions. It represents a string identifying the remoting // system. See below for a list of well-known identifiers. 
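// Illustrative usage sketch (not part of the generated conventions): a producer
// that publishes a batch of messages could combine the messaging attributes
// defined above as shown below. The broker name, client id, and batch size are
// hypothetical example values.
//
//	func kafkaPublishBatchAttributes() []attribute.KeyValue {
//		return []attribute.KeyValue{
//			MessagingSystem("kafka"),
//			MessagingOperationPublish,
//			MessagingBatchMessageCount(2),
//			MessagingClientID("client-5"),
//		}
//	}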
// // Type: Enum // RequirementLevel: Required // Stability: stable RPCSystemKey = attribute.Key("rpc.system") // RPCServiceKey is the attribute Key conforming to the "rpc.service" // semantic conventions. It represents the full (logical) name of the // service being called, including its package name, if applicable. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'myservice.EchoService' // Note: This is the logical name of the service from the RPC interface // perspective, which can be different from the name of any implementing // class. The `code.namespace` attribute may be used to store the latter // (despite the attribute name, it may include a class name; e.g., class // with method actually executing the call on the server side, RPC client // stub class on the client side). RPCServiceKey = attribute.Key("rpc.service") // RPCMethodKey is the attribute Key conforming to the "rpc.method" // semantic conventions. It represents the name of the (logical) method // being called, must be equal to the $method part in the span name. // // Type: string // RequirementLevel: Recommended // Stability: stable // Examples: 'exampleMethod' // Note: This is the logical name of the method from the RPC interface // perspective, which can be different from the name of any implementing // method/function. The `code.function` attribute may be used to store the // latter (e.g., method actually executing the call on the server side, RPC // client stub method on the client side). RPCMethodKey = attribute.Key("rpc.method") ) var ( // gRPC RPCSystemGRPC = RPCSystemKey.String("grpc") // Java RMI RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") // .NET WCF RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") // Apache Dubbo RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") // Connect RPC RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") ) // RPCService returns an attribute KeyValue conforming to the "rpc.service" // semantic conventions. It represents the full (logical) name of the service // being called, including its package name, if applicable. func RPCService(val string) attribute.KeyValue { return RPCServiceKey.String(val) } // RPCMethod returns an attribute KeyValue conforming to the "rpc.method" // semantic conventions. It represents the name of the (logical) method being // called, must be equal to the $method part in the span name. func RPCMethod(val string) attribute.KeyValue { return RPCMethodKey.String(val) } // Tech-specific attributes for gRPC. const ( // RPCGRPCStatusCodeKey is the attribute Key conforming to the // "rpc.grpc.status_code" semantic conventions. It represents the [numeric // status // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of // the gRPC request. 
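// Illustrative usage sketch (not part of the generated conventions): a gRPC
// client span could carry the general RPC attributes defined above. The
// service and method names are the examples quoted above; the gRPC status code
// attribute (defined next) would normally be added once the response is known.
//
//	func grpcClientAttributes() []attribute.KeyValue {
//		return []attribute.KeyValue{
//			RPCSystemGRPC,
//			RPCService("myservice.EchoService"),
//			RPCMethod("exampleMethod"),
//		}
//	}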
// // Type: Enum // RequirementLevel: Required // Stability: stable RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ) var ( // OK RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) // CANCELLED RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) // UNKNOWN RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) // INVALID_ARGUMENT RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) // DEADLINE_EXCEEDED RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) // NOT_FOUND RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) // ALREADY_EXISTS RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) // PERMISSION_DENIED RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) // RESOURCE_EXHAUSTED RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) // FAILED_PRECONDITION RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) // ABORTED RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) // OUT_OF_RANGE RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) // UNIMPLEMENTED RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) // INTERNAL RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) // UNAVAILABLE RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) // DATA_LOSS RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) // UNAUTHENTICATED RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) ) // Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). const ( // RPCJsonrpcVersionKey is the attribute Key conforming to the // "rpc.jsonrpc.version" semantic conventions. It represents the protocol // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 // does not specify this, the value can be omitted. // // Type: string // RequirementLevel: ConditionallyRequired (If other than the default // version (`1.0`)) // Stability: stable // Examples: '2.0', '1.0' RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") // RPCJsonrpcRequestIDKey is the attribute Key conforming to the // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` // property of request or response. Since protocol allows id to be int, // string, `null` or missing (for notifications), value is expected to be // cast to string for simplicity. Use empty string in case of `null` value. // Omit entirely if this is a notification. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: '10', 'request-7', '' RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the // "rpc.jsonrpc.error_code" semantic conventions. It represents the // `error.code` property of response if it is an error response. // // Type: int // RequirementLevel: ConditionallyRequired (If response is not successful.) // Stability: stable // Examples: -32700, 100 RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the // "rpc.jsonrpc.error_message" semantic conventions. It represents the // `error.message` property of response if it is an error response. // // Type: string // RequirementLevel: Optional // Stability: stable // Examples: 'Parse error', 'User already exists' RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") ) // RPCJsonrpcVersion returns an attribute KeyValue conforming to the // "rpc.jsonrpc.version" semantic conventions. 
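// Illustrative usage sketch (not part of the generated conventions): for a
// failed JSON-RPC 2.0 call, the JSON-RPC attributes defined above (via the
// helper functions that follow) can be recorded together with the request id.
// The values are the examples quoted above.
//
//	func jsonrpcErrorAttributes() []attribute.KeyValue {
//		return []attribute.KeyValue{
//			RPCJsonrpcVersion("2.0"),
//			RPCJsonrpcRequestID("10"),
//			RPCJsonrpcErrorCode(-32700),
//			RPCJsonrpcErrorMessage("Parse error"),
//		}
//	}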
It represents the protocol // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 // does not specify this, the value can be omitted. func RPCJsonrpcVersion(val string) attribute.KeyValue { return RPCJsonrpcVersionKey.String(val) } // RPCJsonrpcRequestID returns an attribute KeyValue conforming to the // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` // property of request or response. Since protocol allows id to be int, string, // `null` or missing (for notifications), value is expected to be cast to // string for simplicity. Use empty string in case of `null` value. Omit // entirely if this is a notification. func RPCJsonrpcRequestID(val string) attribute.KeyValue { return RPCJsonrpcRequestIDKey.String(val) } // RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the // "rpc.jsonrpc.error_code" semantic conventions. It represents the // `error.code` property of response if it is an error response. func RPCJsonrpcErrorCode(val int) attribute.KeyValue { return RPCJsonrpcErrorCodeKey.Int(val) } // RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the // "rpc.jsonrpc.error_message" semantic conventions. It represents the // `error.message` property of response if it is an error response. func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { return RPCJsonrpcErrorMessageKey.String(val) } // Tech-specific attributes for Connect RPC. const ( // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the // "rpc.connect_rpc.error_code" semantic conventions. It represents the // [error codes](https://connect.build/docs/protocol/#error-codes) of the // Connect request. Error codes are always string values. // // Type: Enum // RequirementLevel: ConditionallyRequired (If response is not successful // and if error code available.) 
// Stability: stable RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") ) var ( // cancelled RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") // unknown RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") // invalid_argument RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") // deadline_exceeded RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") // not_found RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") // already_exists RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") // permission_denied RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") // resource_exhausted RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") // failed_precondition RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") // aborted RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") // out_of_range RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") // unimplemented RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") // internal RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") // unavailable RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") // data_loss RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") // unauthenticated RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") ) opentelemetry-go-1.21.0/semconv/v1.4.0/000077500000000000000000000000001452547353200174045ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.4.0/doc.go000066400000000000000000000016621452547353200205050ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package semconv implements OpenTelemetry semantic conventions. // // OpenTelemetry semantic conventions are agreed standardized naming // patterns for OpenTelemetry things. This package represents the conventions // as of the v1.4.0 version of the OpenTelemetry specification. package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" opentelemetry-go-1.21.0/semconv/v1.4.0/exception.go000066400000000000000000000014271452547353200217350ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" const ( // ExceptionEventName is the name of the Span event representing an exception. ExceptionEventName = "exception" ) opentelemetry-go-1.21.0/semconv/v1.4.0/http.go000066400000000000000000000113111452547353200207070ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" import ( "net/http" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/semconv/internal" "go.opentelemetry.io/otel/trace" ) // HTTP scheme attributes. var ( HTTPSchemeHTTP = HTTPSchemeKey.String("http") HTTPSchemeHTTPS = HTTPSchemeKey.String("https") ) var sc = &internal.SemanticConventions{ EnduserIDKey: EnduserIDKey, HTTPClientIPKey: HTTPClientIPKey, HTTPFlavorKey: HTTPFlavorKey, HTTPHostKey: HTTPHostKey, HTTPMethodKey: HTTPMethodKey, HTTPRequestContentLengthKey: HTTPRequestContentLengthKey, HTTPRouteKey: HTTPRouteKey, HTTPSchemeHTTP: HTTPSchemeHTTP, HTTPSchemeHTTPS: HTTPSchemeHTTPS, HTTPServerNameKey: HTTPServerNameKey, HTTPStatusCodeKey: HTTPStatusCodeKey, HTTPTargetKey: HTTPTargetKey, HTTPURLKey: HTTPURLKey, HTTPUserAgentKey: HTTPUserAgentKey, NetHostIPKey: NetHostIPKey, NetHostNameKey: NetHostNameKey, NetHostPortKey: NetHostPortKey, NetPeerIPKey: NetPeerIPKey, NetPeerNameKey: NetPeerNameKey, NetPeerPortKey: NetPeerPortKey, NetTransportIP: NetTransportIP, NetTransportOther: NetTransportOther, NetTransportTCP: NetTransportTCP, NetTransportUDP: NetTransportUDP, NetTransportUnix: NetTransportUnix, } // NetAttributesFromHTTPRequest generates attributes of the net // namespace as specified by the OpenTelemetry specification for a // span. The network parameter is a string that net.Dial function // from standard library can understand. func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { return sc.NetAttributesFromHTTPRequest(network, request) } // EndUserAttributesFromHTTPRequest generates attributes of the // enduser namespace as specified by the OpenTelemetry specification // for a span. func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { return sc.EndUserAttributesFromHTTPRequest(request) } // HTTPClientAttributesFromHTTPRequest generates attributes of the // http namespace as specified by the OpenTelemetry specification for // a span on the client side. 
func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { return sc.HTTPClientAttributesFromHTTPRequest(request) } // HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes // to be used with server-side HTTP metrics. func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { return sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, request) } // HTTPServerAttributesFromHTTPRequest generates attributes of the // http namespace as specified by the OpenTelemetry specification for // a span on the server side. Currently, only basic authentication is // supported. func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { return sc.HTTPServerAttributesFromHTTPRequest(serverName, route, request) } // HTTPAttributesFromHTTPStatusCode generates attributes of the http // namespace as specified by the OpenTelemetry specification for a // span. func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { return sc.HTTPAttributesFromHTTPStatusCode(code) } // SpanStatusFromHTTPStatusCode generates a status code and a message // as specified by the OpenTelemetry specification for a span. func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { return internal.SpanStatusFromHTTPStatusCode(code) } // SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message // as specified by the OpenTelemetry specification for a span. // Exclude 4xx for SERVER to set the appropriate status. func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { return internal.SpanStatusFromHTTPStatusCodeAndSpanKind(code, spanKind) } opentelemetry-go-1.21.0/semconv/v1.4.0/resource.go000066400000000000000000000717531452547353200215770ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" import "go.opentelemetry.io/otel/attribute" // A cloud environment (e.g. GCP, Azure, AWS) const ( // Name of the cloud provider. // // Type: Enum // Required: No // Stability: stable // Examples: 'gcp' CloudProviderKey = attribute.Key("cloud.provider") // The cloud account ID the resource is assigned to. // // Type: string // Required: No // Stability: stable // Examples: '111111111111', 'opentelemetry' CloudAccountIDKey = attribute.Key("cloud.account.id") // The geographical region the resource is running. Refer to your provider's docs // to see the available regions, for example [AWS // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), // [Azure regions](https://azure.microsoft.com/en-us/global- // infrastructure/geographies/), or [Google Cloud // regions](https://cloud.google.com/about/locations). 
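// Illustrative usage sketch (not part of the generated conventions): on the
// server side, the HTTP helpers defined in this package's http.go are usually
// combined to populate a request span and set its status. The span, req,
// serverName, route, and status variables are hypothetical and assumed to be
// provided by the instrumentation; trace refers to the
// go.opentelemetry.io/otel/trace package.
//
//	attrs := HTTPServerAttributesFromHTTPRequest(serverName, route, req)
//	attrs = append(attrs, NetAttributesFromHTTPRequest("tcp", req)...)
//	span.SetAttributes(attrs...)
//	code, msg := SpanStatusFromHTTPStatusCodeAndSpanKind(status, trace.SpanKindServer)
//	span.SetStatus(code, msg)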
// // Type: string // Required: No // Stability: stable // Examples: 'us-central1', 'us-east-1' CloudRegionKey = attribute.Key("cloud.region") // Cloud regions often have multiple, isolated locations known as zones to // increase availability. Availability zone represents the zone where the resource // is running. // // Type: string // Required: No // Stability: stable // Examples: 'us-east-1c' // Note: Availability zones are called "zones" on Google Cloud. CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") // The cloud platform in use. // // Type: Enum // Required: No // Stability: stable // Examples: 'aws_ec2', 'azure_vm', 'gcp_compute_engine' // Note: The prefix of the service SHOULD match the one specified in // `cloud.provider`. CloudPlatformKey = attribute.Key("cloud.platform") ) var ( // Amazon Web Services CloudProviderAWS = CloudProviderKey.String("aws") // Microsoft Azure CloudProviderAzure = CloudProviderKey.String("azure") // Google Cloud Platform CloudProviderGCP = CloudProviderKey.String("gcp") ) var ( // AWS Elastic Compute Cloud CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") // AWS Elastic Container Service CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") // AWS Elastic Kubernetes Service CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") // AWS Lambda CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") // AWS Elastic Beanstalk CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") // Azure Virtual Machines CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") // Azure Container Instances CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") // Azure Kubernetes Service CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") // Azure Functions CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") // Azure App Service CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") // Google Cloud Compute Engine (GCE) CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") // Google Cloud Run CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") // Google Cloud Kubernetes Engine (GKE) CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") // Google Cloud Functions (GCF) CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") // Google Cloud App Engine (GAE) CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") ) // Resources used by AWS Elastic Container Service (ECS). const ( // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws. // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us- // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo // perguide/clusters.html). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l // aunch_types.html) for an ECS task. 
// // Type: Enum // Required: No // Stability: stable // Examples: 'ec2', 'fargate' AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates // t/developerguide/task_definitions.html). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us- // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") // The task definition family this task definition is a member of. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-family' AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") // The revision for this task definition. // // Type: string // Required: No // Stability: stable // Examples: '8', '26' AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") ) var ( // ec2 AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") // fargate AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") ) // Resources used by AWS Elastic Kubernetes Service (EKS). const ( // The ARN of an EKS cluster. // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") ) // Resources specific to Amazon Web Services. const ( // The name(s) of the AWS log group(s) an application is writing to. // // Type: string[] // Required: No // Stability: stable // Examples: '/aws/lambda/my-function', 'opentelemetry-service' // Note: Multiple log groups must be supported for cases like multi-container // applications, where a single application has sidecar containers, and each write // to their own log group. AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") // The Amazon Resource Name(s) (ARN) of the AWS log group(s). // // Type: string[] // Required: No // Stability: stable // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' // Note: See the [log group ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- // access-control-overview-cwl.html#CWL_ARN_Format). AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") // The name(s) of the AWS log stream(s) an application is writing to. // // Type: string[] // Required: No // Stability: stable // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") // The ARN(s) of the AWS log stream(s). // // Type: string[] // Required: No // Stability: stable // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' // Note: See the [log stream ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain // several log streams, so these ARNs necessarily identify both a log group and a // log stream. AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") ) // A container instance. const ( // Container name. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-autoconf' ContainerNameKey = attribute.Key("container.name") // Container ID. Usually a UUID, as for example used to [identify Docker // containers](https://docs.docker.com/engine/reference/run/#container- // identification). The UUID might be abbreviated. 
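// Illustrative usage sketch (not part of the generated conventions): the ECS
// and container attributes above are resource (not span) attributes. A
// detector or manual setup could gather them as below and pass the slice to
// resource.NewWithAttributes from go.opentelemetry.io/otel/sdk/resource
// (import assumed to be available to the caller). The literal values are the
// examples quoted above.
//
//	func ecsResourceAttributes() []attribute.KeyValue {
//		return []attribute.KeyValue{
//			AWSECSClusterARNKey.String("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
//			AWSECSTaskFamilyKey.String("opentelemetry-family"),
//			AWSECSLaunchtypeFargate,
//			ContainerNameKey.String("opentelemetry-autoconf"),
//		}
//	}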
// // Type: string // Required: No // Stability: stable // Examples: 'a3bf90e006b2' ContainerIDKey = attribute.Key("container.id") // The container runtime managing this container. // // Type: string // Required: No // Stability: stable // Examples: 'docker', 'containerd', 'rkt' ContainerRuntimeKey = attribute.Key("container.runtime") // Name of the image the container was built on. // // Type: string // Required: No // Stability: stable // Examples: 'gcr.io/opentelemetry/operator' ContainerImageNameKey = attribute.Key("container.image.name") // Container image tag. // // Type: string // Required: No // Stability: stable // Examples: '0.1' ContainerImageTagKey = attribute.Key("container.image.tag") ) // The software deployment. const ( // Name of the [deployment // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka // deployment tier). // // Type: string // Required: No // Stability: stable // Examples: 'staging', 'production' DeploymentEnvironmentKey = attribute.Key("deployment.environment") ) // The device on which the process represented by this resource is running. const ( // A unique identifier representing the device // // Type: string // Required: No // Stability: stable // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' // Note: The device identifier MUST only be defined using the values outlined // below. This value is not an advertising identifier and MUST NOT be used as // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the // Firebase Installation ID or a globally unique UUID which is persisted across // sessions in your application. More information can be found // [here](https://developer.android.com/training/articles/user-data-ids) on best // practices and exact implementation details. Caution should be taken when // storing personal data or anything which can identify a user. GDPR and data // protection laws may apply, ensure you do your own due diligence. DeviceIDKey = attribute.Key("device.id") // The model identifier for the device // // Type: string // Required: No // Stability: stable // Examples: 'iPhone3,4', 'SM-G920F' // Note: It's recommended this value represents a machine readable version of the // model identifier rather than the market or consumer-friendly name of the // device. DeviceModelIdentifierKey = attribute.Key("device.model.identifier") // The marketing name for the device model // // Type: string // Required: No // Stability: stable // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' // Note: It's recommended this value represents a human readable version of the // device model rather than a machine readable alternative. DeviceModelNameKey = attribute.Key("device.model.name") ) // A serverless instance. const ( // The name of the function being executed. // // Type: string // Required: Always // Stability: stable // Examples: 'my-function' FaaSNameKey = attribute.Key("faas.name") // The unique ID of the function being executed. 
// // Type: string // Required: Always // Stability: stable // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' // Note: For example, in AWS Lambda this field corresponds to the // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and- // namespaces.html) value, in GCP to the URI of the resource, and in Azure to the // [FunctionDirectory](https://github.com/Azure/azure-functions- // host/wiki/Retrieving-information-about-the-currently-running-function) field. FaaSIDKey = attribute.Key("faas.id") // The version string of the function being executed as defined in [Version // Attributes](../../resource/semantic_conventions/README.md#version-attributes). // // Type: string // Required: No // Stability: stable // Examples: '2.0.0' FaaSVersionKey = attribute.Key("faas.version") // The execution environment ID as a string. // // Type: string // Required: No // Stability: stable // Examples: 'my-function:instance-0001' FaaSInstanceKey = attribute.Key("faas.instance") // The amount of memory available to the serverless function in MiB. // // Type: int // Required: No // Stability: stable // Examples: 128 // Note: It's recommended to set this attribute since e.g. too little memory can // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this // information. FaaSMaxMemoryKey = attribute.Key("faas.max_memory") ) // A host is defined as a general computing instance. const ( // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud // provider. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-test' HostIDKey = attribute.Key("host.id") // Name of the host. On Unix systems, it may contain what the hostname command // returns, or the fully qualified hostname, or another name specified by the // user. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-test' HostNameKey = attribute.Key("host.name") // Type of host. For Cloud, this must be the machine type. // // Type: string // Required: No // Stability: stable // Examples: 'n1-standard-1' HostTypeKey = attribute.Key("host.type") // The CPU architecture the host system is running on. // // Type: Enum // Required: No // Stability: stable HostArchKey = attribute.Key("host.arch") // Name of the VM image or OS install the host was instantiated from. // // Type: string // Required: No // Stability: stable // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' HostImageNameKey = attribute.Key("host.image.name") // VM image ID. For Cloud, this value is from the provider. // // Type: string // Required: No // Stability: stable // Examples: 'ami-07b06b442921831e5' HostImageIDKey = attribute.Key("host.image.id") // The version string of the VM image as defined in [Version // Attributes](README.md#version-attributes). // // Type: string // Required: No // Stability: stable // Examples: '0.1' HostImageVersionKey = attribute.Key("host.image.version") ) var ( // AMD64 HostArchAMD64 = HostArchKey.String("amd64") // ARM32 HostArchARM32 = HostArchKey.String("arm32") // ARM64 HostArchARM64 = HostArchKey.String("arm64") // Itanium HostArchIA64 = HostArchKey.String("ia64") // 32-bit PowerPC HostArchPPC32 = HostArchKey.String("ppc32") // 64-bit PowerPC HostArchPPC64 = HostArchKey.String("ppc64") // 32-bit x86 HostArchX86 = HostArchKey.String("x86") ) // A Kubernetes Cluster. const ( // The name of the cluster. 
// // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-cluster' K8SClusterNameKey = attribute.Key("k8s.cluster.name") ) // A Kubernetes Node object. const ( // The name of the Node. // // Type: string // Required: No // Stability: stable // Examples: 'node-1' K8SNodeNameKey = attribute.Key("k8s.node.name") // The UID of the Node. // // Type: string // Required: No // Stability: stable // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' K8SNodeUIDKey = attribute.Key("k8s.node.uid") ) // A Kubernetes Namespace. const ( // The name of the namespace that the pod is running in. // // Type: string // Required: No // Stability: stable // Examples: 'default' K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") ) // A Kubernetes Pod object. const ( // The UID of the Pod. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SPodUIDKey = attribute.Key("k8s.pod.uid") // The name of the Pod. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-pod-autoconf' K8SPodNameKey = attribute.Key("k8s.pod.name") ) // A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). const ( // The name of the Container in a Pod template. // // Type: string // Required: No // Stability: stable // Examples: 'redis' K8SContainerNameKey = attribute.Key("k8s.container.name") ) // A Kubernetes ReplicaSet object. const ( // The UID of the ReplicaSet. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SReplicasetUIDKey = attribute.Key("k8s.replicaset.uid") // The name of the ReplicaSet. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SReplicasetNameKey = attribute.Key("k8s.replicaset.name") ) // A Kubernetes Deployment object. const ( // The UID of the Deployment. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") // The name of the Deployment. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") ) // A Kubernetes StatefulSet object. const ( // The UID of the StatefulSet. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SStatefulsetUIDKey = attribute.Key("k8s.statefulset.uid") // The name of the StatefulSet. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SStatefulsetNameKey = attribute.Key("k8s.statefulset.name") ) // A Kubernetes DaemonSet object. const ( // The UID of the DaemonSet. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDaemonsetUIDKey = attribute.Key("k8s.daemonset.uid") // The name of the DaemonSet. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SDaemonsetNameKey = attribute.Key("k8s.daemonset.name") ) // A Kubernetes Job object. const ( // The UID of the Job. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SJobUIDKey = attribute.Key("k8s.job.uid") // The name of the Job. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SJobNameKey = attribute.Key("k8s.job.name") ) // A Kubernetes CronJob object. const ( // The UID of the CronJob. 
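// Illustrative usage sketch (not part of the generated conventions): the
// Kubernetes attributes above are typically emitted as resource attributes
// describing where the telemetry comes from. The values are the examples
// quoted above and are purely illustrative.
//
//	func k8sResourceAttributes() []attribute.KeyValue {
//		return []attribute.KeyValue{
//			K8SClusterNameKey.String("opentelemetry-cluster"),
//			K8SNamespaceNameKey.String("default"),
//			K8SPodNameKey.String("opentelemetry-pod-autoconf"),
//			K8SDeploymentNameKey.String("opentelemetry"),
//			K8SContainerNameKey.String("redis"),
//		}
//	}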
// // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") // The name of the CronJob. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") ) // The operating system (OS) on which the process represented by this resource is running. const ( // The operating system type. // // Type: Enum // Required: Always // Stability: stable OSTypeKey = attribute.Key("os.type") // Human readable (not intended to be parsed) OS version information, like e.g. // reported by `ver` or `lsb_release -a` commands. // // Type: string // Required: No // Stability: stable // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' OSDescriptionKey = attribute.Key("os.description") // Human readable operating system name. // // Type: string // Required: No // Stability: stable // Examples: 'iOS', 'Android', 'Ubuntu' OSNameKey = attribute.Key("os.name") // The version string of the operating system as defined in [Version // Attributes](../../resource/semantic_conventions/README.md#version-attributes). // // Type: string // Required: No // Stability: stable // Examples: '14.2.1', '18.04.1' OSVersionKey = attribute.Key("os.version") ) var ( // Microsoft Windows OSTypeWindows = OSTypeKey.String("windows") // Linux OSTypeLinux = OSTypeKey.String("linux") // Apple Darwin OSTypeDarwin = OSTypeKey.String("darwin") // FreeBSD OSTypeFreeBSD = OSTypeKey.String("freebsd") // NetBSD OSTypeNetBSD = OSTypeKey.String("netbsd") // OpenBSD OSTypeOpenBSD = OSTypeKey.String("openbsd") // DragonFly BSD OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") // HP-UX (Hewlett Packard Unix) OSTypeHPUX = OSTypeKey.String("hpux") // AIX (Advanced Interactive eXecutive) OSTypeAIX = OSTypeKey.String("aix") // Oracle Solaris OSTypeSolaris = OSTypeKey.String("solaris") // IBM z/OS OSTypeZOS = OSTypeKey.String("z_os") ) // An operating system process. const ( // Process identifier (PID). // // Type: int // Required: No // Stability: stable // Examples: 1234 ProcessPIDKey = attribute.Key("process.pid") // The name of the process executable. On Linux based systems, can be set to the // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of // `GetProcessImageFileNameW`. // // Type: string // Required: See below // Stability: stable // Examples: 'otelcol' ProcessExecutableNameKey = attribute.Key("process.executable.name") // The full path to the process executable. On Linux based systems, can be set to // the target of `proc/[pid]/exe`. On Windows, can be set to the result of // `GetProcessImageFileNameW`. // // Type: string // Required: See below // Stability: stable // Examples: '/usr/bin/cmd/otelcol' ProcessExecutablePathKey = attribute.Key("process.executable.path") // The command used to launch the process (i.e. the command name). On Linux based // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, // can be set to the first parameter extracted from `GetCommandLineW`. // // Type: string // Required: See below // Stability: stable // Examples: 'cmd/otelcol' ProcessCommandKey = attribute.Key("process.command") // The full command used to launch the process as a single string representing the // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not // set this if you have to assemble it just for monitoring; use // `process.command_args` instead. 
// // Type: string // Required: See below // Stability: stable // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' ProcessCommandLineKey = attribute.Key("process.command_line") // All the command arguments (including the command/executable itself) as received // by the process. On Linux-based systems (and some other Unixoid systems // supporting procfs), can be set according to the list of null-delimited strings // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be // the full argv vector passed to `main`. // // Type: string[] // Required: See below // Stability: stable // Examples: 'cmd/otecol', '--config=config.yaml' ProcessCommandArgsKey = attribute.Key("process.command_args") // The username of the user that owns the process. // // Type: string // Required: No // Stability: stable // Examples: 'root' ProcessOwnerKey = attribute.Key("process.owner") ) // The single (language) runtime instance which is monitored. const ( // The name of the runtime of this process. For compiled native binaries, this // SHOULD be the name of the compiler. // // Type: string // Required: No // Stability: stable // Examples: 'OpenJDK Runtime Environment' ProcessRuntimeNameKey = attribute.Key("process.runtime.name") // The version of the runtime of this process, as returned by the runtime without // modification. // // Type: string // Required: No // Stability: stable // Examples: '14.0.2' ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") // An additional description about the runtime of the process, for example a // specific vendor customization of the runtime environment. // // Type: string // Required: No // Stability: stable // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") ) // A service instance. const ( // Logical name of the service. // // Type: string // Required: Always // Stability: stable // Examples: 'shoppingcart' // Note: MUST be the same for all instances of horizontally scaled services. If // the value was not specified, SDKs MUST fallback to `unknown_service:` // concatenated with [`process.executable.name`](process.md#process), e.g. // `unknown_service:bash`. If `process.executable.name` is not available, the // value MUST be set to `unknown_service`. ServiceNameKey = attribute.Key("service.name") // A namespace for `service.name`. // // Type: string // Required: No // Stability: stable // Examples: 'Shop' // Note: A string value having a meaning that helps to distinguish a group of // services, for example the team name that owns a group of services. // `service.name` is expected to be unique within the same namespace. If // `service.namespace` is not specified in the Resource then `service.name` is // expected to be unique for all services that have no explicit namespace defined // (so the empty/unspecified namespace is simply one more valid namespace). Zero- // length namespace string is assumed equal to unspecified namespace. ServiceNamespaceKey = attribute.Key("service.namespace") // The string ID of the service instance. // // Type: string // Required: No // Stability: stable // Examples: '627cc493-f310-47de-96bd-71410b7dec09' // Note: MUST be unique for each instance of the same // `service.namespace,service.name` pair (in other words // `service.namespace,service.name,service.instance.id` triplet MUST be globally // unique). The ID helps to distinguish instances of the same service that exist // at the same time (e.g. 
instances of a horizontally scaled service). It is // preferable for the ID to be persistent and stay the same for the lifetime of // the service instance, however it is acceptable that the ID is ephemeral and // changes during important lifetime events for the service (e.g. service // restarts). If the service has no inherent unique ID that can be used as the // value of this attribute it is recommended to generate a random Version 1 or // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use // Version 5, see RFC 4122 for more recommendations). ServiceInstanceIDKey = attribute.Key("service.instance.id") // The version string of the service API or implementation. // // Type: string // Required: No // Stability: stable // Examples: '2.0.0' ServiceVersionKey = attribute.Key("service.version") ) // The telemetry SDK used to capture data recorded by the instrumentation libraries. const ( // The name of the telemetry SDK as defined above. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") // The language of the telemetry SDK. // // Type: Enum // Required: No // Stability: stable TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") // The version string of the telemetry SDK. // // Type: string // Required: No // Stability: stable // Examples: '1.2.3' TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") // The version string of the auto instrumentation agent, if used. // // Type: string // Required: No // Stability: stable // Examples: '1.2.3' TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") ) var ( // cpp TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") // dotnet TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") // erlang TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") // go TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") // java TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") // nodejs TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") // php TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") // python TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") // ruby TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") // webjs TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") ) // Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. const ( // The name of the web engine. // // Type: string // Required: Always // Stability: stable // Examples: 'WildFly' WebEngineNameKey = attribute.Key("webengine.name") // The version of the web engine. // // Type: string // Required: No // Stability: stable // Examples: '21.0.0' WebEngineVersionKey = attribute.Key("webengine.version") // Additional description of the web engine (e.g. detailed version and edition // information). // // Type: string // Required: No // Stability: stable // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' WebEngineDescriptionKey = attribute.Key("webengine.description") ) opentelemetry-go-1.21.0/semconv/v1.4.0/schema.go000066400000000000000000000017121452547353200211740ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ const SchemaURL = "https://opentelemetry.io/schemas/1.4.0" opentelemetry-go-1.21.0/semconv/v1.4.0/trace.go000066400000000000000000001371651452547353200210460ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.4.0" import "go.opentelemetry.io/otel/attribute" // This document defines the attributes used to perform database client calls. const ( // An identifier for the database management system (DBMS) product being used. See // below for a list of well-known identifiers. // // Type: Enum // Required: Always // Stability: stable DBSystemKey = attribute.Key("db.system") // The connection string used to connect to the database. It is recommended to // remove embedded credentials. // // Type: string // Required: No // Stability: stable // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' DBConnectionStringKey = attribute.Key("db.connection_string") // Username for accessing the database. // // Type: string // Required: No // Stability: stable // Examples: 'readonly_user', 'reporting_user' DBUserKey = attribute.Key("db.user") // The fully-qualified class name of the [Java Database Connectivity // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver // used to connect. // // Type: string // Required: No // Stability: stable // Examples: 'org.postgresql.Driver', // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") // If no [tech-specific attribute](#call-level-attributes-for-specific- // technologies) is defined, this attribute is used to report the name of the // database being accessed. For commands that switch the database, this should be // set to the target database (even if the command fails). // // Type: string // Required: Required, if applicable and no more-specific attribute is defined. // Stability: stable // Examples: 'customers', 'main' // Note: In some SQL databases, the database name to be used is called "schema // name". DBNameKey = attribute.Key("db.name") // The database statement being executed. 
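// Illustrative usage sketch (not part of the generated conventions): a database
// client span for a SQL query could combine the attributes defined in this
// block. The statement and names are the examples quoted above; the enum value
// DBSystemMySQL and the statement/operation keys are defined just below.
//
//	func mysqlQueryAttributes() []attribute.KeyValue {
//		return []attribute.KeyValue{
//			DBSystemMySQL,
//			DBNameKey.String("customers"),
//			DBUserKey.String("readonly_user"),
//			DBStatementKey.String("SELECT * FROM wuser_table"),
//			DBOperationKey.String("SELECT"),
//		}
//	}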
// // Type: string // Required: Required if applicable and not explicitly disabled via // instrumentation configuration. // Stability: stable // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' // Note: The value may be sanitized to exclude sensitive information. DBStatementKey = attribute.Key("db.statement") // The name of the operation being executed, e.g. the [MongoDB command // name](https://docs.mongodb.com/manual/reference/command/#database-operations) // such as `findAndModify`, or the SQL keyword. // // Type: string // Required: Required, if `db.statement` is not applicable. // Stability: stable // Examples: 'findAndModify', 'HMSET', 'SELECT' // Note: When setting this to an SQL keyword, it is not recommended to attempt any // client-side parsing of `db.statement` just to get this property, but it should // be set if the operation name is provided by the library being instrumented. If // the SQL statement has an ambiguous operation, or performs more than one // operation, this value may be omitted. DBOperationKey = attribute.Key("db.operation") ) var ( // Some other SQL database. Fallback only. See notes DBSystemOtherSQL = DBSystemKey.String("other_sql") // Microsoft SQL Server DBSystemMSSQL = DBSystemKey.String("mssql") // MySQL DBSystemMySQL = DBSystemKey.String("mysql") // Oracle Database DBSystemOracle = DBSystemKey.String("oracle") // IBM DB2 DBSystemDB2 = DBSystemKey.String("db2") // PostgreSQL DBSystemPostgreSQL = DBSystemKey.String("postgresql") // Amazon Redshift DBSystemRedshift = DBSystemKey.String("redshift") // Apache Hive DBSystemHive = DBSystemKey.String("hive") // Cloudscape DBSystemCloudscape = DBSystemKey.String("cloudscape") // HyperSQL DataBase DBSystemHSQLDB = DBSystemKey.String("hsqldb") // Progress Database DBSystemProgress = DBSystemKey.String("progress") // SAP MaxDB DBSystemMaxDB = DBSystemKey.String("maxdb") // SAP HANA DBSystemHanaDB = DBSystemKey.String("hanadb") // Ingres DBSystemIngres = DBSystemKey.String("ingres") // FirstSQL DBSystemFirstSQL = DBSystemKey.String("firstsql") // EnterpriseDB DBSystemEDB = DBSystemKey.String("edb") // InterSystems Caché DBSystemCache = DBSystemKey.String("cache") // Adabas (Adaptable Database System) DBSystemAdabas = DBSystemKey.String("adabas") // Firebird DBSystemFirebird = DBSystemKey.String("firebird") // Apache Derby DBSystemDerby = DBSystemKey.String("derby") // FileMaker DBSystemFilemaker = DBSystemKey.String("filemaker") // Informix DBSystemInformix = DBSystemKey.String("informix") // InstantDB DBSystemInstantDB = DBSystemKey.String("instantdb") // InterBase DBSystemInterbase = DBSystemKey.String("interbase") // MariaDB DBSystemMariaDB = DBSystemKey.String("mariadb") // Netezza DBSystemNetezza = DBSystemKey.String("netezza") // Pervasive PSQL DBSystemPervasive = DBSystemKey.String("pervasive") // PointBase DBSystemPointbase = DBSystemKey.String("pointbase") // SQLite DBSystemSqlite = DBSystemKey.String("sqlite") // Sybase DBSystemSybase = DBSystemKey.String("sybase") // Teradata DBSystemTeradata = DBSystemKey.String("teradata") // Vertica DBSystemVertica = DBSystemKey.String("vertica") // H2 DBSystemH2 = DBSystemKey.String("h2") // ColdFusion IMQ DBSystemColdfusion = DBSystemKey.String("coldfusion") // Apache Cassandra DBSystemCassandra = DBSystemKey.String("cassandra") // Apache HBase DBSystemHBase = DBSystemKey.String("hbase") // MongoDB DBSystemMongoDB = DBSystemKey.String("mongodb") // Redis DBSystemRedis = DBSystemKey.String("redis") // Couchbase DBSystemCouchbase = 
DBSystemKey.String("couchbase") // CouchDB DBSystemCouchDB = DBSystemKey.String("couchdb") // Microsoft Azure Cosmos DB DBSystemCosmosDB = DBSystemKey.String("cosmosdb") // Amazon DynamoDB DBSystemDynamoDB = DBSystemKey.String("dynamodb") // Neo4j DBSystemNeo4j = DBSystemKey.String("neo4j") // Apache Geode DBSystemGeode = DBSystemKey.String("geode") // Elasticsearch DBSystemElasticsearch = DBSystemKey.String("elasticsearch") // Memcached DBSystemMemcached = DBSystemKey.String("memcached") // CockroachDB DBSystemCockroachdb = DBSystemKey.String("cockroachdb") ) // Connection-level attributes for Microsoft SQL Server const ( // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en- // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) // connecting to. This name is used to determine the port of a named instance. // // Type: string // Required: No // Stability: stable // Examples: 'MSSQLSERVER' // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer // required (but still recommended if non-standard). DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") ) // Call-level attributes for Cassandra const ( // The name of the keyspace being accessed. To be used instead of the generic // `db.name` attribute. // // Type: string // Required: Always // Stability: stable // Examples: 'mykeyspace' DBCassandraKeyspaceKey = attribute.Key("db.cassandra.keyspace") // The fetch size used for paging, i.e. how many rows will be returned at once. // // Type: int // Required: No // Stability: stable // Examples: 5000 DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") // The consistency level of the query. Based on consistency values from // [CQL](https://docs.datastax.com/en/cassandra- // oss/3.0/cassandra/dml/dmlConfigConsistency.html). // // Type: Enum // Required: No // Stability: stable DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") // The name of the primary table that the operation is acting upon, including the // schema name (if applicable). // // Type: string // Required: Recommended if available. // Stability: stable // Examples: 'mytable' // Note: This mirrors the db.sql.table attribute but references cassandra rather // than sql. It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting upon an // anonymous table, or more than one table, this value MUST NOT be set. DBCassandraTableKey = attribute.Key("db.cassandra.table") // Whether or not the query is idempotent. // // Type: boolean // Required: No // Stability: stable DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") // The number of times a query was speculatively executed. Not set or `0` if the // query was not executed speculatively. // // Type: int // Required: No // Stability: stable // Examples: 0, 2 DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") // The ID of the coordinating node for a query. // // Type: string // Required: No // Stability: stable // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") // The data center of the coordinating node for a query. 
// // Type: string // Required: No // Stability: stable // Examples: 'us-west-2' DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") ) var ( // all DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") // each_quorum DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") // quorum DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") // local_quorum DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") // one DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") // two DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") // three DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") // local_one DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") // any DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") // serial DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") // local_serial DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") ) // Call-level attributes for Apache HBase const ( // The [HBase namespace](https://hbase.apache.org/book.html#_namespace) being // accessed. To be used instead of the generic `db.name` attribute. // // Type: string // Required: Always // Stability: stable // Examples: 'default' DBHBaseNamespaceKey = attribute.Key("db.hbase.namespace") ) // Call-level attributes for Redis const ( // The index of the database being accessed as used in the [`SELECT` // command](https://redis.io/commands/select), provided as an integer. To be used // instead of the generic `db.name` attribute. // // Type: int // Required: Required, if other than the default database (`0`). // Stability: stable // Examples: 0, 1, 15 DBRedisDBIndexKey = attribute.Key("db.redis.database_index") ) // Call-level attributes for MongoDB const ( // The collection being accessed within the database stated in `db.name`. // // Type: string // Required: Always // Stability: stable // Examples: 'customers', 'products' DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") ) // Call-level attributes for SQL databases const ( // The name of the primary table that the operation is acting upon, including the // schema name (if applicable). // // Type: string // Required: Recommended if available. // Stability: stable // Examples: 'public.users', 'customers' // Note: It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting upon an // anonymous table, or more than one table, this value MUST NOT be set. DBSQLTableKey = attribute.Key("db.sql.table") ) // This document defines the attributes used to report a single exception associated with a span. const ( // The type of the exception (its fully-qualified class name, if applicable). The // dynamic type of the exception should be preferred over the static type in // languages that support it. // // Type: string // Required: No // Stability: stable // Examples: 'java.net.ConnectException', 'OSError' ExceptionTypeKey = attribute.Key("exception.type") // The exception message.
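// Illustrative usage sketch (not part of the generated convention definitions): recording an
// exception event with the attributes defined in this block. The span and err values are
// assumed to come from the surrounding instrumentation; the SDK's span.RecordError helper
// records a similar event automatically.
//
//	span.AddEvent("exception", trace.WithAttributes(
//		semconv.ExceptionTypeKey.String("*net.OpError"),
//		semconv.ExceptionMessageKey.String(err.Error()),
//		semconv.ExceptionEscapedKey.Bool(false),
//	))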
// // Type: string // Required: No // Stability: stable // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" ExceptionMessageKey = attribute.Key("exception.message") // A stacktrace as a string in the natural representation for the language // runtime. The representation is to be determined and documented by each language // SIG. // // Type: string // Required: No // Stability: stable // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test // exception\\n at ' // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' ExceptionStacktraceKey = attribute.Key("exception.stacktrace") // SHOULD be set to true if the exception event is recorded at a point where it is // known that the exception is escaping the scope of the span. // // Type: boolean // Required: No // Stability: stable // Note: An exception is considered to have escaped (or left) the scope of a span, // if that span is ended while the exception is still logically "in flight". // This may be actually "in flight" in some languages (e.g. if the exception // is passed to a Context manager's `__exit__` method in Python) but will // usually be caught at the point of recording the exception in most languages. // It is usually not possible to determine at the point where an exception is // thrown // whether it will escape the scope of a span. // However, it is trivial to know that an exception // will escape, if one checks for an active exception just before ending the span, // as done in the [example above](#exception-end-example). // It follows that an exception may still escape the scope of the span // even if the `exception.escaped` attribute was not set or set to false, // since the event might have been recorded at a time where it was not // clear whether the exception will escape. ExceptionEscapedKey = attribute.Key("exception.escaped") ) // This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. const ( // Type of the trigger on which the function is executed. // // Type: Enum // Required: On FaaS instances, faas.trigger MUST be set on incoming invocations. // Clients invoking FaaS instances MUST set `faas.trigger` on outgoing // invocations, if it is known to the client. This is, for example, not the case, // when the transport layer is abstracted in a FaaS client framework without // access to its configuration. // Stability: stable FaaSTriggerKey = attribute.Key("faas.trigger") // The execution ID of the current function execution. 
// // Type: string // Required: No // Stability: stable // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' FaaSExecutionKey = attribute.Key("faas.execution") ) var ( // A response to some data source operation such as a database or filesystem read/write FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") // To provide an answer to an inbound HTTP request FaaSTriggerHTTP = FaaSTriggerKey.String("http") // A function is set to be executed when messages are sent to a messaging system FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") // A function is scheduled to be executed regularly FaaSTriggerTimer = FaaSTriggerKey.String("timer") // If none of the others apply FaaSTriggerOther = FaaSTriggerKey.String("other") ) // Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. const ( // The name of the source on which the triggering operation was performed. For // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos // DB to the database name. // // Type: string // Required: Always // Stability: stable // Examples: 'myBucketName', 'myDBName' FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") // Describes the type of the operation that was performed on the data. // // Type: Enum // Required: Always // Stability: stable FaaSDocumentOperationKey = attribute.Key("faas.document.operation") // A string containing the time when the data was accessed in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed // in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // Required: Always // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSDocumentTimeKey = attribute.Key("faas.document.time") // The document name/table subjected to the operation. For example, in Cloud // Storage or S3 is the name of the file, and in Cosmos DB the table name. // // Type: string // Required: No // Stability: stable // Examples: 'myFile.txt', 'myTableName' FaaSDocumentNameKey = attribute.Key("faas.document.name") ) var ( // When a new object is created FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") // When an object is modified FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") // When an object is deleted FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") ) // Semantic Convention for FaaS scheduled to be executed regularly. const ( // A string containing the function invocation time in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed // in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // Required: Always // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSTimeKey = attribute.Key("faas.time") // A string containing the schedule period as [Cron Expression](https://docs.oracl // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). // // Type: string // Required: No // Stability: stable // Examples: '0/5 * * * ? *' FaaSCronKey = attribute.Key("faas.cron") ) // Contains additional attributes for incoming FaaS spans. const ( // A boolean that is true if the serverless function is executed for the first // time (aka cold-start). // // Type: boolean // Required: No // Stability: stable FaaSColdstartKey = attribute.Key("faas.coldstart") ) // Contains additional attributes for outgoing FaaS spans. const ( // The name of the invoked function. 
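// Illustrative usage sketch (not part of the generated convention definitions): attributes
// for an incoming FaaS invocation span; the values are placeholders and span is assumed to
// come from the surrounding instrumentation.
//
//	span.SetAttributes(
//		semconv.FaaSTriggerHTTP,
//		semconv.FaaSExecutionKey.String("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
//		semconv.FaaSColdstartKey.Bool(true),
//	)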
// // Type: string // Required: Always // Stability: stable // Examples: 'my-function' // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked // function. FaaSInvokedNameKey = attribute.Key("faas.invoked_name") // The cloud provider of the invoked function. // // Type: Enum // Required: Always // Stability: stable // Examples: 'aws' // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked // function. FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") // The cloud region of the invoked function. // // Type: string // Required: For some cloud providers, like AWS or GCP, the region in which a // function is hosted is essential to uniquely identify the function and also part // of its endpoint. Since it's part of the endpoint being called, the region is // always known to clients. In these cases, `faas.invoked_region` MUST be set // accordingly. If the region is unknown to the client or not required for // identifying the invoked function, setting `faas.invoked_region` is optional. // Stability: stable // Examples: 'eu-central-1' // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked // function. FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") ) var ( // Amazon Web Services FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") // Microsoft Azure FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") // Google Cloud Platform FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") ) // These attributes may be used for any network related operation. const ( // Transport protocol used. See note below. // // Type: Enum // Required: No // Stability: stable // Examples: 'ip_tcp' NetTransportKey = attribute.Key("net.transport") // Remote address of the peer (dotted decimal for IPv4 or // [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6) // // Type: string // Required: No // Stability: stable // Examples: '127.0.0.1' NetPeerIPKey = attribute.Key("net.peer.ip") // Remote port number. // // Type: int // Required: No // Stability: stable // Examples: 80, 8080, 443 NetPeerPortKey = attribute.Key("net.peer.port") // Remote hostname or similar, see note below. // // Type: string // Required: No // Stability: stable // Examples: 'example.com' NetPeerNameKey = attribute.Key("net.peer.name") // Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host. // // Type: string // Required: No // Stability: stable // Examples: '192.168.0.1' NetHostIPKey = attribute.Key("net.host.ip") // Like `net.peer.port` but for the host port. // // Type: int // Required: No // Stability: stable // Examples: 35555 NetHostPortKey = attribute.Key("net.host.port") // Local hostname or similar, see note below. // // Type: string // Required: No // Stability: stable // Examples: 'localhost' NetHostNameKey = attribute.Key("net.host.name") ) var ( // ip_tcp NetTransportTCP = NetTransportKey.String("ip_tcp") // ip_udp NetTransportUDP = NetTransportKey.String("ip_udp") // Another IP-based protocol NetTransportIP = NetTransportKey.String("ip") // Unix Domain socket. See below NetTransportUnix = NetTransportKey.String("unix") // Named or anonymous pipe. See note below NetTransportPipe = NetTransportKey.String("pipe") // In-process communication NetTransportInProc = NetTransportKey.String("inproc") // Something else (non IP-based) NetTransportOther = NetTransportKey.String("other") ) // Operations that access some remote service. 
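// Illustrative usage sketch (not part of the generated convention definitions): network
// attributes on a client span; the peer values are placeholders and span is assumed to come
// from the surrounding instrumentation.
//
//	span.SetAttributes(
//		semconv.NetTransportTCP,
//		semconv.NetPeerNameKey.String("example.com"),
//		semconv.NetPeerIPKey.String("127.0.0.1"),
//		semconv.NetPeerPortKey.Int(443),
//	)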
const ( // The [`service.name`](../../resource/semantic_conventions/README.md#service) of // the remote service. SHOULD be equal to the actual `service.name` resource // attribute of the remote service if any. // // Type: string // Required: No // Stability: stable // Examples: 'AuthTokenCache' PeerServiceKey = attribute.Key("peer.service") ) // These attributes may be used for any operation with an authenticated and/or authorized enduser. const ( // Username or client_id extracted from the access token or // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the // inbound request from outside the system. // // Type: string // Required: No // Stability: stable // Examples: 'username' EnduserIDKey = attribute.Key("enduser.id") // Actual/assumed role the client is making the request under extracted from token // or application security context. // // Type: string // Required: No // Stability: stable // Examples: 'admin' EnduserRoleKey = attribute.Key("enduser.role") // Scopes or granted authorities the client currently possesses extracted from // token or application security context. The value would come from the scope // associated with an [OAuth 2.0 Access // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value // in a [SAML 2.0 Assertion](http://docs.oasis- // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). // // Type: string // Required: No // Stability: stable // Examples: 'read:message, write:files' EnduserScopeKey = attribute.Key("enduser.scope") ) // These attributes may be used for any operation to store information about a thread that started a span. const ( // Current "managed" thread ID (as opposed to OS thread ID). // // Type: int // Required: No // Stability: stable // Examples: 42 ThreadIDKey = attribute.Key("thread.id") // Current thread name. // // Type: string // Required: No // Stability: stable // Examples: 'main' ThreadNameKey = attribute.Key("thread.name") ) // These attributes allow to report this unit of code and therefore to provide more context about the span. const ( // The method or function name, or equivalent (usually rightmost part of the code // unit's name). // // Type: string // Required: No // Stability: stable // Examples: 'serveRequest' CodeFunctionKey = attribute.Key("code.function") // The "namespace" within which `code.function` is defined. Usually the qualified // class or module name, such that `code.namespace` + some separator + // `code.function` form a unique identifier for the code unit. // // Type: string // Required: No // Stability: stable // Examples: 'com.example.MyHTTPService' CodeNamespaceKey = attribute.Key("code.namespace") // The source code file name that identifies the code unit as uniquely as possible // (preferably an absolute file path). // // Type: string // Required: No // Stability: stable // Examples: '/usr/local/MyApplication/content_root/app/index.php' CodeFilepathKey = attribute.Key("code.filepath") // The line number in `code.filepath` best representing the operation. It SHOULD // point within the code unit named in `code.function`. // // Type: int // Required: No // Stability: stable // Examples: 42 CodeLineNumberKey = attribute.Key("code.lineno") ) // This document defines semantic conventions for HTTP client and server Spans. const ( // HTTP request method. 
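// Illustrative usage sketch (not part of the generated convention definitions): identifying
// the remote service and the calling code on a span; the values are placeholders and span is
// assumed to come from the surrounding instrumentation.
//
//	span.SetAttributes(
//		semconv.PeerServiceKey.String("AuthTokenCache"),
//		semconv.CodeFunctionKey.String("serveRequest"),
//		semconv.CodeNamespaceKey.String("com.example.MyHTTPService"),
//		semconv.ThreadIDKey.Int(42),
//	)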
// // Type: string // Required: Always // Stability: stable // Examples: 'GET', 'POST', 'HEAD' HTTPMethodKey = attribute.Key("http.method") // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. // Usually the fragment is not transmitted over HTTP, but if it is known, it // should be included nevertheless. // // Type: string // Required: No // Stability: stable // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' // Note: `http.url` MUST NOT contain credentials passed via URL in form of // `https://username:password@www.example.com/`. In such case the attribute's // value should be `https://www.example.com/`. HTTPURLKey = attribute.Key("http.url") // The full request target as passed in a HTTP request line or equivalent. // // Type: string // Required: No // Stability: stable // Examples: '/path/12314/?q=ddds#123' HTTPTargetKey = attribute.Key("http.target") // The value of the [HTTP host // header](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is // empty or not present, this attribute should be the same. // // Type: string // Required: No // Stability: stable // Examples: 'www.example.org' HTTPHostKey = attribute.Key("http.host") // The URI scheme identifying the used protocol. // // Type: string // Required: No // Stability: stable // Examples: 'http', 'https' HTTPSchemeKey = attribute.Key("http.scheme") // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). // // Type: int // Required: If and only if one was received/sent. // Stability: stable // Examples: 200 HTTPStatusCodeKey = attribute.Key("http.status_code") // Kind of HTTP protocol used. // // Type: Enum // Required: No // Stability: stable // Examples: '1.0' // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP` // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed. HTTPFlavorKey = attribute.Key("http.flavor") // Value of the [HTTP User- // Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by the // client. // // Type: string // Required: No // Stability: stable // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' HTTPUserAgentKey = attribute.Key("http.user_agent") // The size of the request payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For // requests using transport encoding, this should be the compressed size. // // Type: int // Required: No // Stability: stable // Examples: 3495 HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") // The size of the uncompressed request payload body after transport decoding. Not // set if transport encoding not used. // // Type: int // Required: No // Stability: stable // Examples: 5493 HTTPRequestContentLengthUncompressedKey = attribute.Key("http.request_content_length_uncompressed") // The size of the response payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For // requests using transport encoding, this should be the compressed size. // // Type: int // Required: No // Stability: stable // Examples: 3495 HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") // The size of the uncompressed response payload body after transport decoding. // Not set if transport encoding not used. 
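// Illustrative usage sketch (not part of the generated convention definitions): a minimal
// HTTP client span using the attributes above; the URL and status code are placeholders and
// ctx is assumed to come from the caller.
//
//	ctx, span := otel.Tracer("example/http").Start(ctx, "HTTP GET",
//		trace.WithSpanKind(trace.SpanKindClient),
//		trace.WithAttributes(
//			semconv.HTTPMethodKey.String("GET"),
//			semconv.HTTPURLKey.String("https://www.example.com/search?q=OpenTelemetry"),
//			semconv.HTTPFlavorHTTP11,
//		),
//	)
//	// ... perform the request ...
//	span.SetAttributes(semconv.HTTPStatusCodeKey.Int(200))
//	span.End()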
// // Type: int // Required: No // Stability: stable // Examples: 5493 HTTPResponseContentLengthUncompressedKey = attribute.Key("http.response_content_length_uncompressed") ) var ( // HTTP 1.0 HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") // HTTP 1.1 HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") // HTTP 2 HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") // SPDY protocol HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") // QUIC protocol HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") ) // Semantic Convention for HTTP Server const ( // The primary server name of the matched virtual host. This should be obtained // via configuration. If no such configuration can be obtained, this attribute // MUST NOT be set ( `net.host.name` should be used instead). // // Type: string // Required: No // Stability: stable // Examples: 'example.com' // Note: `http.url` is usually not readily available on the server side but would // have to be assembled in a cumbersome and sometimes lossy process from other // information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus // preferred to supply the raw data that is available. HTTPServerNameKey = attribute.Key("http.server_name") // The matched route (path template). // // Type: string // Required: No // Stability: stable // Examples: '/users/:userID?' HTTPRouteKey = attribute.Key("http.route") // The IP address of the original client behind all proxies, if known (e.g. from // [X-Forwarded-For](https://developer.mozilla.org/en- // US/docs/Web/HTTP/Headers/X-Forwarded-For)). // // Type: string // Required: No // Stability: stable // Examples: '83.164.160.102' // Note: This is not necessarily the same as `net.peer.ip`, which would identify // the network-level peer, which may be a proxy. HTTPClientIPKey = attribute.Key("http.client_ip") ) // Attributes that exist for multiple DynamoDB request types. const ( // The keys in the `RequestItems` object field. // // Type: string[] // Required: No // Stability: stable // Examples: 'Users', 'Cats' AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") // The JSON-serialized value of each item in the `ConsumedCapacity` response // field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": // "string", "WriteCapacityUnits": number }' AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") // The JSON-serialized value of the `ItemCollectionMetrics` response field. // // Type: string // Required: No // Stability: stable // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. 
// // Type: double // Required: No // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. // // Type: double // Required: No // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") // The value of the `ConsistentRead` request parameter. // // Type: boolean // Required: No // Stability: stable AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") // The value of the `ProjectionExpression` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, // ProductReviews' AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") // The value of the `Limit` request parameter. // // Type: int // Required: No // Stability: stable // Examples: 10 AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") // The value of the `AttributesToGet` request parameter. // // Type: string[] // Required: No // Stability: stable // Examples: 'lives', 'id' AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") // The value of the `IndexName` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'name_to_group' AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") // The value of the `Select` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'ALL_ATTRIBUTES', 'COUNT' AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") ) // DynamoDB.CreateTable const ( // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request // field // // Type: string[] // Required: No // Stability: stable // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": // number, "WriteCapacityUnits": number } }' AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request // field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], // "ProjectionType": "string" } }' AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") ) // DynamoDB.ListTables const ( // The value of the `ExclusiveStartTableName` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'Users', 'CatsTable' AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") // The number of items in the `TableNames` response parameter. // // Type: int // Required: No // Stability: stable // Examples: 20 AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") ) // DynamoDB.Query const ( // The value of the `ScanIndexForward` request parameter. // // Type: boolean // Required: No // Stability: stable AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") ) // DynamoDB.Scan const ( // The value of the `Segment` request parameter.
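// Illustrative usage sketch (not part of the generated convention definitions): attributes
// for a DynamoDB Query call; the table, index, and limit values are placeholders and span is
// assumed to come from the surrounding instrumentation.
//
//	span.SetAttributes(
//		semconv.DBSystemDynamoDB,
//		semconv.AWSDynamoDBTableNamesKey.StringSlice([]string{"Users"}),
//		semconv.AWSDynamoDBIndexNameKey.String("name_to_group"),
//		semconv.AWSDynamoDBLimitKey.Int(10),
//		semconv.AWSDynamoDBScanForwardKey.Bool(true),
//	)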
// // Type: int // Required: No // Stability: stable // Examples: 10 AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") // The value of the `TotalSegments` request parameter. // // Type: int // Required: No // Stability: stable // Examples: 100 AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") // The value of the `Count` response parameter. // // Type: int // Required: No // Stability: stable // Examples: 10 AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") // The value of the `ScannedCount` response parameter. // // Type: int // Required: No // Stability: stable // Examples: 50 AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") ) // DynamoDB.UpdateTable const ( // The JSON-serialized value of each item in the `AttributeDefinitions` request // field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") // The JSON-serialized value of each item in the `GlobalSecondaryIndexUpdates` // request field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { // "AttributeName": "string", "KeyType": "string" } ], "Projection": { // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": // number } }' AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") ) // This document defines the attributes used in messaging systems. const ( // A string identifying the messaging system. // // Type: string // Required: Always // Stability: stable // Examples: 'kafka', 'rabbitmq', 'activemq', 'AmazonSQS' MessagingSystemKey = attribute.Key("messaging.system") // The message destination name. This might be equal to the span name but is // required nevertheless. // // Type: string // Required: Always // Stability: stable // Examples: 'MyQueue', 'MyTopic' MessagingDestinationKey = attribute.Key("messaging.destination") // The kind of message destination // // Type: Enum // Required: Required only if the message destination is either a `queue` or // `topic`. // Stability: stable MessagingDestinationKindKey = attribute.Key("messaging.destination_kind") // A boolean that is true if the message destination is temporary. // // Type: boolean // Required: If missing, it is assumed to be false. // Stability: stable MessagingTempDestinationKey = attribute.Key("messaging.temp_destination") // The name of the transport protocol. // // Type: string // Required: No // Stability: stable // Examples: 'AMQP', 'MQTT' MessagingProtocolKey = attribute.Key("messaging.protocol") // The version of the transport protocol. // // Type: string // Required: No // Stability: stable // Examples: '0.9.1' MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version") // Connection string. // // Type: string // Required: No // Stability: stable // Examples: 'tibjmsnaming://localhost:7222', // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' MessagingURLKey = attribute.Key("messaging.url") // A value used by the messaging system as an identifier for the message, // represented as a string.
// // Type: string // Required: No // Stability: stable // Examples: '452a7c7c7c7048c2f887f61572b18fc2' MessagingMessageIDKey = attribute.Key("messaging.message_id") // The [conversation ID](#conversations) identifying the conversation to which the // message belongs, represented as a string. Sometimes called "Correlation ID". // // Type: string // Required: No // Stability: stable // Examples: 'MyConversationID' MessagingConversationIDKey = attribute.Key("messaging.conversation_id") // The (uncompressed) size of the message payload in bytes. Also use this // attribute if it is unknown whether the compressed or uncompressed payload size // is reported. // // Type: int // Required: No // Stability: stable // Examples: 2738 MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes") // The compressed size of the message payload in bytes. // // Type: int // Required: No // Stability: stable // Examples: 2048 MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes") ) var ( // A message sent to a queue MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") // A message sent to a topic MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") ) // Semantic convention for a consumer of messages received from a messaging system const ( // A string identifying the kind of message consumption as defined in the // [Operation names](#operation-names) section above. If the operation is "send", // this attribute MUST NOT be set, since the operation can be inferred from the // span kind in that case. // // Type: Enum // Required: No // Stability: stable MessagingOperationKey = attribute.Key("messaging.operation") ) var ( // receive MessagingOperationReceive = MessagingOperationKey.String("receive") // process MessagingOperationProcess = MessagingOperationKey.String("process") ) // Attributes for RabbitMQ const ( // RabbitMQ message routing key. // // Type: string // Required: Unless it is empty. // Stability: stable // Examples: 'myKey' MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key") ) // Attributes for Apache Kafka const ( // Message keys in Kafka are used for grouping alike messages to ensure they're // processed on the same partition. They differ from `messaging.message_id` in // that they're not unique. If the key is `null`, the attribute MUST NOT be set. // // Type: string // Required: No // Stability: stable // Examples: 'myKey' // Note: If the key type is not string, its string representation has to be // supplied for the attribute. If the key has no unambiguous, canonical string // form, don't include its value. MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key") // Name of the Kafka Consumer Group that is handling the message. Only applies to // consumers, not producers. // // Type: string // Required: No // Stability: stable // Examples: 'my-group' MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group") // Client ID for the Consumer or Producer that is handling the message. // // Type: string // Required: No // Stability: stable // Examples: 'client-5' MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") // Partition the message is sent to. // // Type: int // Required: No // Stability: stable // Examples: 2 MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition") // A boolean that is true if the message is a tombstone.
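// Illustrative usage sketch (not part of the generated convention definitions): a Kafka
// producer span using the messaging attributes above; the topic, key, and partition values
// are placeholders and ctx is assumed to come from the caller.
//
//	ctx, span := otel.Tracer("example/kafka").Start(ctx, "MyTopic send",
//		trace.WithSpanKind(trace.SpanKindProducer),
//		trace.WithAttributes(
//			semconv.MessagingSystemKey.String("kafka"),
//			semconv.MessagingDestinationKey.String("MyTopic"),
//			semconv.MessagingDestinationKindTopic,
//			semconv.MessagingKafkaMessageKeyKey.String("myKey"),
//			semconv.MessagingKafkaPartitionKey.Int(2),
//		),
//	)
//	defer span.End()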
// // Type: boolean // Required: If missing, it is assumed to be false. // Stability: stable MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone") ) // This document defines semantic conventions for remote procedure calls. const ( // A string identifying the remoting system. // // Type: string // Required: Always // Stability: stable // Examples: 'grpc', 'java_rmi', 'wcf' RPCSystemKey = attribute.Key("rpc.system") // The full name of the service being called, including its package name, if // applicable. // // Type: string // Required: No, but recommended // Stability: stable // Examples: 'myservice.EchoService' RPCServiceKey = attribute.Key("rpc.service") // The name of the method being called, must be equal to the $method part in the // span name. // // Type: string // Required: No, but recommended // Stability: stable // Examples: 'exampleMethod' RPCMethodKey = attribute.Key("rpc.method") ) // Tech-specific attributes for gRPC. const ( // The [numeric status // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC // request. // // Type: Enum // Required: Always // Stability: stable // Examples: 0, 1, 16 RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ) var ( // OK RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) // CANCELLED RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) // UNKNOWN RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) // INVALID_ARGUMENT RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) // DEADLINE_EXCEEDED RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) // NOT_FOUND RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) // ALREADY_EXISTS RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) // PERMISSION_DENIED RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) // RESOURCE_EXHAUSTED RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) // FAILED_PRECONDITION RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) // ABORTED RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) // OUT_OF_RANGE RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) // UNIMPLEMENTED RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) // INTERNAL RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) // UNAVAILABLE RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) // DATA_LOSS RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) // UNAUTHENTICATED RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) ) // Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). const ( // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC // 1.0 does not specify this, the value can be omitted. // // Type: string // Required: If missing, it is assumed to be "1.0". // Stability: stable // Examples: '2.0', '1.0' RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") // `method` property from request. Unlike `rpc.method`, this may not relate to the // actual method being called. Useful for client-side traces since client does not // know what will be called on the server. // // Type: string // Required: Always // Stability: stable // Examples: 'users.create', 'get_users' RPCJsonrpcMethodKey = attribute.Key("rpc.jsonrpc.method") // `id` property of request or response. Since protocol allows id to be int, // string, `null` or missing (for notifications), value is expected to be cast to // string for simplicity. Use empty string in case of `null` value. 
Omit entirely // if this is a notification. // // Type: string // Required: No // Stability: stable // Examples: '10', 'request-7', '' RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") // `error.code` property of response if it is an error response. // // Type: int // Required: If missing, response is assumed to be successful. // Stability: stable // Examples: -32700, 100 RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") // `error.message` property of response if it is an error response. // // Type: string // Required: No // Stability: stable // Examples: 'Parse error', 'User already exists' RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") ) opentelemetry-go-1.21.0/semconv/v1.5.0/000077500000000000000000000000001452547353200174055ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.5.0/doc.go000066400000000000000000000016621452547353200205060ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package semconv implements OpenTelemetry semantic conventions. // // OpenTelemetry semantic conventions are agreed standardized naming // patterns for OpenTelemetry things. This package represents the conventions // as of the v1.5.0 version of the OpenTelemetry specification. package semconv // import "go.opentelemetry.io/otel/semconv/v1.5.0" opentelemetry-go-1.21.0/semconv/v1.5.0/exception.go000066400000000000000000000014271452547353200217360ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.5.0" const ( // ExceptionEventName is the name of the Span event representing an exception. ExceptionEventName = "exception" ) opentelemetry-go-1.21.0/semconv/v1.5.0/http.go000066400000000000000000000113111452547353200207100ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
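// Illustrative usage sketch (not part of the generated helpers below): how server-side
// instrumentation might combine the helper functions defined in this file; the tracer name,
// server name, and route are placeholders.
//
//	import (
//		"net/http"
//
//		"go.opentelemetry.io/otel"
//		semconv "go.opentelemetry.io/otel/semconv/v1.5.0"
//		"go.opentelemetry.io/otel/trace"
//	)
//
//	func handler(w http.ResponseWriter, r *http.Request) {
//		attrs := append(
//			semconv.NetAttributesFromHTTPRequest("tcp", r),
//			semconv.HTTPServerAttributesFromHTTPRequest("example.com", "/users/:userID?", r)...,
//		)
//		_, span := otel.Tracer("example/server").Start(r.Context(), "HTTP GET",
//			trace.WithSpanKind(trace.SpanKindServer),
//			trace.WithAttributes(attrs...),
//		)
//		defer span.End()
//
//		w.WriteHeader(http.StatusOK)
//		span.SetStatus(semconv.SpanStatusFromHTTPStatusCodeAndSpanKind(http.StatusOK, trace.SpanKindServer))
//	}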
package semconv // import "go.opentelemetry.io/otel/semconv/v1.5.0" import ( "net/http" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/semconv/internal" "go.opentelemetry.io/otel/trace" ) // HTTP scheme attributes. var ( HTTPSchemeHTTP = HTTPSchemeKey.String("http") HTTPSchemeHTTPS = HTTPSchemeKey.String("https") ) var sc = &internal.SemanticConventions{ EnduserIDKey: EnduserIDKey, HTTPClientIPKey: HTTPClientIPKey, HTTPFlavorKey: HTTPFlavorKey, HTTPHostKey: HTTPHostKey, HTTPMethodKey: HTTPMethodKey, HTTPRequestContentLengthKey: HTTPRequestContentLengthKey, HTTPRouteKey: HTTPRouteKey, HTTPSchemeHTTP: HTTPSchemeHTTP, HTTPSchemeHTTPS: HTTPSchemeHTTPS, HTTPServerNameKey: HTTPServerNameKey, HTTPStatusCodeKey: HTTPStatusCodeKey, HTTPTargetKey: HTTPTargetKey, HTTPURLKey: HTTPURLKey, HTTPUserAgentKey: HTTPUserAgentKey, NetHostIPKey: NetHostIPKey, NetHostNameKey: NetHostNameKey, NetHostPortKey: NetHostPortKey, NetPeerIPKey: NetPeerIPKey, NetPeerNameKey: NetPeerNameKey, NetPeerPortKey: NetPeerPortKey, NetTransportIP: NetTransportIP, NetTransportOther: NetTransportOther, NetTransportTCP: NetTransportTCP, NetTransportUDP: NetTransportUDP, NetTransportUnix: NetTransportUnix, } // NetAttributesFromHTTPRequest generates attributes of the net // namespace as specified by the OpenTelemetry specification for a // span. The network parameter is a string that net.Dial function // from standard library can understand. func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { return sc.NetAttributesFromHTTPRequest(network, request) } // EndUserAttributesFromHTTPRequest generates attributes of the // enduser namespace as specified by the OpenTelemetry specification // for a span. func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { return sc.EndUserAttributesFromHTTPRequest(request) } // HTTPClientAttributesFromHTTPRequest generates attributes of the // http namespace as specified by the OpenTelemetry specification for // a span on the client side. func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { return sc.HTTPClientAttributesFromHTTPRequest(request) } // HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes // to be used with server-side HTTP metrics. func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { return sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, request) } // HTTPServerAttributesFromHTTPRequest generates attributes of the // http namespace as specified by the OpenTelemetry specification for // a span on the server side. Currently, only basic authentication is // supported. func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { return sc.HTTPServerAttributesFromHTTPRequest(serverName, route, request) } // HTTPAttributesFromHTTPStatusCode generates attributes of the http // namespace as specified by the OpenTelemetry specification for a // span. func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { return sc.HTTPAttributesFromHTTPStatusCode(code) } // SpanStatusFromHTTPStatusCode generates a status code and a message // as specified by the OpenTelemetry specification for a span. 
func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { return internal.SpanStatusFromHTTPStatusCode(code) } // SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message // as specified by the OpenTelemetry specification for a span. // Exclude 4xx for SERVER to set the appropriate status. func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { return internal.SpanStatusFromHTTPStatusCodeAndSpanKind(code, spanKind) } opentelemetry-go-1.21.0/semconv/v1.5.0/resource.go000066400000000000000000000753061452547353200215760ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.5.0" import "go.opentelemetry.io/otel/attribute" // A cloud environment (e.g. GCP, Azure, AWS) const ( // Name of the cloud provider. // // Type: Enum // Required: No // Stability: stable CloudProviderKey = attribute.Key("cloud.provider") // The cloud account ID the resource is assigned to. // // Type: string // Required: No // Stability: stable // Examples: '111111111111', 'opentelemetry' CloudAccountIDKey = attribute.Key("cloud.account.id") // The geographical region the resource is running. Refer to your provider's docs // to see the available regions, for example [AWS // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), // [Azure regions](https://azure.microsoft.com/en-us/global- // infrastructure/geographies/), or [Google Cloud // regions](https://cloud.google.com/about/locations). // // Type: string // Required: No // Stability: stable // Examples: 'us-central1', 'us-east-1' CloudRegionKey = attribute.Key("cloud.region") // Cloud regions often have multiple, isolated locations known as zones to // increase availability. Availability zone represents the zone where the resource // is running. // // Type: string // Required: No // Stability: stable // Examples: 'us-east-1c' // Note: Availability zones are called "zones" on Google Cloud. CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") // The cloud platform in use. // // Type: Enum // Required: No // Stability: stable // Note: The prefix of the service SHOULD match the one specified in // `cloud.provider`. 
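// Illustrative usage sketch (not part of the generated convention definitions): a resource
// describing a cloud environment with the attributes above, assuming the
// go.opentelemetry.io/otel/sdk/resource package; the account, region, and zone values are
// placeholders.
//
//	res := resource.NewWithAttributes(
//		semconv.SchemaURL,
//		semconv.CloudProviderAWS,
//		semconv.CloudPlatformAWSEC2,
//		semconv.CloudAccountIDKey.String("111111111111"),
//		semconv.CloudRegionKey.String("us-east-1"),
//		semconv.CloudAvailabilityZoneKey.String("us-east-1c"),
//	)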
CloudPlatformKey = attribute.Key("cloud.platform") ) var ( // Amazon Web Services CloudProviderAWS = CloudProviderKey.String("aws") // Microsoft Azure CloudProviderAzure = CloudProviderKey.String("azure") // Google Cloud Platform CloudProviderGCP = CloudProviderKey.String("gcp") ) var ( // AWS Elastic Compute Cloud CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") // AWS Elastic Container Service CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") // AWS Elastic Kubernetes Service CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") // AWS Lambda CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") // AWS Elastic Beanstalk CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") // Azure Virtual Machines CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") // Azure Container Instances CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") // Azure Kubernetes Service CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") // Azure Functions CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") // Azure App Service CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") // Google Cloud Compute Engine (GCE) CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") // Google Cloud Run CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") // Google Cloud Kubernetes Engine (GKE) CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") // Google Cloud Functions (GCF) CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") // Google Cloud App Engine (GAE) CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") ) // Resources used by AWS Elastic Container Service (ECS). const ( // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws. // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us- // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo // perguide/clusters.html). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l // aunch_types.html) for an ECS task. // // Type: Enum // Required: No // Stability: stable AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates // t/developerguide/task_definitions.html). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us- // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") // The task definition family this task definition is a member of. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-family' AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") // The revision for this task definition. 
// // Type: string // Required: No // Stability: stable // Examples: '8', '26' AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") ) var ( // ec2 AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") // fargate AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") ) // Resources used by AWS Elastic Kubernetes Service (EKS). const ( // The ARN of an EKS cluster. // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") ) // Resources specific to Amazon Web Services. const ( // The name(s) of the AWS log group(s) an application is writing to. // // Type: string[] // Required: No // Stability: stable // Examples: '/aws/lambda/my-function', 'opentelemetry-service' // Note: Multiple log groups must be supported for cases like multi-container // applications, where a single application has sidecar containers, and each write // to their own log group. AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") // The Amazon Resource Name(s) (ARN) of the AWS log group(s). // // Type: string[] // Required: No // Stability: stable // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' // Note: See the [log group ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- // access-control-overview-cwl.html#CWL_ARN_Format). AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") // The name(s) of the AWS log stream(s) an application is writing to. // // Type: string[] // Required: No // Stability: stable // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") // The ARN(s) of the AWS log stream(s). // // Type: string[] // Required: No // Stability: stable // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' // Note: See the [log stream ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain // several log streams, so these ARNs necessarily identify both a log group and a // log stream. AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") ) // A container instance. const ( // Container name. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-autoconf' ContainerNameKey = attribute.Key("container.name") // Container ID. Usually a UUID, as for example used to [identify Docker // containers](https://docs.docker.com/engine/reference/run/#container- // identification). The UUID might be abbreviated. // // Type: string // Required: No // Stability: stable // Examples: 'a3bf90e006b2' ContainerIDKey = attribute.Key("container.id") // The container runtime managing this container. // // Type: string // Required: No // Stability: stable // Examples: 'docker', 'containerd', 'rkt' ContainerRuntimeKey = attribute.Key("container.runtime") // Name of the image the container was built on. // // Type: string // Required: No // Stability: stable // Examples: 'gcr.io/opentelemetry/operator' ContainerImageNameKey = attribute.Key("container.image.name") // Container image tag. // // Type: string // Required: No // Stability: stable // Examples: '0.1' ContainerImageTagKey = attribute.Key("container.image.tag") ) // The software deployment. 
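// Illustrative usage sketch (hand-written, not generated from the semantic
// convention specification): container and AWS CloudWatch Logs resource
// attributes can be expressed with the keys defined above; the values below
// are hypothetical:
//
//	attrs := []attribute.KeyValue{
//		ContainerNameKey.String("checkout"),
//		ContainerIDKey.String("a3bf90e006b2"),
//		ContainerRuntimeKey.String("containerd"),
//		ContainerImageNameKey.String("gcr.io/opentelemetry/operator"),
//		ContainerImageTagKey.String("0.1"),
//		AWSLogGroupNamesKey.StringSlice([]string{"/aws/ecs/checkout"}),
//	}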
const ( // Name of the [deployment // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka // deployment tier). // // Type: string // Required: No // Stability: stable // Examples: 'staging', 'production' DeploymentEnvironmentKey = attribute.Key("deployment.environment") ) // The device on which the process represented by this resource is running. const ( // A unique identifier representing the device // // Type: string // Required: No // Stability: stable // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' // Note: The device identifier MUST only be defined using the values outlined // below. This value is not an advertising identifier and MUST NOT be used as // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the // Firebase Installation ID or a globally unique UUID which is persisted across // sessions in your application. More information can be found // [here](https://developer.android.com/training/articles/user-data-ids) on best // practices and exact implementation details. Caution should be taken when // storing personal data or anything which can identify a user. GDPR and data // protection laws may apply, ensure you do your own due diligence. DeviceIDKey = attribute.Key("device.id") // The model identifier for the device // // Type: string // Required: No // Stability: stable // Examples: 'iPhone3,4', 'SM-G920F' // Note: It's recommended this value represents a machine readable version of the // model identifier rather than the market or consumer-friendly name of the // device. DeviceModelIdentifierKey = attribute.Key("device.model.identifier") // The marketing name for the device model // // Type: string // Required: No // Stability: stable // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' // Note: It's recommended this value represents a human readable version of the // device model rather than a machine readable alternative. DeviceModelNameKey = attribute.Key("device.model.name") ) // A serverless instance. const ( // The name of the single function that this runtime instance executes. // // Type: string // Required: Always // Stability: stable // Examples: 'my-function' // Note: This is the name of the function as configured/deployed on the FaaS // platform and is usually different from the name of the callback function (which // may be stored in the // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span- // general.md#source-code-attributes) span attributes). FaaSNameKey = attribute.Key("faas.name") // The unique ID of the single function that this runtime instance executes. // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' // Note: Depending on the cloud provider, use: // * **AWS Lambda:** The function // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and- // namespaces.html). // Take care not to use the "invoked ARN" directly but replace any // [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration- // aliases.html) with the resolved function version, as the same runtime instance // may be invokable with multiple // different aliases. 
// * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full- // resource-names) // * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en- // us/rest/api/resources/resources/get-by-id). // On some providers, it may not be possible to determine the full ID at startup, // which is why this field cannot be made required. For example, on AWS the // account ID // part of the ARN is not available without calling another AWS API // which may be deemed too slow for a short-running lambda function. // As an alternative, consider setting `faas.id` as a span attribute instead. FaaSIDKey = attribute.Key("faas.id") // The immutable version of the function being executed. // // Type: string // Required: No // Stability: stable // Examples: '26', 'pinkfroid-00002' // Note: Depending on the cloud provider and platform, use: // * **AWS Lambda:** The [function // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration- // versions.html) // (an integer represented as a decimal string). // * **Google Cloud Run:** The // [revision](https://cloud.google.com/run/docs/managing/revisions) // (i.e., the function name plus the revision suffix). // * **Google Cloud Functions:** The value of the // [`K_REVISION` environment // variable](https://cloud.google.com/functions/docs/env- // var#runtime_environment_variables_set_automatically). // * **Azure Functions:** Not applicable. Do not set this attribute. FaaSVersionKey = attribute.Key("faas.version") // The execution environment ID as a string, that will be potentially reused for // other invocations to the same function/function version. // // Type: string // Required: No // Stability: stable // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' // Note: * **AWS Lambda:** Use the (full) log stream name. FaaSInstanceKey = attribute.Key("faas.instance") // The amount of memory available to the serverless function in MiB. // // Type: int // Required: No // Stability: stable // Examples: 128 // Note: It's recommended to set this attribute since e.g. too little memory can // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this // information. FaaSMaxMemoryKey = attribute.Key("faas.max_memory") ) // A host is defined as a general computing instance. const ( // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud // provider. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-test' HostIDKey = attribute.Key("host.id") // Name of the host. On Unix systems, it may contain what the hostname command // returns, or the fully qualified hostname, or another name specified by the // user. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-test' HostNameKey = attribute.Key("host.name") // Type of host. For Cloud, this must be the machine type. // // Type: string // Required: No // Stability: stable // Examples: 'n1-standard-1' HostTypeKey = attribute.Key("host.type") // The CPU architecture the host system is running on. // // Type: Enum // Required: No // Stability: stable HostArchKey = attribute.Key("host.arch") // Name of the VM image or OS install the host was instantiated from. // // Type: string // Required: No // Stability: stable // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' HostImageNameKey = attribute.Key("host.image.name") // VM image ID. For Cloud, this value is from the provider. 
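// Illustrative usage sketch (hand-written, not generated from the semantic
// convention specification): host resource attributes built from the keys
// above, with hypothetical values:
//
//	attrs := []attribute.KeyValue{
//		HostIDKey.String("i-0123456789abcdef0"),
//		HostNameKey.String("opentelemetry-test"),
//		HostTypeKey.String("n1-standard-1"),
//		HostImageNameKey.String("CentOS-8-x86_64-1905"),
//	}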
// // Type: string // Required: No // Stability: stable // Examples: 'ami-07b06b442921831e5' HostImageIDKey = attribute.Key("host.image.id") // The version string of the VM image as defined in [Version // Attributes](README.md#version-attributes). // // Type: string // Required: No // Stability: stable // Examples: '0.1' HostImageVersionKey = attribute.Key("host.image.version") ) var ( // AMD64 HostArchAMD64 = HostArchKey.String("amd64") // ARM32 HostArchARM32 = HostArchKey.String("arm32") // ARM64 HostArchARM64 = HostArchKey.String("arm64") // Itanium HostArchIA64 = HostArchKey.String("ia64") // 32-bit PowerPC HostArchPPC32 = HostArchKey.String("ppc32") // 64-bit PowerPC HostArchPPC64 = HostArchKey.String("ppc64") // 32-bit x86 HostArchX86 = HostArchKey.String("x86") ) // A Kubernetes Cluster. const ( // The name of the cluster. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-cluster' K8SClusterNameKey = attribute.Key("k8s.cluster.name") ) // A Kubernetes Node object. const ( // The name of the Node. // // Type: string // Required: No // Stability: stable // Examples: 'node-1' K8SNodeNameKey = attribute.Key("k8s.node.name") // The UID of the Node. // // Type: string // Required: No // Stability: stable // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' K8SNodeUIDKey = attribute.Key("k8s.node.uid") ) // A Kubernetes Namespace. const ( // The name of the namespace that the pod is running in. // // Type: string // Required: No // Stability: stable // Examples: 'default' K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") ) // A Kubernetes Pod object. const ( // The UID of the Pod. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SPodUIDKey = attribute.Key("k8s.pod.uid") // The name of the Pod. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-pod-autoconf' K8SPodNameKey = attribute.Key("k8s.pod.name") ) // A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). const ( // The name of the Container in a Pod template. // // Type: string // Required: No // Stability: stable // Examples: 'redis' K8SContainerNameKey = attribute.Key("k8s.container.name") ) // A Kubernetes ReplicaSet object. const ( // The UID of the ReplicaSet. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") // The name of the ReplicaSet. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") ) // A Kubernetes Deployment object. const ( // The UID of the Deployment. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") // The name of the Deployment. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") ) // A Kubernetes StatefulSet object. const ( // The UID of the StatefulSet. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") // The name of the StatefulSet. 
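// Illustrative usage sketch (hand-written, not generated from the semantic
// convention specification): a pod running in a Deployment could be described
// with the Kubernetes keys above (hypothetical values):
//
//	attrs := []attribute.KeyValue{
//		K8SClusterNameKey.String("opentelemetry-cluster"),
//		K8SNamespaceNameKey.String("default"),
//		K8SPodNameKey.String("opentelemetry-pod-autoconf"),
//		K8SContainerNameKey.String("redis"),
//		K8SDeploymentNameKey.String("opentelemetry"),
//	}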
// // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") ) // A Kubernetes DaemonSet object. const ( // The UID of the DaemonSet. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") // The name of the DaemonSet. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") ) // A Kubernetes Job object. const ( // The UID of the Job. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SJobUIDKey = attribute.Key("k8s.job.uid") // The name of the Job. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SJobNameKey = attribute.Key("k8s.job.name") ) // A Kubernetes CronJob object. const ( // The UID of the CronJob. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") // The name of the CronJob. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") ) // The operating system (OS) on which the process represented by this resource is running. const ( // The operating system type. // // Type: Enum // Required: Always // Stability: stable OSTypeKey = attribute.Key("os.type") // Human readable (not intended to be parsed) OS version information, like e.g. // reported by `ver` or `lsb_release -a` commands. // // Type: string // Required: No // Stability: stable // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' OSDescriptionKey = attribute.Key("os.description") // Human readable operating system name. // // Type: string // Required: No // Stability: stable // Examples: 'iOS', 'Android', 'Ubuntu' OSNameKey = attribute.Key("os.name") // The version string of the operating system as defined in [Version // Attributes](../../resource/semantic_conventions/README.md#version-attributes). // // Type: string // Required: No // Stability: stable // Examples: '14.2.1', '18.04.1' OSVersionKey = attribute.Key("os.version") ) var ( // Microsoft Windows OSTypeWindows = OSTypeKey.String("windows") // Linux OSTypeLinux = OSTypeKey.String("linux") // Apple Darwin OSTypeDarwin = OSTypeKey.String("darwin") // FreeBSD OSTypeFreeBSD = OSTypeKey.String("freebsd") // NetBSD OSTypeNetBSD = OSTypeKey.String("netbsd") // OpenBSD OSTypeOpenBSD = OSTypeKey.String("openbsd") // DragonFly BSD OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") // HP-UX (Hewlett Packard Unix) OSTypeHPUX = OSTypeKey.String("hpux") // AIX (Advanced Interactive eXecutive) OSTypeAIX = OSTypeKey.String("aix") // Oracle Solaris OSTypeSolaris = OSTypeKey.String("solaris") // IBM z/OS OSTypeZOS = OSTypeKey.String("z_os") ) // An operating system process. const ( // Process identifier (PID). // // Type: int // Required: No // Stability: stable // Examples: 1234 ProcessPIDKey = attribute.Key("process.pid") // The name of the process executable. On Linux based systems, can be set to the // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of // `GetProcessImageFileNameW`. 
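// Illustrative usage sketch (hand-written, not generated from the semantic
// convention specification): operating system resource attributes using the
// keys and enum values above (hypothetical values):
//
//	attrs := []attribute.KeyValue{
//		OSTypeLinux,
//		OSNameKey.String("Ubuntu"),
//		OSVersionKey.String("18.04.1"),
//		OSDescriptionKey.String("Ubuntu 18.04.1 LTS"),
//	}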
// // Type: string // Required: See below // Stability: stable // Examples: 'otelcol' ProcessExecutableNameKey = attribute.Key("process.executable.name") // The full path to the process executable. On Linux based systems, can be set to // the target of `proc/[pid]/exe`. On Windows, can be set to the result of // `GetProcessImageFileNameW`. // // Type: string // Required: See below // Stability: stable // Examples: '/usr/bin/cmd/otelcol' ProcessExecutablePathKey = attribute.Key("process.executable.path") // The command used to launch the process (i.e. the command name). On Linux based // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, // can be set to the first parameter extracted from `GetCommandLineW`. // // Type: string // Required: See below // Stability: stable // Examples: 'cmd/otelcol' ProcessCommandKey = attribute.Key("process.command") // The full command used to launch the process as a single string representing the // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not // set this if you have to assemble it just for monitoring; use // `process.command_args` instead. // // Type: string // Required: See below // Stability: stable // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' ProcessCommandLineKey = attribute.Key("process.command_line") // All the command arguments (including the command/executable itself) as received // by the process. On Linux-based systems (and some other Unixoid systems // supporting procfs), can be set according to the list of null-delimited strings // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be // the full argv vector passed to `main`. // // Type: string[] // Required: See below // Stability: stable // Examples: 'cmd/otecol', '--config=config.yaml' ProcessCommandArgsKey = attribute.Key("process.command_args") // The username of the user that owns the process. // // Type: string // Required: No // Stability: stable // Examples: 'root' ProcessOwnerKey = attribute.Key("process.owner") ) // The single (language) runtime instance which is monitored. const ( // The name of the runtime of this process. For compiled native binaries, this // SHOULD be the name of the compiler. // // Type: string // Required: No // Stability: stable // Examples: 'OpenJDK Runtime Environment' ProcessRuntimeNameKey = attribute.Key("process.runtime.name") // The version of the runtime of this process, as returned by the runtime without // modification. // // Type: string // Required: No // Stability: stable // Examples: '14.0.2' ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") // An additional description about the runtime of the process, for example a // specific vendor customization of the runtime environment. // // Type: string // Required: No // Stability: stable // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") ) // A service instance. const ( // Logical name of the service. // // Type: string // Required: Always // Stability: stable // Examples: 'shoppingcart' // Note: MUST be the same for all instances of horizontally scaled services. If // the value was not specified, SDKs MUST fallback to `unknown_service:` // concatenated with [`process.executable.name`](process.md#process), e.g. // `unknown_service:bash`. If `process.executable.name` is not available, the // value MUST be set to `unknown_service`. 
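// Illustrative usage sketch (hand-written, not generated from the semantic
// convention specification): process and runtime resource attributes for a Go
// binary, with hypothetical values:
//
//	attrs := []attribute.KeyValue{
//		ProcessPIDKey.Int(1234),
//		ProcessExecutableNameKey.String("otelcol"),
//		ProcessCommandArgsKey.StringSlice([]string{"otelcol", "--config=config.yaml"}),
//		ProcessOwnerKey.String("root"),
//		ProcessRuntimeNameKey.String("go"),
//		ProcessRuntimeVersionKey.String("1.20.3"),
//	}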
ServiceNameKey = attribute.Key("service.name") // A namespace for `service.name`. // // Type: string // Required: No // Stability: stable // Examples: 'Shop' // Note: A string value having a meaning that helps to distinguish a group of // services, for example the team name that owns a group of services. // `service.name` is expected to be unique within the same namespace. If // `service.namespace` is not specified in the Resource then `service.name` is // expected to be unique for all services that have no explicit namespace defined // (so the empty/unspecified namespace is simply one more valid namespace). Zero- // length namespace string is assumed equal to unspecified namespace. ServiceNamespaceKey = attribute.Key("service.namespace") // The string ID of the service instance. // // Type: string // Required: No // Stability: stable // Examples: '627cc493-f310-47de-96bd-71410b7dec09' // Note: MUST be unique for each instance of the same // `service.namespace,service.name` pair (in other words // `service.namespace,service.name,service.instance.id` triplet MUST be globally // unique). The ID helps to distinguish instances of the same service that exist // at the same time (e.g. instances of a horizontally scaled service). It is // preferable for the ID to be persistent and stay the same for the lifetime of // the service instance, however it is acceptable that the ID is ephemeral and // changes during important lifetime events for the service (e.g. service // restarts). If the service has no inherent unique ID that can be used as the // value of this attribute it is recommended to generate a random Version 1 or // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use // Version 5, see RFC 4122 for more recommendations). ServiceInstanceIDKey = attribute.Key("service.instance.id") // The version string of the service API or implementation. // // Type: string // Required: No // Stability: stable // Examples: '2.0.0' ServiceVersionKey = attribute.Key("service.version") ) // The telemetry SDK used to capture data recorded by the instrumentation libraries. const ( // The name of the telemetry SDK as defined above. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") // The language of the telemetry SDK. // // Type: Enum // Required: No // Stability: stable TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") // The version string of the telemetry SDK. // // Type: string // Required: No // Stability: stable // Examples: '1.2.3' TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") // The version string of the auto instrumentation agent, if used. 
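// Illustrative usage sketch (hand-written, not generated from the semantic
// convention specification): the service identity keys above are commonly the
// minimum set attached to an SDK resource. Assuming the
// go.opentelemetry.io/otel/sdk/resource package is imported and using
// hypothetical values:
//
//	res := resource.NewWithAttributes(SchemaURL,
//		ServiceNameKey.String("shoppingcart"),
//		ServiceNamespaceKey.String("Shop"),
//		ServiceInstanceIDKey.String("627cc493-f310-47de-96bd-71410b7dec09"),
//		ServiceVersionKey.String("2.0.0"),
//	)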
// // Type: string // Required: No // Stability: stable // Examples: '1.2.3' TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") ) var ( // cpp TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") // dotnet TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") // erlang TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") // go TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") // java TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") // nodejs TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") // php TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") // python TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") // ruby TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") // webjs TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") ) // Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. const ( // The name of the web engine. // // Type: string // Required: Always // Stability: stable // Examples: 'WildFly' WebEngineNameKey = attribute.Key("webengine.name") // The version of the web engine. // // Type: string // Required: No // Stability: stable // Examples: '21.0.0' WebEngineVersionKey = attribute.Key("webengine.version") // Additional description of the web engine (e.g. detailed version and edition // information). // // Type: string // Required: No // Stability: stable // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' WebEngineDescriptionKey = attribute.Key("webengine.description") ) opentelemetry-go-1.21.0/semconv/v1.5.0/schema.go000066400000000000000000000017121452547353200211750ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.5.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ const SchemaURL = "https://opentelemetry.io/schemas/1.5.0" opentelemetry-go-1.21.0/semconv/v1.5.0/trace.go000066400000000000000000001407101452547353200210350ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. 
DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.5.0" import "go.opentelemetry.io/otel/attribute" // Span attributes used by AWS Lambda (in addition to general `faas` attributes). const ( // The full invoked ARN as provided on the `Context` passed to the function // (`Lambda-Runtime-Invoked-Function-ARN` header on the `/runtime/invocation/next` // applicable). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' // Note: This may be different from `faas.id` if an alias is involved. AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") ) // This document defines the attributes used to perform database client calls. const ( // An identifier for the database management system (DBMS) product being used. See // below for a list of well-known identifiers. // // Type: Enum // Required: Always // Stability: stable DBSystemKey = attribute.Key("db.system") // The connection string used to connect to the database. It is recommended to // remove embedded credentials. // // Type: string // Required: No // Stability: stable // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' DBConnectionStringKey = attribute.Key("db.connection_string") // Username for accessing the database. // // Type: string // Required: No // Stability: stable // Examples: 'readonly_user', 'reporting_user' DBUserKey = attribute.Key("db.user") // The fully-qualified class name of the [Java Database Connectivity // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver // used to connect. // // Type: string // Required: No // Stability: stable // Examples: 'org.postgresql.Driver', // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") // If no [tech-specific attribute](#call-level-attributes-for-specific- // technologies) is defined, this attribute is used to report the name of the // database being accessed. For commands that switch the database, this should be // set to the target database (even if the command fails). // // Type: string // Required: Required, if applicable and no more-specific attribute is defined. // Stability: stable // Examples: 'customers', 'main' // Note: In some SQL databases, the database name to be used is called "schema // name". DBNameKey = attribute.Key("db.name") // The database statement being executed. // // Type: string // Required: Required if applicable and not explicitly disabled via // instrumentation configuration. // Stability: stable // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' // Note: The value may be sanitized to exclude sensitive information. DBStatementKey = attribute.Key("db.statement") // The name of the operation being executed, e.g. the [MongoDB command // name](https://docs.mongodb.com/manual/reference/command/#database-operations) // such as `findAndModify`, or the SQL keyword. // // Type: string // Required: Required, if `db.statement` is not applicable. // Stability: stable // Examples: 'findAndModify', 'HMSET', 'SELECT' // Note: When setting this to an SQL keyword, it is not recommended to attempt any // client-side parsing of `db.statement` just to get this property, but it should // be set if the operation name is provided by the library being instrumented. If // the SQL statement has an ambiguous operation, or performs more than one // operation, this value may be omitted. 
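// Illustrative usage sketch (hand-written, not generated from the semantic
// convention specification): a database client span could carry the
// call-level keys above. Assuming a trace.Span obtained from the
// go.opentelemetry.io/otel/trace API and hypothetical values:
//
//	span.SetAttributes(
//		DBSystemPostgreSQL,
//		DBNameKey.String("customers"),
//		DBUserKey.String("readonly_user"),
//		DBStatementKey.String("SELECT * FROM wuser_table"),
//		DBOperationKey.String("SELECT"),
//	)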
DBOperationKey = attribute.Key("db.operation") ) var ( // Some other SQL database. Fallback only. See notes DBSystemOtherSQL = DBSystemKey.String("other_sql") // Microsoft SQL Server DBSystemMSSQL = DBSystemKey.String("mssql") // MySQL DBSystemMySQL = DBSystemKey.String("mysql") // Oracle Database DBSystemOracle = DBSystemKey.String("oracle") // IBM DB2 DBSystemDB2 = DBSystemKey.String("db2") // PostgreSQL DBSystemPostgreSQL = DBSystemKey.String("postgresql") // Amazon Redshift DBSystemRedshift = DBSystemKey.String("redshift") // Apache Hive DBSystemHive = DBSystemKey.String("hive") // Cloudscape DBSystemCloudscape = DBSystemKey.String("cloudscape") // HyperSQL DataBase DBSystemHSQLDB = DBSystemKey.String("hsqldb") // Progress Database DBSystemProgress = DBSystemKey.String("progress") // SAP MaxDB DBSystemMaxDB = DBSystemKey.String("maxdb") // SAP HANA DBSystemHanaDB = DBSystemKey.String("hanadb") // Ingres DBSystemIngres = DBSystemKey.String("ingres") // FirstSQL DBSystemFirstSQL = DBSystemKey.String("firstsql") // EnterpriseDB DBSystemEDB = DBSystemKey.String("edb") // InterSystems Caché DBSystemCache = DBSystemKey.String("cache") // Adabas (Adaptable Database System) DBSystemAdabas = DBSystemKey.String("adabas") // Firebird DBSystemFirebird = DBSystemKey.String("firebird") // Apache Derby DBSystemDerby = DBSystemKey.String("derby") // FileMaker DBSystemFilemaker = DBSystemKey.String("filemaker") // Informix DBSystemInformix = DBSystemKey.String("informix") // InstantDB DBSystemInstantDB = DBSystemKey.String("instantdb") // InterBase DBSystemInterbase = DBSystemKey.String("interbase") // MariaDB DBSystemMariaDB = DBSystemKey.String("mariadb") // Netezza DBSystemNetezza = DBSystemKey.String("netezza") // Pervasive PSQL DBSystemPervasive = DBSystemKey.String("pervasive") // PointBase DBSystemPointbase = DBSystemKey.String("pointbase") // SQLite DBSystemSqlite = DBSystemKey.String("sqlite") // Sybase DBSystemSybase = DBSystemKey.String("sybase") // Teradata DBSystemTeradata = DBSystemKey.String("teradata") // Vertica DBSystemVertica = DBSystemKey.String("vertica") // H2 DBSystemH2 = DBSystemKey.String("h2") // ColdFusion IMQ DBSystemColdfusion = DBSystemKey.String("coldfusion") // Apache Cassandra DBSystemCassandra = DBSystemKey.String("cassandra") // Apache HBase DBSystemHBase = DBSystemKey.String("hbase") // MongoDB DBSystemMongoDB = DBSystemKey.String("mongodb") // Redis DBSystemRedis = DBSystemKey.String("redis") // Couchbase DBSystemCouchbase = DBSystemKey.String("couchbase") // CouchDB DBSystemCouchDB = DBSystemKey.String("couchdb") // Microsoft Azure Cosmos DB DBSystemCosmosDB = DBSystemKey.String("cosmosdb") // Amazon DynamoDB DBSystemDynamoDB = DBSystemKey.String("dynamodb") // Neo4j DBSystemNeo4j = DBSystemKey.String("neo4j") // Apache Geode DBSystemGeode = DBSystemKey.String("geode") // Elasticsearch DBSystemElasticsearch = DBSystemKey.String("elasticsearch") // Memcached DBSystemMemcached = DBSystemKey.String("memcached") // CockroachDB DBSystemCockroachdb = DBSystemKey.String("cockroachdb") ) // Connection-level attributes for Microsoft SQL Server const ( // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en- // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) // connecting to. This name is used to determine the port of a named instance. 
// // Type: string // Required: No // Stability: stable // Examples: 'MSSQLSERVER' // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer // required (but still recommended if non-standard). DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") ) // Call-level attributes for Cassandra const ( // The name of the keyspace being accessed. To be used instead of the generic // `db.name` attribute. // // Type: string // Required: Always // Stability: stable // Examples: 'mykeyspace' DBCassandraKeyspaceKey = attribute.Key("db.cassandra.keyspace") // The fetch size used for paging, i.e. how many rows will be returned at once. // // Type: int // Required: No // Stability: stable // Examples: 5000 DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") // The consistency level of the query. Based on consistency values from // [CQL](https://docs.datastax.com/en/cassandra- // oss/3.0/cassandra/dml/dmlConfigConsistency.html). // // Type: Enum // Required: No // Stability: stable DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") // The name of the primary table that the operation is acting upon, including the // schema name (if applicable). // // Type: string // Required: Recommended if available. // Stability: stable // Examples: 'mytable' // Note: This mirrors the db.sql.table attribute but references cassandra rather // than sql. It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting upon an // anonymous table, or more than one table, this value MUST NOT be set. DBCassandraTableKey = attribute.Key("db.cassandra.table") // Whether or not the query is idempotent. // // Type: boolean // Required: No // Stability: stable DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") // The number of times a query was speculatively executed. Not set or `0` if the // query was not executed speculatively. // // Type: int // Required: No // Stability: stable // Examples: 0, 2 DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") // The ID of the coordinating node for a query. // // Type: string // Required: No // Stability: stable // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") // The data center of the coordinating node for a query. 
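// Illustrative usage sketch (hand-written, not generated from the semantic
// convention specification): Cassandra call-level attributes on a client
// span, assuming a trace.Span and hypothetical values:
//
//	span.SetAttributes(
//		DBSystemCassandra,
//		DBCassandraKeyspaceKey.String("mykeyspace"),
//		DBCassandraTableKey.String("mytable"),
//		DBCassandraPageSizeKey.Int(5000),
//		DBCassandraIdempotenceKey.Bool(true),
//	)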
// // Type: string // Required: No // Stability: stable // Examples: 'us-west-2' DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") ) var ( // all DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") // each_quorum DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") // quorum DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") // local_quorum DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") // one DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") // two DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") // three DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") // local_one DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") // any DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") // serial DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") // local_serial DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") ) // Call-level attributes for Apache HBase const ( // The [HBase namespace](https://hbase.apache.org/book.html#_namespace) being // accessed. To be used instead of the generic `db.name` attribute. // // Type: string // Required: Always // Stability: stable // Examples: 'default' DBHBaseNamespaceKey = attribute.Key("db.hbase.namespace") ) // Call-level attributes for Redis const ( // The index of the database being accessed as used in the [`SELECT` // command](https://redis.io/commands/select), provided as an integer. To be used // instead of the generic `db.name` attribute. // // Type: int // Required: Required, if other than the default database (`0`). // Stability: stable // Examples: 0, 1, 15 DBRedisDBIndexKey = attribute.Key("db.redis.database_index") ) // Call-level attributes for MongoDB const ( // The collection being accessed within the database stated in `db.name`. // // Type: string // Required: Always // Stability: stable // Examples: 'customers', 'products' DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") ) // Call-level attributes for SQL databases const ( // The name of the primary table that the operation is acting upon, including the // schema name (if applicable). // // Type: string // Required: Recommended if available. // Stability: stable // Examples: 'public.users', 'customers' // Note: It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting upon an // anonymous table, or more than one table, this value MUST NOT be set. DBSQLTableKey = attribute.Key("db.sql.table") ) // This document defines the attributes used to report a single exception associated with a span. const ( // The type of the exception (its fully-qualified class name, if applicable). The // dynamic type of the exception should be preferred over the static type in // languages that support it. // // Type: string // Required: No // Stability: stable // Examples: 'java.net.ConnectException', 'OSError' ExceptionTypeKey = attribute.Key("exception.type") // The exception message. 
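// Illustrative usage sketch (hand-written, not generated from the semantic
// convention specification): exception attributes are usually recorded on a
// span event named "exception". Assuming a trace.Span and the
// trace.WithAttributes option from go.opentelemetry.io/otel/trace, with
// hypothetical values:
//
//	span.AddEvent("exception", trace.WithAttributes(
//		ExceptionTypeKey.String("OSError"),
//		ExceptionMessageKey.String("Division by zero"),
//		ExceptionEscapedKey.Bool(false),
//	))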
// // Type: string // Required: No // Stability: stable // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" ExceptionMessageKey = attribute.Key("exception.message") // A stacktrace as a string in the natural representation for the language // runtime. The representation is to be determined and documented by each language // SIG. // // Type: string // Required: No // Stability: stable // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test // exception\\n at ' // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' ExceptionStacktraceKey = attribute.Key("exception.stacktrace") // SHOULD be set to true if the exception event is recorded at a point where it is // known that the exception is escaping the scope of the span. // // Type: boolean // Required: No // Stability: stable // Note: An exception is considered to have escaped (or left) the scope of a span, // if that span is ended while the exception is still logically "in flight". // This may be actually "in flight" in some languages (e.g. if the exception // is passed to a Context manager's `__exit__` method in Python) but will // usually be caught at the point of recording the exception in most languages. // It is usually not possible to determine at the point where an exception is // thrown // whether it will escape the scope of a span. // However, it is trivial to know that an exception // will escape, if one checks for an active exception just before ending the span, // as done in the [example above](#exception-end-example). // It follows that an exception may still escape the scope of the span // even if the `exception.escaped` attribute was not set or set to false, // since the event might have been recorded at a time where it was not // clear whether the exception will escape. ExceptionEscapedKey = attribute.Key("exception.escaped") ) // This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. const ( // Type of the trigger on which the function is executed. // // Type: Enum // Required: On FaaS instances, faas.trigger MUST be set on incoming invocations. // Clients invoking FaaS instances MUST set `faas.trigger` on outgoing // invocations, if it is known to the client. This is, for example, not the case, // when the transport layer is abstracted in a FaaS client framework without // access to its configuration. // Stability: stable FaaSTriggerKey = attribute.Key("faas.trigger") // The execution ID of the current function execution. 
// // Type: string // Required: No // Stability: stable // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' FaaSExecutionKey = attribute.Key("faas.execution") ) var ( // A response to some data source operation such as a database or filesystem read/write FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") // To provide an answer to an inbound HTTP request FaaSTriggerHTTP = FaaSTriggerKey.String("http") // A function is set to be executed when messages are sent to a messaging system FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") // A function is scheduled to be executed regularly FaaSTriggerTimer = FaaSTriggerKey.String("timer") // If none of the others apply FaaSTriggerOther = FaaSTriggerKey.String("other") ) // Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. const ( // The name of the source on which the triggering operation was performed. For // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos // DB to the database name. // // Type: string // Required: Always // Stability: stable // Examples: 'myBucketName', 'myDBName' FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") // Describes the type of the operation that was performed on the data. // // Type: Enum // Required: Always // Stability: stable FaaSDocumentOperationKey = attribute.Key("faas.document.operation") // A string containing the time when the data was accessed in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed // in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // Required: Always // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSDocumentTimeKey = attribute.Key("faas.document.time") // The document name/table subjected to the operation. For example, in Cloud // Storage or S3 is the name of the file, and in Cosmos DB the table name. // // Type: string // Required: No // Stability: stable // Examples: 'myFile.txt', 'myTableName' FaaSDocumentNameKey = attribute.Key("faas.document.name") ) var ( // When a new object is created FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") // When an object is modified FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") // When an object is deleted FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") ) // Semantic Convention for FaaS scheduled to be executed regularly. const ( // A string containing the function invocation time in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed // in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // Required: Always // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSTimeKey = attribute.Key("faas.time") // A string containing the schedule period as [Cron Expression](https://docs.oracl // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). // // Type: string // Required: No // Stability: stable // Examples: '0/5 * * * ? *' FaaSCronKey = attribute.Key("faas.cron") ) // Contains additional attributes for incoming FaaS spans. const ( // A boolean that is true if the serverless function is executed for the first // time (aka cold-start). // // Type: boolean // Required: No // Stability: stable FaaSColdstartKey = attribute.Key("faas.coldstart") ) // Contains additional attributes for outgoing FaaS spans. const ( // The name of the invoked function. 
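// Illustrative usage sketch (hand-written, not generated from the semantic
// convention specification): an incoming FaaS invocation triggered by a
// storage event could be annotated with the keys above, assuming a trace.Span
// and hypothetical values:
//
//	span.SetAttributes(
//		FaaSTriggerDatasource,
//		FaaSDocumentCollectionKey.String("myBucketName"),
//		FaaSDocumentOperationInsert,
//		FaaSDocumentTimeKey.String("2020-01-23T13:47:06Z"),
//		FaaSDocumentNameKey.String("myFile.txt"),
//		FaaSColdstartKey.Bool(true),
//	)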
// // Type: string // Required: Always // Stability: stable // Examples: 'my-function' // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked // function. FaaSInvokedNameKey = attribute.Key("faas.invoked_name") // The cloud provider of the invoked function. // // Type: Enum // Required: Always // Stability: stable // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked // function. FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") // The cloud region of the invoked function. // // Type: string // Required: For some cloud providers, like AWS or GCP, the region in which a // function is hosted is essential to uniquely identify the function and also part // of its endpoint. Since it's part of the endpoint being called, the region is // always known to clients. In these cases, `faas.invoked_region` MUST be set // accordingly. If the region is unknown to the client or not required for // identifying the invoked function, setting `faas.invoked_region` is optional. // Stability: stable // Examples: 'eu-central-1' // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked // function. FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") ) var ( // Amazon Web Services FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") // Microsoft Azure FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") // Google Cloud Platform FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") ) // These attributes may be used for any network related operation. const ( // Transport protocol used. See note below. // // Type: Enum // Required: No // Stability: stable NetTransportKey = attribute.Key("net.transport") // Remote address of the peer (dotted decimal for IPv4 or // [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6) // // Type: string // Required: No // Stability: stable // Examples: '127.0.0.1' NetPeerIPKey = attribute.Key("net.peer.ip") // Remote port number. // // Type: int // Required: No // Stability: stable // Examples: 80, 8080, 443 NetPeerPortKey = attribute.Key("net.peer.port") // Remote hostname or similar, see note below. // // Type: string // Required: No // Stability: stable // Examples: 'example.com' NetPeerNameKey = attribute.Key("net.peer.name") // Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host. // // Type: string // Required: No // Stability: stable // Examples: '192.168.0.1' NetHostIPKey = attribute.Key("net.host.ip") // Like `net.peer.port` but for the host port. // // Type: int // Required: No // Stability: stable // Examples: 35555 NetHostPortKey = attribute.Key("net.host.port") // Local hostname or similar, see note below. // // Type: string // Required: No // Stability: stable // Examples: 'localhost' NetHostNameKey = attribute.Key("net.host.name") ) var ( // ip_tcp NetTransportTCP = NetTransportKey.String("ip_tcp") // ip_udp NetTransportUDP = NetTransportKey.String("ip_udp") // Another IP-based protocol NetTransportIP = NetTransportKey.String("ip") // Unix Domain socket. See below NetTransportUnix = NetTransportKey.String("unix") // Named or anonymous pipe. See note below NetTransportPipe = NetTransportKey.String("pipe") // In-process communication NetTransportInProc = NetTransportKey.String("inproc") // Something else (non IP-based) NetTransportOther = NetTransportKey.String("other") ) // Operations that access some remote service. 
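// Illustrative usage sketch (hand-written, not generated from the semantic
// convention specification): network-level attributes for a client span using
// the keys above, assuming a trace.Span and hypothetical values:
//
//	span.SetAttributes(
//		NetTransportTCP,
//		NetPeerNameKey.String("example.com"),
//		NetPeerIPKey.String("127.0.0.1"),
//		NetPeerPortKey.Int(443),
//	)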
const ( // The [`service.name`](../../resource/semantic_conventions/README.md#service) of // the remote service. SHOULD be equal to the actual `service.name` resource // attribute of the remote service if any. // // Type: string // Required: No // Stability: stable // Examples: 'AuthTokenCache' PeerServiceKey = attribute.Key("peer.service") ) // These attributes may be used for any operation with an authenticated and/or authorized enduser. const ( // Username or client_id extracted from the access token or // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the // inbound request from outside the system. // // Type: string // Required: No // Stability: stable // Examples: 'username' EnduserIDKey = attribute.Key("enduser.id") // Actual/assumed role the client is making the request under extracted from token // or application security context. // // Type: string // Required: No // Stability: stable // Examples: 'admin' EnduserRoleKey = attribute.Key("enduser.role") // Scopes or granted authorities the client currently possesses extracted from // token or application security context. The value would come from the scope // associated with an [OAuth 2.0 Access // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value // in a [SAML 2.0 Assertion](http://docs.oasis- // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). // // Type: string // Required: No // Stability: stable // Examples: 'read:message, write:files' EnduserScopeKey = attribute.Key("enduser.scope") ) // These attributes may be used for any operation to store information about a thread that started a span. const ( // Current "managed" thread ID (as opposed to OS thread ID). // // Type: int // Required: No // Stability: stable // Examples: 42 ThreadIDKey = attribute.Key("thread.id") // Current thread name. // // Type: string // Required: No // Stability: stable // Examples: 'main' ThreadNameKey = attribute.Key("thread.name") ) // These attributes allow to report this unit of code and therefore to provide more context about the span. const ( // The method or function name, or equivalent (usually rightmost part of the code // unit's name). // // Type: string // Required: No // Stability: stable // Examples: 'serveRequest' CodeFunctionKey = attribute.Key("code.function") // The "namespace" within which `code.function` is defined. Usually the qualified // class or module name, such that `code.namespace` + some separator + // `code.function` form a unique identifier for the code unit. // // Type: string // Required: No // Stability: stable // Examples: 'com.example.MyHTTPService' CodeNamespaceKey = attribute.Key("code.namespace") // The source code file name that identifies the code unit as uniquely as possible // (preferably an absolute file path). // // Type: string // Required: No // Stability: stable // Examples: '/usr/local/MyApplication/content_root/app/index.php' CodeFilepathKey = attribute.Key("code.filepath") // The line number in `code.filepath` best representing the operation. It SHOULD // point within the code unit named in `code.function`. // // Type: int // Required: No // Stability: stable // Examples: 42 CodeLineNumberKey = attribute.Key("code.lineno") ) // This document defines semantic conventions for HTTP client and server Spans. const ( // HTTP request method. 
// // Type: string // Required: Always // Stability: stable // Examples: 'GET', 'POST', 'HEAD' HTTPMethodKey = attribute.Key("http.method") // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. // Usually the fragment is not transmitted over HTTP, but if it is known, it // should be included nevertheless. // // Type: string // Required: No // Stability: stable // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' // Note: `http.url` MUST NOT contain credentials passed via URL in form of // `https://username:password@www.example.com/`. In such case the attribute's // value should be `https://www.example.com/`. HTTPURLKey = attribute.Key("http.url") // The full request target as passed in a HTTP request line or equivalent. // // Type: string // Required: No // Stability: stable // Examples: '/path/12314/?q=ddds#123' HTTPTargetKey = attribute.Key("http.target") // The value of the [HTTP host // header](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is // empty or not present, this attribute should be the same. // // Type: string // Required: No // Stability: stable // Examples: 'www.example.org' HTTPHostKey = attribute.Key("http.host") // The URI scheme identifying the used protocol. // // Type: string // Required: No // Stability: stable // Examples: 'http', 'https' HTTPSchemeKey = attribute.Key("http.scheme") // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). // // Type: int // Required: If and only if one was received/sent. // Stability: stable // Examples: 200 HTTPStatusCodeKey = attribute.Key("http.status_code") // Kind of HTTP protocol used. // // Type: Enum // Required: No // Stability: stable // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP` // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed. HTTPFlavorKey = attribute.Key("http.flavor") // Value of the [HTTP User- // Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by the // client. // // Type: string // Required: No // Stability: stable // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' HTTPUserAgentKey = attribute.Key("http.user_agent") // The size of the request payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For // requests using transport encoding, this should be the compressed size. // // Type: int // Required: No // Stability: stable // Examples: 3495 HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") // The size of the uncompressed request payload body after transport decoding. Not // set if transport encoding not used. // // Type: int // Required: No // Stability: stable // Examples: 5493 HTTPRequestContentLengthUncompressedKey = attribute.Key("http.request_content_length_uncompressed") // The size of the response payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For // requests using transport encoding, this should be the compressed size. // // Type: int // Required: No // Stability: stable // Examples: 3495 HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") // The size of the uncompressed response payload body after transport decoding. // Not set if transport encoding not used. 
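// Illustrative usage sketch (hand-written, not generated from the semantic
// convention specification): a typical HTTP client span could combine the
// keys above, assuming a trace.Span and hypothetical values:
//
//	span.SetAttributes(
//		HTTPMethodKey.String("GET"),
//		HTTPURLKey.String("https://www.foo.bar/search?q=OpenTelemetry"),
//		HTTPStatusCodeKey.Int(200),
//		HTTPUserAgentKey.String("CERN-LineMode/2.15 libwww/2.17b3"),
//		HTTPResponseContentLengthKey.Int(3495),
//	)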
// // Type: int // Required: No // Stability: stable // Examples: 5493 HTTPResponseContentLengthUncompressedKey = attribute.Key("http.response_content_length_uncompressed") ) var ( // HTTP 1.0 HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") // HTTP 1.1 HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") // HTTP 2 HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") // SPDY protocol HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") // QUIC protocol HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") ) // Semantic Convention for HTTP Server const ( // The primary server name of the matched virtual host. This should be obtained // via configuration. If no such configuration can be obtained, this attribute // MUST NOT be set ( `net.host.name` should be used instead). // // Type: string // Required: No // Stability: stable // Examples: 'example.com' // Note: `http.url` is usually not readily available on the server side but would // have to be assembled in a cumbersome and sometimes lossy process from other // information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus // preferred to supply the raw data that is available. HTTPServerNameKey = attribute.Key("http.server_name") // The matched route (path template). // // Type: string // Required: No // Stability: stable // Examples: '/users/:userID?' HTTPRouteKey = attribute.Key("http.route") // The IP address of the original client behind all proxies, if known (e.g. from // [X-Forwarded-For](https://developer.mozilla.org/en- // US/docs/Web/HTTP/Headers/X-Forwarded-For)). // // Type: string // Required: No // Stability: stable // Examples: '83.164.160.102' // Note: This is not necessarily the same as `net.peer.ip`, which would identify // the network-level peer, which may be a proxy. HTTPClientIPKey = attribute.Key("http.client_ip") ) // Attributes that exist for multiple DynamoDB request types. const ( // The keys in the `RequestItems` object field. // // Type: string[] // Required: No // Stability: stable // Examples: 'Users', 'Cats' AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") // The JSON-serialized value of each item in the `ConsumedCapacity` response // field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": // "string", "WriteCapacityUnits": number }' AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") // The JSON-serialized value of the `ItemCollectionMetrics` response field. // // Type: string // Required: No // Stability: stable // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. 
// // Type: double // Required: No // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. // // Type: double // Required: No // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") // The value of the `ConsistentRead` request parameter. // // Type: boolean // Required: No // Stability: stable AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") // The value of the `ProjectionExpression` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, // ProductReviews' AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") // The value of the `Limit` request parameter. // // Type: int // Required: No // Stability: stable // Examples: 10 AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") // The value of the `AttributesToGet` request parameter. // // Type: string[] // Required: No // Stability: stable // Examples: 'lives', 'id' AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") // The value of the `IndexName` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'name_to_group' AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") // The value of the `Select` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'ALL_ATTRIBUTES', 'COUNT' AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") ) // DynamoDB.CreateTable const ( // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request // field // // Type: string[] // Required: No // Stability: stable // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": // number, "WriteCapacityUnits": number } }' AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request // field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], // "ProjectionType": "string" } }' AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") ) // DynamoDB.ListTables const ( // The value of the `ExclusiveStartTableName` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'Users', 'CatsTable' AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") // The the number of items in the `TableNames` response parameter. // // Type: int // Required: No // Stability: stable // Examples: 20 AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") ) // DynamoDB.Query const ( // The value of the `ScanIndexForward` request parameter. // // Type: boolean // Required: No // Stability: stable AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") ) // DynamoDB.Scan const ( // The value of the `Segment` request parameter. 
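//
// A minimal, illustrative sketch combining the DynamoDB request keys above.
// The helper name dynamoDBQueryAttrs is hypothetical and the table, index and
// limit values are placeholders:
//
//	func dynamoDBQueryAttrs() []attribute.KeyValue {
//		return []attribute.KeyValue{
//			AWSDynamoDBTableNamesKey.StringSlice([]string{"Users"}),
//			AWSDynamoDBIndexNameKey.String("name_to_group"),
//			AWSDynamoDBLimitKey.Int(10),
//			AWSDynamoDBConsistentReadKey.Bool(true),
//			AWSDynamoDBScanForwardKey.Bool(true),
//		}
//	}
//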
// // Type: int // Required: No // Stability: stable // Examples: 10 AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") // The value of the `TotalSegments` request parameter. // // Type: int // Required: No // Stability: stable // Examples: 100 AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") // The value of the `Count` response parameter. // // Type: int // Required: No // Stability: stable // Examples: 10 AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") // The value of the `ScannedCount` response parameter. // // Type: int // Required: No // Stability: stable // Examples: 50 AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") ) // DynamoDB.UpdateTable const ( // The JSON-serialized value of each item in the `AttributeDefinitions` request // field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") // The JSON-serialized value of each item in the the `GlobalSecondaryIndexUpdates` // request field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { // "AttributeName": "string", "KeyType": "string" } ], "Projection": { // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": // number } }' AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") ) // This document defines the attributes used in messaging systems. const ( // A string identifying the messaging system. // // Type: string // Required: Always // Stability: stable // Examples: 'kafka', 'rabbitmq', 'activemq', 'AmazonSQS' MessagingSystemKey = attribute.Key("messaging.system") // The message destination name. This might be equal to the span name but is // required nevertheless. // // Type: string // Required: Always // Stability: stable // Examples: 'MyQueue', 'MyTopic' MessagingDestinationKey = attribute.Key("messaging.destination") // The kind of message destination // // Type: Enum // Required: Required only if the message destination is either a `queue` or // `topic`. // Stability: stable MessagingDestinationKindKey = attribute.Key("messaging.destination_kind") // A boolean that is true if the message destination is temporary. // // Type: boolean // Required: If missing, it is assumed to be false. // Stability: stable MessagingTempDestinationKey = attribute.Key("messaging.temp_destination") // The name of the transport protocol. // // Type: string // Required: No // Stability: stable // Examples: 'AMQP', 'MQTT' MessagingProtocolKey = attribute.Key("messaging.protocol") // The version of the transport protocol. // // Type: string // Required: No // Stability: stable // Examples: '0.9.1' MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version") // Connection string. // // Type: string // Required: No // Stability: stable // Examples: 'tibjmsnaming://localhost:7222', // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' MessagingURLKey = attribute.Key("messaging.url") // A value used by the messaging system as an identifier for the message, // represented as a string. 
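//
// A minimal, illustrative sketch for a messaging producer span using the keys
// above. The helper name messagingProducerAttrs is hypothetical and the system,
// destination and message ID values are placeholders from these comments:
//
//	func messagingProducerAttrs() []attribute.KeyValue {
//		return []attribute.KeyValue{
//			MessagingSystemKey.String("kafka"),
//			MessagingDestinationKey.String("MyTopic"),
//			MessagingDestinationKindTopic,
//			MessagingMessageIDKey.String("452a7c7c7c7048c2f887f61572b18fc2"),
//			MessagingMessagePayloadSizeBytesKey.Int(2738),
//		}
//	}
//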
// // Type: string // Required: No // Stability: stable // Examples: '452a7c7c7c7048c2f887f61572b18fc2' MessagingMessageIDKey = attribute.Key("messaging.message_id") // The [conversation ID](#conversations) identifying the conversation to which the // message belongs, represented as a string. Sometimes called "Correlation ID". // // Type: string // Required: No // Stability: stable // Examples: 'MyConversationID' MessagingConversationIDKey = attribute.Key("messaging.conversation_id") // The (uncompressed) size of the message payload in bytes. Also use this // attribute if it is unknown whether the compressed or uncompressed payload size // is reported. // // Type: int // Required: No // Stability: stable // Examples: 2738 MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes") // The compressed size of the message payload in bytes. // // Type: int // Required: No // Stability: stable // Examples: 2048 MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes") ) var ( // A message sent to a queue MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") // A message sent to a topic MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") ) // Semantic convention for a consumer of messages received from a messaging system const ( // A string identifying the kind of message consumption as defined in the // [Operation names](#operation-names) section above. If the operation is "send", // this attribute MUST NOT be set, since the operation can be inferred from the // span kind in that case. // // Type: Enum // Required: No // Stability: stable MessagingOperationKey = attribute.Key("messaging.operation") ) var ( // receive MessagingOperationReceive = MessagingOperationKey.String("receive") // process MessagingOperationProcess = MessagingOperationKey.String("process") ) // Attributes for RabbitMQ const ( // RabbitMQ message routing key. // // Type: string // Required: Unless it is empty. // Stability: stable // Examples: 'myKey' MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key") ) // Attributes for Apache Kafka const ( // Message keys in Kafka are used for grouping alike messages to ensure they're // processed on the same partition. They differ from `messaging.message_id` in // that they're not unique. If the key is `null`, the attribute MUST NOT be set. // // Type: string // Required: No // Stability: stable // Examples: 'myKey' // Note: If the key type is not string, it's string representation has to be // supplied for the attribute. If the key has no unambiguous, canonical string // form, don't include its value. MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key") // Name of the Kafka Consumer Group that is handling the message. Only applies to // consumers, not producers. // // Type: string // Required: No // Stability: stable // Examples: 'my-group' MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group") // Client ID for the Consumer or Producer that is handling the message. // // Type: string // Required: No // Stability: stable // Examples: 'client-5' MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") // Partition the message is sent to. // // Type: int // Required: No // Stability: stable // Examples: 2 MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition") // A boolean that is true if the message is a tombstone. 
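//
// A minimal, illustrative sketch for a Kafka consumer span using the keys
// above. The helper name kafkaConsumerAttrs is hypothetical and the group,
// client ID and partition values are placeholders:
//
//	func kafkaConsumerAttrs() []attribute.KeyValue {
//		return []attribute.KeyValue{
//			MessagingSystemKey.String("kafka"),
//			MessagingOperationReceive,
//			MessagingKafkaConsumerGroupKey.String("my-group"),
//			MessagingKafkaClientIDKey.String("client-5"),
//			MessagingKafkaPartitionKey.Int(2),
//		}
//	}
//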
// // Type: boolean // Required: If missing, it is assumed to be false. // Stability: stable MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone") ) // This document defines semantic conventions for remote procedure calls. const ( // A string identifying the remoting system. // // Type: string // Required: Always // Stability: stable // Examples: 'grpc', 'java_rmi', 'wcf' RPCSystemKey = attribute.Key("rpc.system") // The full (logical) name of the service being called, including its package // name, if applicable. // // Type: string // Required: No, but recommended // Stability: stable // Examples: 'myservice.EchoService' // Note: This is the logical name of the service from the RPC interface // perspective, which can be different from the name of any implementing class. // The `code.namespace` attribute may be used to store the latter (despite the // attribute name, it may include a class name; e.g., class with method actually // executing the call on the server side, RPC client stub class on the client // side). RPCServiceKey = attribute.Key("rpc.service") // The name of the (logical) method being called, must be equal to the $method // part in the span name. // // Type: string // Required: No, but recommended // Stability: stable // Examples: 'exampleMethod' // Note: This is the logical name of the method from the RPC interface // perspective, which can be different from the name of any implementing // method/function. The `code.function` attribute may be used to store the latter // (e.g., method actually executing the call on the server side, RPC client stub // method on the client side). RPCMethodKey = attribute.Key("rpc.method") ) // Tech-specific attributes for gRPC. const ( // The [numeric status // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC // request. // // Type: Enum // Required: Always // Stability: stable RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ) var ( // OK RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) // CANCELLED RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) // UNKNOWN RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) // INVALID_ARGUMENT RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) // DEADLINE_EXCEEDED RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) // NOT_FOUND RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) // ALREADY_EXISTS RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) // PERMISSION_DENIED RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) // RESOURCE_EXHAUSTED RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) // FAILED_PRECONDITION RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) // ABORTED RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) // OUT_OF_RANGE RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) // UNIMPLEMENTED RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) // INTERNAL RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) // UNAVAILABLE RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) // DATA_LOSS RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) // UNAUTHENTICATED RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) ) // Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). const ( // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC // 1.0 does not specify this, the value can be omitted. // // Type: string // Required: If missing, it is assumed to be "1.0". 
// Stability: stable // Examples: '2.0', '1.0' RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") // `id` property of request or response. Since protocol allows id to be int, // string, `null` or missing (for notifications), value is expected to be cast to // string for simplicity. Use empty string in case of `null` value. Omit entirely // if this is a notification. // // Type: string // Required: No // Stability: stable // Examples: '10', 'request-7', '' RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") // `error.code` property of response if it is an error response. // // Type: int // Required: If missing, response is assumed to be successful. // Stability: stable // Examples: -32700, 100 RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") // `error.message` property of response if it is an error response. // // Type: string // Required: No // Stability: stable // Examples: 'Parse error', 'User already exists' RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") ) opentelemetry-go-1.21.0/semconv/v1.6.1/000077500000000000000000000000001452547353200174075ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.6.1/doc.go000066400000000000000000000016621452547353200205100ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package semconv implements OpenTelemetry semantic conventions. // // OpenTelemetry semantic conventions are agreed standardized naming // patterns for OpenTelemetry things. This package represents the conventions // as of the v1.6.1 version of the OpenTelemetry specification. package semconv // import "go.opentelemetry.io/otel/semconv/v1.6.1" opentelemetry-go-1.21.0/semconv/v1.6.1/exception.go000066400000000000000000000014271452547353200217400ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.6.1" const ( // ExceptionEventName is the name of the Span event representing an exception. ExceptionEventName = "exception" ) opentelemetry-go-1.21.0/semconv/v1.6.1/http.go000066400000000000000000000113111452547353200207120ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.6.1" import ( "net/http" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/semconv/internal" "go.opentelemetry.io/otel/trace" ) // HTTP scheme attributes. var ( HTTPSchemeHTTP = HTTPSchemeKey.String("http") HTTPSchemeHTTPS = HTTPSchemeKey.String("https") ) var sc = &internal.SemanticConventions{ EnduserIDKey: EnduserIDKey, HTTPClientIPKey: HTTPClientIPKey, HTTPFlavorKey: HTTPFlavorKey, HTTPHostKey: HTTPHostKey, HTTPMethodKey: HTTPMethodKey, HTTPRequestContentLengthKey: HTTPRequestContentLengthKey, HTTPRouteKey: HTTPRouteKey, HTTPSchemeHTTP: HTTPSchemeHTTP, HTTPSchemeHTTPS: HTTPSchemeHTTPS, HTTPServerNameKey: HTTPServerNameKey, HTTPStatusCodeKey: HTTPStatusCodeKey, HTTPTargetKey: HTTPTargetKey, HTTPURLKey: HTTPURLKey, HTTPUserAgentKey: HTTPUserAgentKey, NetHostIPKey: NetHostIPKey, NetHostNameKey: NetHostNameKey, NetHostPortKey: NetHostPortKey, NetPeerIPKey: NetPeerIPKey, NetPeerNameKey: NetPeerNameKey, NetPeerPortKey: NetPeerPortKey, NetTransportIP: NetTransportIP, NetTransportOther: NetTransportOther, NetTransportTCP: NetTransportTCP, NetTransportUDP: NetTransportUDP, NetTransportUnix: NetTransportUnix, } // NetAttributesFromHTTPRequest generates attributes of the net // namespace as specified by the OpenTelemetry specification for a // span. The network parameter is a string that net.Dial function // from standard library can understand. func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { return sc.NetAttributesFromHTTPRequest(network, request) } // EndUserAttributesFromHTTPRequest generates attributes of the // enduser namespace as specified by the OpenTelemetry specification // for a span. func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { return sc.EndUserAttributesFromHTTPRequest(request) } // HTTPClientAttributesFromHTTPRequest generates attributes of the // http namespace as specified by the OpenTelemetry specification for // a span on the client side. func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { return sc.HTTPClientAttributesFromHTTPRequest(request) } // HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes // to be used with server-side HTTP metrics. func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { return sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, request) } // HTTPServerAttributesFromHTTPRequest generates attributes of the // http namespace as specified by the OpenTelemetry specification for // a span on the server side. Currently, only basic authentication is // supported. func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { return sc.HTTPServerAttributesFromHTTPRequest(serverName, route, request) } // HTTPAttributesFromHTTPStatusCode generates attributes of the http // namespace as specified by the OpenTelemetry specification for a // span. 
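//
// A minimal, illustrative usage sketch, assuming resp is an *http.Response and
// span is a trace.Span obtained elsewhere (both are placeholders):
//
//	span.SetAttributes(HTTPAttributesFromHTTPStatusCode(resp.StatusCode)...)
//	span.SetStatus(SpanStatusFromHTTPStatusCode(resp.StatusCode))
//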
func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { return sc.HTTPAttributesFromHTTPStatusCode(code) } // SpanStatusFromHTTPStatusCode generates a status code and a message // as specified by the OpenTelemetry specification for a span. func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { return internal.SpanStatusFromHTTPStatusCode(code) } // SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message // as specified by the OpenTelemetry specification for a span. // Exclude 4xx for SERVER to set the appropriate status. func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { return internal.SpanStatusFromHTTPStatusCodeAndSpanKind(code, spanKind) } opentelemetry-go-1.21.0/semconv/v1.6.1/resource.go000066400000000000000000000761521452547353200216000ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.6.1" import "go.opentelemetry.io/otel/attribute" // A cloud environment (e.g. GCP, Azure, AWS) const ( // Name of the cloud provider. // // Type: Enum // Required: No // Stability: stable CloudProviderKey = attribute.Key("cloud.provider") // The cloud account ID the resource is assigned to. // // Type: string // Required: No // Stability: stable // Examples: '111111111111', 'opentelemetry' CloudAccountIDKey = attribute.Key("cloud.account.id") // The geographical region the resource is running. Refer to your provider's docs // to see the available regions, for example [Alibaba Cloud // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), // [Azure regions](https://azure.microsoft.com/en-us/global- // infrastructure/geographies/), or [Google Cloud // regions](https://cloud.google.com/about/locations). // // Type: string // Required: No // Stability: stable // Examples: 'us-central1', 'us-east-1' CloudRegionKey = attribute.Key("cloud.region") // Cloud regions often have multiple, isolated locations known as zones to // increase availability. Availability zone represents the zone where the resource // is running. // // Type: string // Required: No // Stability: stable // Examples: 'us-east-1c' // Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud. CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") // The cloud platform in use. // // Type: Enum // Required: No // Stability: stable // Note: The prefix of the service SHOULD match the one specified in // `cloud.provider`. 
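//
// A minimal, illustrative sketch bundling the cloud resource keys above. The
// helper name cloudResourceAttrs is hypothetical and the account, region and
// zone values are placeholders; such a slice could, for example, be combined
// with SchemaURL when constructing an SDK resource:
//
//	func cloudResourceAttrs() []attribute.KeyValue {
//		return []attribute.KeyValue{
//			CloudProviderAWS,
//			CloudPlatformAWSEC2,
//			CloudAccountIDKey.String("111111111111"),
//			CloudRegionKey.String("us-east-1"),
//			CloudAvailabilityZoneKey.String("us-east-1c"),
//		}
//	}
//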
CloudPlatformKey = attribute.Key("cloud.platform") ) var ( // Alibaba Cloud CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") // Amazon Web Services CloudProviderAWS = CloudProviderKey.String("aws") // Microsoft Azure CloudProviderAzure = CloudProviderKey.String("azure") // Google Cloud Platform CloudProviderGCP = CloudProviderKey.String("gcp") ) var ( // Alibaba Cloud Elastic Compute Service CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") // Alibaba Cloud Function Compute CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") // AWS Elastic Compute Cloud CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") // AWS Elastic Container Service CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") // AWS Elastic Kubernetes Service CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") // AWS Lambda CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") // AWS Elastic Beanstalk CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") // Azure Virtual Machines CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") // Azure Container Instances CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") // Azure Kubernetes Service CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") // Azure Functions CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") // Azure App Service CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") // Google Cloud Compute Engine (GCE) CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") // Google Cloud Run CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") // Google Cloud Kubernetes Engine (GKE) CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") // Google Cloud Functions (GCF) CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") // Google Cloud App Engine (GAE) CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") ) // Resources used by AWS Elastic Container Service (ECS). const ( // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws. // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us- // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo // perguide/clusters.html). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l // aunch_types.html) for an ECS task. // // Type: Enum // Required: No // Stability: stable AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates // t/developerguide/task_definitions.html). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us- // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") // The task definition family this task definition is a member of. 
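//
// A minimal, illustrative sketch for the ECS resource keys above. The helper
// name ecsResourceAttrs is hypothetical and the ARN and revision values are
// placeholders from these comments:
//
//	func ecsResourceAttrs() []attribute.KeyValue {
//		return []attribute.KeyValue{
//			AWSECSLaunchtypeFargate,
//			AWSECSClusterARNKey.String("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
//			AWSECSTaskFamilyKey.String("opentelemetry-family"),
//			AWSECSTaskRevisionKey.String("8"),
//		}
//	}
//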
// // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-family' AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") // The revision for this task definition. // // Type: string // Required: No // Stability: stable // Examples: '8', '26' AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") ) var ( // ec2 AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") // fargate AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") ) // Resources used by AWS Elastic Kubernetes Service (EKS). const ( // The ARN of an EKS cluster. // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") ) // Resources specific to Amazon Web Services. const ( // The name(s) of the AWS log group(s) an application is writing to. // // Type: string[] // Required: No // Stability: stable // Examples: '/aws/lambda/my-function', 'opentelemetry-service' // Note: Multiple log groups must be supported for cases like multi-container // applications, where a single application has sidecar containers, and each write // to their own log group. AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") // The Amazon Resource Name(s) (ARN) of the AWS log group(s). // // Type: string[] // Required: No // Stability: stable // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' // Note: See the [log group ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- // access-control-overview-cwl.html#CWL_ARN_Format). AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") // The name(s) of the AWS log stream(s) an application is writing to. // // Type: string[] // Required: No // Stability: stable // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") // The ARN(s) of the AWS log stream(s). // // Type: string[] // Required: No // Stability: stable // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' // Note: See the [log stream ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain // several log streams, so these ARNs necessarily identify both a log group and a // log stream. AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") ) // A container instance. const ( // Container name. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-autoconf' ContainerNameKey = attribute.Key("container.name") // Container ID. Usually a UUID, as for example used to [identify Docker // containers](https://docs.docker.com/engine/reference/run/#container- // identification). The UUID might be abbreviated. // // Type: string // Required: No // Stability: stable // Examples: 'a3bf90e006b2' ContainerIDKey = attribute.Key("container.id") // The container runtime managing this container. // // Type: string // Required: No // Stability: stable // Examples: 'docker', 'containerd', 'rkt' ContainerRuntimeKey = attribute.Key("container.runtime") // Name of the image the container was built on. // // Type: string // Required: No // Stability: stable // Examples: 'gcr.io/opentelemetry/operator' ContainerImageNameKey = attribute.Key("container.image.name") // Container image tag. 
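//
// A minimal, illustrative sketch for the container resource keys above. The
// helper name containerResourceAttrs is hypothetical and the name, ID and
// image values are placeholders:
//
//	func containerResourceAttrs() []attribute.KeyValue {
//		return []attribute.KeyValue{
//			ContainerNameKey.String("opentelemetry-autoconf"),
//			ContainerIDKey.String("a3bf90e006b2"),
//			ContainerRuntimeKey.String("containerd"),
//			ContainerImageNameKey.String("gcr.io/opentelemetry/operator"),
//			ContainerImageTagKey.String("0.1"),
//		}
//	}
//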
// // Type: string // Required: No // Stability: stable // Examples: '0.1' ContainerImageTagKey = attribute.Key("container.image.tag") ) // The software deployment. const ( // Name of the [deployment // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka // deployment tier). // // Type: string // Required: No // Stability: stable // Examples: 'staging', 'production' DeploymentEnvironmentKey = attribute.Key("deployment.environment") ) // The device on which the process represented by this resource is running. const ( // A unique identifier representing the device // // Type: string // Required: No // Stability: stable // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' // Note: The device identifier MUST only be defined using the values outlined // below. This value is not an advertising identifier and MUST NOT be used as // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the // Firebase Installation ID or a globally unique UUID which is persisted across // sessions in your application. More information can be found // [here](https://developer.android.com/training/articles/user-data-ids) on best // practices and exact implementation details. Caution should be taken when // storing personal data or anything which can identify a user. GDPR and data // protection laws may apply, ensure you do your own due diligence. DeviceIDKey = attribute.Key("device.id") // The model identifier for the device // // Type: string // Required: No // Stability: stable // Examples: 'iPhone3,4', 'SM-G920F' // Note: It's recommended this value represents a machine readable version of the // model identifier rather than the market or consumer-friendly name of the // device. DeviceModelIdentifierKey = attribute.Key("device.model.identifier") // The marketing name for the device model // // Type: string // Required: No // Stability: stable // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' // Note: It's recommended this value represents a human readable version of the // device model rather than a machine readable alternative. DeviceModelNameKey = attribute.Key("device.model.name") ) // A serverless instance. const ( // The name of the single function that this runtime instance executes. // // Type: string // Required: Always // Stability: stable // Examples: 'my-function' // Note: This is the name of the function as configured/deployed on the FaaS // platform and is usually different from the name of the callback function (which // may be stored in the // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span- // general.md#source-code-attributes) span attributes). FaaSNameKey = attribute.Key("faas.name") // The unique ID of the single function that this runtime instance executes. // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' // Note: Depending on the cloud provider, use: // * **AWS Lambda:** The function // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and- // namespaces.html). // Take care not to use the "invoked ARN" directly but replace any // [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration- // aliases.html) with the resolved function version, as the same runtime instance // may be invokable with multiple // different aliases. 
// * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full- // resource-names) // * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en- // us/rest/api/resources/resources/get-by-id). // On some providers, it may not be possible to determine the full ID at startup, // which is why this field cannot be made required. For example, on AWS the // account ID // part of the ARN is not available without calling another AWS API // which may be deemed too slow for a short-running lambda function. // As an alternative, consider setting `faas.id` as a span attribute instead. FaaSIDKey = attribute.Key("faas.id") // The immutable version of the function being executed. // // Type: string // Required: No // Stability: stable // Examples: '26', 'pinkfroid-00002' // Note: Depending on the cloud provider and platform, use: // * **AWS Lambda:** The [function // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration- // versions.html) // (an integer represented as a decimal string). // * **Google Cloud Run:** The // [revision](https://cloud.google.com/run/docs/managing/revisions) // (i.e., the function name plus the revision suffix). // * **Google Cloud Functions:** The value of the // [`K_REVISION` environment // variable](https://cloud.google.com/functions/docs/env- // var#runtime_environment_variables_set_automatically). // * **Azure Functions:** Not applicable. Do not set this attribute. FaaSVersionKey = attribute.Key("faas.version") // The execution environment ID as a string, that will be potentially reused for // other invocations to the same function/function version. // // Type: string // Required: No // Stability: stable // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' // Note: * **AWS Lambda:** Use the (full) log stream name. FaaSInstanceKey = attribute.Key("faas.instance") // The amount of memory available to the serverless function in MiB. // // Type: int // Required: No // Stability: stable // Examples: 128 // Note: It's recommended to set this attribute since e.g. too little memory can // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this // information. FaaSMaxMemoryKey = attribute.Key("faas.max_memory") ) // A host is defined as a general computing instance. const ( // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud // provider. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-test' HostIDKey = attribute.Key("host.id") // Name of the host. On Unix systems, it may contain what the hostname command // returns, or the fully qualified hostname, or another name specified by the // user. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-test' HostNameKey = attribute.Key("host.name") // Type of host. For Cloud, this must be the machine type. // // Type: string // Required: No // Stability: stable // Examples: 'n1-standard-1' HostTypeKey = attribute.Key("host.type") // The CPU architecture the host system is running on. // // Type: Enum // Required: No // Stability: stable HostArchKey = attribute.Key("host.arch") // Name of the VM image or OS install the host was instantiated from. // // Type: string // Required: No // Stability: stable // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' HostImageNameKey = attribute.Key("host.image.name") // VM image ID. For Cloud, this value is from the provider. 
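//
// A minimal, illustrative sketch for the host resource keys above. The helper
// name hostResourceAttrs is hypothetical and the host ID, type and image
// values are placeholders from these comments:
//
//	func hostResourceAttrs() []attribute.KeyValue {
//		return []attribute.KeyValue{
//			HostIDKey.String("opentelemetry-test"),
//			HostNameKey.String("opentelemetry-test"),
//			HostTypeKey.String("n1-standard-1"),
//			HostArchAMD64,
//			HostImageNameKey.String("infra-ami-eks-worker-node-7d4ec78312"),
//		}
//	}
//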
// // Type: string // Required: No // Stability: stable // Examples: 'ami-07b06b442921831e5' HostImageIDKey = attribute.Key("host.image.id") // The version string of the VM image as defined in [Version // Attributes](README.md#version-attributes). // // Type: string // Required: No // Stability: stable // Examples: '0.1' HostImageVersionKey = attribute.Key("host.image.version") ) var ( // AMD64 HostArchAMD64 = HostArchKey.String("amd64") // ARM32 HostArchARM32 = HostArchKey.String("arm32") // ARM64 HostArchARM64 = HostArchKey.String("arm64") // Itanium HostArchIA64 = HostArchKey.String("ia64") // 32-bit PowerPC HostArchPPC32 = HostArchKey.String("ppc32") // 64-bit PowerPC HostArchPPC64 = HostArchKey.String("ppc64") // 32-bit x86 HostArchX86 = HostArchKey.String("x86") ) // A Kubernetes Cluster. const ( // The name of the cluster. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-cluster' K8SClusterNameKey = attribute.Key("k8s.cluster.name") ) // A Kubernetes Node object. const ( // The name of the Node. // // Type: string // Required: No // Stability: stable // Examples: 'node-1' K8SNodeNameKey = attribute.Key("k8s.node.name") // The UID of the Node. // // Type: string // Required: No // Stability: stable // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' K8SNodeUIDKey = attribute.Key("k8s.node.uid") ) // A Kubernetes Namespace. const ( // The name of the namespace that the pod is running in. // // Type: string // Required: No // Stability: stable // Examples: 'default' K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") ) // A Kubernetes Pod object. const ( // The UID of the Pod. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SPodUIDKey = attribute.Key("k8s.pod.uid") // The name of the Pod. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-pod-autoconf' K8SPodNameKey = attribute.Key("k8s.pod.name") ) // A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). const ( // The name of the Container in a Pod template. // // Type: string // Required: No // Stability: stable // Examples: 'redis' K8SContainerNameKey = attribute.Key("k8s.container.name") ) // A Kubernetes ReplicaSet object. const ( // The UID of the ReplicaSet. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") // The name of the ReplicaSet. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") ) // A Kubernetes Deployment object. const ( // The UID of the Deployment. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") // The name of the Deployment. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") ) // A Kubernetes StatefulSet object. const ( // The UID of the StatefulSet. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") // The name of the StatefulSet. 
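//
// A minimal, illustrative sketch for the Kubernetes resource keys above. The
// helper name k8sResourceAttrs is hypothetical and the cluster, namespace and
// workload values are placeholders:
//
//	func k8sResourceAttrs() []attribute.KeyValue {
//		return []attribute.KeyValue{
//			K8SClusterNameKey.String("opentelemetry-cluster"),
//			K8SNamespaceNameKey.String("default"),
//			K8SPodNameKey.String("opentelemetry-pod-autoconf"),
//			K8SContainerNameKey.String("redis"),
//			K8SDeploymentNameKey.String("opentelemetry"),
//		}
//	}
//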
// // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") ) // A Kubernetes DaemonSet object. const ( // The UID of the DaemonSet. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") // The name of the DaemonSet. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") ) // A Kubernetes Job object. const ( // The UID of the Job. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SJobUIDKey = attribute.Key("k8s.job.uid") // The name of the Job. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SJobNameKey = attribute.Key("k8s.job.name") ) // A Kubernetes CronJob object. const ( // The UID of the CronJob. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") // The name of the CronJob. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") ) // The operating system (OS) on which the process represented by this resource is running. const ( // The operating system type. // // Type: Enum // Required: Always // Stability: stable OSTypeKey = attribute.Key("os.type") // Human readable (not intended to be parsed) OS version information, like e.g. // reported by `ver` or `lsb_release -a` commands. // // Type: string // Required: No // Stability: stable // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' OSDescriptionKey = attribute.Key("os.description") // Human readable operating system name. // // Type: string // Required: No // Stability: stable // Examples: 'iOS', 'Android', 'Ubuntu' OSNameKey = attribute.Key("os.name") // The version string of the operating system as defined in [Version // Attributes](../../resource/semantic_conventions/README.md#version-attributes). // // Type: string // Required: No // Stability: stable // Examples: '14.2.1', '18.04.1' OSVersionKey = attribute.Key("os.version") ) var ( // Microsoft Windows OSTypeWindows = OSTypeKey.String("windows") // Linux OSTypeLinux = OSTypeKey.String("linux") // Apple Darwin OSTypeDarwin = OSTypeKey.String("darwin") // FreeBSD OSTypeFreeBSD = OSTypeKey.String("freebsd") // NetBSD OSTypeNetBSD = OSTypeKey.String("netbsd") // OpenBSD OSTypeOpenBSD = OSTypeKey.String("openbsd") // DragonFly BSD OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") // HP-UX (Hewlett Packard Unix) OSTypeHPUX = OSTypeKey.String("hpux") // AIX (Advanced Interactive eXecutive) OSTypeAIX = OSTypeKey.String("aix") // Oracle Solaris OSTypeSolaris = OSTypeKey.String("solaris") // IBM z/OS OSTypeZOS = OSTypeKey.String("z_os") ) // An operating system process. const ( // Process identifier (PID). // // Type: int // Required: No // Stability: stable // Examples: 1234 ProcessPIDKey = attribute.Key("process.pid") // The name of the process executable. On Linux based systems, can be set to the // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of // `GetProcessImageFileNameW`. 
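//
// A minimal, illustrative sketch for the process and runtime resource keys in
// this file. The helper name processResourceAttrs is hypothetical and the PID,
// executable, owner and runtime values are placeholders from these comments:
//
//	func processResourceAttrs() []attribute.KeyValue {
//		return []attribute.KeyValue{
//			ProcessPIDKey.Int(1234),
//			ProcessExecutableNameKey.String("otelcol"),
//			ProcessOwnerKey.String("root"),
//			ProcessRuntimeNameKey.String("OpenJDK Runtime Environment"),
//			ProcessRuntimeVersionKey.String("14.0.2"),
//		}
//	}
//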
// // Type: string // Required: See below // Stability: stable // Examples: 'otelcol' ProcessExecutableNameKey = attribute.Key("process.executable.name") // The full path to the process executable. On Linux based systems, can be set to // the target of `proc/[pid]/exe`. On Windows, can be set to the result of // `GetProcessImageFileNameW`. // // Type: string // Required: See below // Stability: stable // Examples: '/usr/bin/cmd/otelcol' ProcessExecutablePathKey = attribute.Key("process.executable.path") // The command used to launch the process (i.e. the command name). On Linux based // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, // can be set to the first parameter extracted from `GetCommandLineW`. // // Type: string // Required: See below // Stability: stable // Examples: 'cmd/otelcol' ProcessCommandKey = attribute.Key("process.command") // The full command used to launch the process as a single string representing the // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not // set this if you have to assemble it just for monitoring; use // `process.command_args` instead. // // Type: string // Required: See below // Stability: stable // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' ProcessCommandLineKey = attribute.Key("process.command_line") // All the command arguments (including the command/executable itself) as received // by the process. On Linux-based systems (and some other Unixoid systems // supporting procfs), can be set according to the list of null-delimited strings // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be // the full argv vector passed to `main`. // // Type: string[] // Required: See below // Stability: stable // Examples: 'cmd/otecol', '--config=config.yaml' ProcessCommandArgsKey = attribute.Key("process.command_args") // The username of the user that owns the process. // // Type: string // Required: No // Stability: stable // Examples: 'root' ProcessOwnerKey = attribute.Key("process.owner") ) // The single (language) runtime instance which is monitored. const ( // The name of the runtime of this process. For compiled native binaries, this // SHOULD be the name of the compiler. // // Type: string // Required: No // Stability: stable // Examples: 'OpenJDK Runtime Environment' ProcessRuntimeNameKey = attribute.Key("process.runtime.name") // The version of the runtime of this process, as returned by the runtime without // modification. // // Type: string // Required: No // Stability: stable // Examples: '14.0.2' ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") // An additional description about the runtime of the process, for example a // specific vendor customization of the runtime environment. // // Type: string // Required: No // Stability: stable // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") ) // A service instance. const ( // Logical name of the service. // // Type: string // Required: Always // Stability: stable // Examples: 'shoppingcart' // Note: MUST be the same for all instances of horizontally scaled services. If // the value was not specified, SDKs MUST fallback to `unknown_service:` // concatenated with [`process.executable.name`](process.md#process), e.g. // `unknown_service:bash`. If `process.executable.name` is not available, the // value MUST be set to `unknown_service`. 
ServiceNameKey = attribute.Key("service.name") // A namespace for `service.name`. // // Type: string // Required: No // Stability: stable // Examples: 'Shop' // Note: A string value having a meaning that helps to distinguish a group of // services, for example the team name that owns a group of services. // `service.name` is expected to be unique within the same namespace. If // `service.namespace` is not specified in the Resource then `service.name` is // expected to be unique for all services that have no explicit namespace defined // (so the empty/unspecified namespace is simply one more valid namespace). Zero- // length namespace string is assumed equal to unspecified namespace. ServiceNamespaceKey = attribute.Key("service.namespace") // The string ID of the service instance. // // Type: string // Required: No // Stability: stable // Examples: '627cc493-f310-47de-96bd-71410b7dec09' // Note: MUST be unique for each instance of the same // `service.namespace,service.name` pair (in other words // `service.namespace,service.name,service.instance.id` triplet MUST be globally // unique). The ID helps to distinguish instances of the same service that exist // at the same time (e.g. instances of a horizontally scaled service). It is // preferable for the ID to be persistent and stay the same for the lifetime of // the service instance, however it is acceptable that the ID is ephemeral and // changes during important lifetime events for the service (e.g. service // restarts). If the service has no inherent unique ID that can be used as the // value of this attribute it is recommended to generate a random Version 1 or // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use // Version 5, see RFC 4122 for more recommendations). ServiceInstanceIDKey = attribute.Key("service.instance.id") // The version string of the service API or implementation. // // Type: string // Required: No // Stability: stable // Examples: '2.0.0' ServiceVersionKey = attribute.Key("service.version") ) // The telemetry SDK used to capture data recorded by the instrumentation libraries. const ( // The name of the telemetry SDK as defined above. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") // The language of the telemetry SDK. // // Type: Enum // Required: No // Stability: stable TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") // The version string of the telemetry SDK. // // Type: string // Required: No // Stability: stable // Examples: '1.2.3' TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") // The version string of the auto instrumentation agent, if used. 
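//
// A minimal, illustrative sketch for the service and telemetry SDK resource
// keys above. The helper name serviceResourceAttrs is hypothetical and the
// service name, namespace, instance ID and version values are placeholders:
//
//	func serviceResourceAttrs() []attribute.KeyValue {
//		return []attribute.KeyValue{
//			ServiceNameKey.String("shoppingcart"),
//			ServiceNamespaceKey.String("Shop"),
//			ServiceInstanceIDKey.String("627cc493-f310-47de-96bd-71410b7dec09"),
//			ServiceVersionKey.String("2.0.0"),
//			TelemetrySDKNameKey.String("opentelemetry"),
//			TelemetrySDKLanguageGo,
//		}
//	}
//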
// // Type: string // Required: No // Stability: stable // Examples: '1.2.3' TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") ) var ( // cpp TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") // dotnet TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") // erlang TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") // go TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") // java TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") // nodejs TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") // php TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") // python TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") // ruby TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") // webjs TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") ) // Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. const ( // The name of the web engine. // // Type: string // Required: Always // Stability: stable // Examples: 'WildFly' WebEngineNameKey = attribute.Key("webengine.name") // The version of the web engine. // // Type: string // Required: No // Stability: stable // Examples: '21.0.0' WebEngineVersionKey = attribute.Key("webengine.version") // Additional description of the web engine (e.g. detailed version and edition // information). // // Type: string // Required: No // Stability: stable // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' WebEngineDescriptionKey = attribute.Key("webengine.description") ) opentelemetry-go-1.21.0/semconv/v1.6.1/schema.go000066400000000000000000000017121452547353200211770ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.6.1" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ const SchemaURL = "https://opentelemetry.io/schemas/1.6.1" opentelemetry-go-1.21.0/semconv/v1.6.1/trace.go000066400000000000000000001502051452547353200210370ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. 
DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.6.1" import "go.opentelemetry.io/otel/attribute" // Span attributes used by AWS Lambda (in addition to general `faas` attributes). const ( // The full invoked ARN as provided on the `Context` passed to the function // (`Lambda-Runtime-Invoked-Function-ARN` header on the `/runtime/invocation/next` // applicable). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' // Note: This may be different from `faas.id` if an alias is involved. AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") ) // This document defines the attributes used to perform database client calls. const ( // An identifier for the database management system (DBMS) product being used. See // below for a list of well-known identifiers. // // Type: Enum // Required: Always // Stability: stable DBSystemKey = attribute.Key("db.system") // The connection string used to connect to the database. It is recommended to // remove embedded credentials. // // Type: string // Required: No // Stability: stable // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' DBConnectionStringKey = attribute.Key("db.connection_string") // Username for accessing the database. // // Type: string // Required: No // Stability: stable // Examples: 'readonly_user', 'reporting_user' DBUserKey = attribute.Key("db.user") // The fully-qualified class name of the [Java Database Connectivity // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver // used to connect. // // Type: string // Required: No // Stability: stable // Examples: 'org.postgresql.Driver', // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") // If no [tech-specific attribute](#call-level-attributes-for-specific- // technologies) is defined, this attribute is used to report the name of the // database being accessed. For commands that switch the database, this should be // set to the target database (even if the command fails). // // Type: string // Required: Required, if applicable and no more-specific attribute is defined. // Stability: stable // Examples: 'customers', 'main' // Note: In some SQL databases, the database name to be used is called "schema // name". DBNameKey = attribute.Key("db.name") // The database statement being executed. // // Type: string // Required: Required if applicable and not explicitly disabled via // instrumentation configuration. // Stability: stable // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' // Note: The value may be sanitized to exclude sensitive information. DBStatementKey = attribute.Key("db.statement") // The name of the operation being executed, e.g. the [MongoDB command // name](https://docs.mongodb.com/manual/reference/command/#database-operations) // such as `findAndModify`, or the SQL keyword. // // Type: string // Required: Required, if `db.statement` is not applicable. // Stability: stable // Examples: 'findAndModify', 'HMSET', 'SELECT' // Note: When setting this to an SQL keyword, it is not recommended to attempt any // client-side parsing of `db.statement` just to get this property, but it should // be set if the operation name is provided by the library being instrumented. If // the SQL statement has an ambiguous operation, or performs more than one // operation, this value may be omitted. 
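//
// A minimal, illustrative sketch for a database client span using the keys
// above. The helper name dbClientAttrs is hypothetical and the system, name,
// user and statement values are placeholders from these comments:
//
//	func dbClientAttrs() []attribute.KeyValue {
//		return []attribute.KeyValue{
//			DBSystemPostgreSQL,
//			DBNameKey.String("customers"),
//			DBUserKey.String("readonly_user"),
//			DBStatementKey.String("SELECT * FROM wuser_table"),
//			DBOperationKey.String("SELECT"),
//		}
//	}
//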
DBOperationKey = attribute.Key("db.operation") ) var ( // Some other SQL database. Fallback only. See notes DBSystemOtherSQL = DBSystemKey.String("other_sql") // Microsoft SQL Server DBSystemMSSQL = DBSystemKey.String("mssql") // MySQL DBSystemMySQL = DBSystemKey.String("mysql") // Oracle Database DBSystemOracle = DBSystemKey.String("oracle") // IBM DB2 DBSystemDB2 = DBSystemKey.String("db2") // PostgreSQL DBSystemPostgreSQL = DBSystemKey.String("postgresql") // Amazon Redshift DBSystemRedshift = DBSystemKey.String("redshift") // Apache Hive DBSystemHive = DBSystemKey.String("hive") // Cloudscape DBSystemCloudscape = DBSystemKey.String("cloudscape") // HyperSQL DataBase DBSystemHSQLDB = DBSystemKey.String("hsqldb") // Progress Database DBSystemProgress = DBSystemKey.String("progress") // SAP MaxDB DBSystemMaxDB = DBSystemKey.String("maxdb") // SAP HANA DBSystemHanaDB = DBSystemKey.String("hanadb") // Ingres DBSystemIngres = DBSystemKey.String("ingres") // FirstSQL DBSystemFirstSQL = DBSystemKey.String("firstsql") // EnterpriseDB DBSystemEDB = DBSystemKey.String("edb") // InterSystems Caché DBSystemCache = DBSystemKey.String("cache") // Adabas (Adaptable Database System) DBSystemAdabas = DBSystemKey.String("adabas") // Firebird DBSystemFirebird = DBSystemKey.String("firebird") // Apache Derby DBSystemDerby = DBSystemKey.String("derby") // FileMaker DBSystemFilemaker = DBSystemKey.String("filemaker") // Informix DBSystemInformix = DBSystemKey.String("informix") // InstantDB DBSystemInstantDB = DBSystemKey.String("instantdb") // InterBase DBSystemInterbase = DBSystemKey.String("interbase") // MariaDB DBSystemMariaDB = DBSystemKey.String("mariadb") // Netezza DBSystemNetezza = DBSystemKey.String("netezza") // Pervasive PSQL DBSystemPervasive = DBSystemKey.String("pervasive") // PointBase DBSystemPointbase = DBSystemKey.String("pointbase") // SQLite DBSystemSqlite = DBSystemKey.String("sqlite") // Sybase DBSystemSybase = DBSystemKey.String("sybase") // Teradata DBSystemTeradata = DBSystemKey.String("teradata") // Vertica DBSystemVertica = DBSystemKey.String("vertica") // H2 DBSystemH2 = DBSystemKey.String("h2") // ColdFusion IMQ DBSystemColdfusion = DBSystemKey.String("coldfusion") // Apache Cassandra DBSystemCassandra = DBSystemKey.String("cassandra") // Apache HBase DBSystemHBase = DBSystemKey.String("hbase") // MongoDB DBSystemMongoDB = DBSystemKey.String("mongodb") // Redis DBSystemRedis = DBSystemKey.String("redis") // Couchbase DBSystemCouchbase = DBSystemKey.String("couchbase") // CouchDB DBSystemCouchDB = DBSystemKey.String("couchdb") // Microsoft Azure Cosmos DB DBSystemCosmosDB = DBSystemKey.String("cosmosdb") // Amazon DynamoDB DBSystemDynamoDB = DBSystemKey.String("dynamodb") // Neo4j DBSystemNeo4j = DBSystemKey.String("neo4j") // Apache Geode DBSystemGeode = DBSystemKey.String("geode") // Elasticsearch DBSystemElasticsearch = DBSystemKey.String("elasticsearch") // Memcached DBSystemMemcached = DBSystemKey.String("memcached") // CockroachDB DBSystemCockroachdb = DBSystemKey.String("cockroachdb") ) // Connection-level attributes for Microsoft SQL Server const ( // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en- // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) // connecting to. This name is used to determine the port of a named instance. 
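//
// Illustrative sketch (not part of the generated definitions): a database
// client instrumentation could attach the db.* attributes above to a CLIENT
// span roughly as below. The tracer, ctx, and the imported trace/semconv
// packages are assumed for the example; the query text is hypothetical.
//
//	ctx, span := tracer.Start(ctx, "SELECT customers",
//		trace.WithSpanKind(trace.SpanKindClient),
//		trace.WithAttributes(
//			semconv.DBSystemMySQL,
//			semconv.DBNameKey.String("customers"),
//			semconv.DBUserKey.String("readonly_user"),
//			semconv.DBStatementKey.String("SELECT * FROM wuser_table"),
//			semconv.DBOperationKey.String("SELECT"),
//		),
//	)
//	defer span.End()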
// // Type: string // Required: No // Stability: stable // Examples: 'MSSQLSERVER' // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer // required (but still recommended if non-standard). DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") ) // Call-level attributes for Cassandra const ( // The name of the keyspace being accessed. To be used instead of the generic // `db.name` attribute. // // Type: string // Required: Always // Stability: stable // Examples: 'mykeyspace' DBCassandraKeyspaceKey = attribute.Key("db.cassandra.keyspace") // The fetch size used for paging, i.e. how many rows will be returned at once. // // Type: int // Required: No // Stability: stable // Examples: 5000 DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") // The consistency level of the query. Based on consistency values from // [CQL](https://docs.datastax.com/en/cassandra- // oss/3.0/cassandra/dml/dmlConfigConsistency.html). // // Type: Enum // Required: No // Stability: stable DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") // The name of the primary table that the operation is acting upon, including the // schema name (if applicable). // // Type: string // Required: Recommended if available. // Stability: stable // Examples: 'mytable' // Note: This mirrors the db.sql.table attribute but references cassandra rather // than sql. It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting upon an // anonymous table, or more than one table, this value MUST NOT be set. DBCassandraTableKey = attribute.Key("db.cassandra.table") // Whether or not the query is idempotent. // // Type: boolean // Required: No // Stability: stable DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") // The number of times a query was speculatively executed. Not set or `0` if the // query was not executed speculatively. // // Type: int // Required: No // Stability: stable // Examples: 0, 2 DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") // The ID of the coordinating node for a query. // // Type: string // Required: No // Stability: stable // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") // The data center of the coordinating node for a query. 
// // Type: string // Required: No // Stability: stable // Examples: 'us-west-2' DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") ) var ( // all DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") // each_quorum DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") // quorum DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") // local_quorum DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") // one DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") // two DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") // three DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") // local_one DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") // any DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") // serial DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") // local_serial DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") ) // Call-level attributes for Apache HBase const ( // The [HBase namespace](https://hbase.apache.org/book.html#_namespace) being // accessed. To be used instead of the generic `db.name` attribute. // // Type: string // Required: Always // Stability: stable // Examples: 'default' DBHBaseNamespaceKey = attribute.Key("db.hbase.namespace") ) // Call-level attributes for Redis const ( // The index of the database being accessed as used in the [`SELECT` // command](https://redis.io/commands/select), provided as an integer. To be used // instead of the generic `db.name` attribute. // // Type: int // Required: Required, if other than the default database (`0`). // Stability: stable // Examples: 0, 1, 15 DBRedisDBIndexKey = attribute.Key("db.redis.database_index") ) // Call-level attributes for MongoDB const ( // The collection being accessed within the database stated in `db.name`. // // Type: string // Required: Always // Stability: stable // Examples: 'customers', 'products' DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") ) // Call-level attrbiutes for SQL databases const ( // The name of the primary table that the operation is acting upon, including the // schema name (if applicable). // // Type: string // Required: Recommended if available. // Stability: stable // Examples: 'public.users', 'customers' // Note: It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting upon an // anonymous table, or more than one table, this value MUST NOT be set. DBSQLTableKey = attribute.Key("db.sql.table") ) // This document defines the attributes used to report a single exception associated with a span. const ( // The type of the exception (its fully-qualified class name, if applicable). The // dynamic type of the exception should be preferred over the static type in // languages that support it. // // Type: string // Required: No // Stability: stable // Examples: 'java.net.ConnectException', 'OSError' ExceptionTypeKey = attribute.Key("exception.type") // The exception message. 
// // Type: string // Required: No // Stability: stable // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" ExceptionMessageKey = attribute.Key("exception.message") // A stacktrace as a string in the natural representation for the language // runtime. The representation is to be determined and documented by each language // SIG. // // Type: string // Required: No // Stability: stable // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test // exception\\n at ' // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' ExceptionStacktraceKey = attribute.Key("exception.stacktrace") // SHOULD be set to true if the exception event is recorded at a point where it is // known that the exception is escaping the scope of the span. // // Type: boolean // Required: No // Stability: stable // Note: An exception is considered to have escaped (or left) the scope of a span, // if that span is ended while the exception is still logically "in flight". // This may be actually "in flight" in some languages (e.g. if the exception // is passed to a Context manager's `__exit__` method in Python) but will // usually be caught at the point of recording the exception in most languages. // It is usually not possible to determine at the point where an exception is // thrown // whether it will escape the scope of a span. // However, it is trivial to know that an exception // will escape, if one checks for an active exception just before ending the span, // as done in the [example above](#exception-end-example). // It follows that an exception may still escape the scope of the span // even if the `exception.escaped` attribute was not set or set to false, // since the event might have been recorded at a time where it was not // clear whether the exception will escape. ExceptionEscapedKey = attribute.Key("exception.escaped") ) // This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. const ( // Type of the trigger on which the function is executed. // // Type: Enum // Required: On FaaS instances, faas.trigger MUST be set on incoming invocations. // Clients invoking FaaS instances MUST set `faas.trigger` on outgoing // invocations, if it is known to the client. This is, for example, not the case, // when the transport layer is abstracted in a FaaS client framework without // access to its configuration. // Stability: stable FaaSTriggerKey = attribute.Key("faas.trigger") // The execution ID of the current function execution. 
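//
// Illustrative sketch (not part of the generated definitions): the exception
// attributes above are typically attached to a span event named "exception".
// The span and err values are assumed for the example; in practice most users
// simply call span.RecordError(err), which emits the same event via the SDK.
//
//	if err != nil {
//		span.AddEvent("exception", trace.WithAttributes(
//			semconv.ExceptionTypeKey.String("*net.OpError"),
//			semconv.ExceptionMessageKey.String(err.Error()),
//			semconv.ExceptionEscapedKey.Bool(false),
//		))
//	}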
// // Type: string // Required: No // Stability: stable // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' FaaSExecutionKey = attribute.Key("faas.execution") ) var ( // A response to some data source operation such as a database or filesystem read/write FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") // To provide an answer to an inbound HTTP request FaaSTriggerHTTP = FaaSTriggerKey.String("http") // A function is set to be executed when messages are sent to a messaging system FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") // A function is scheduled to be executed regularly FaaSTriggerTimer = FaaSTriggerKey.String("timer") // If none of the others apply FaaSTriggerOther = FaaSTriggerKey.String("other") ) // Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. const ( // The name of the source on which the triggering operation was performed. For // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos // DB to the database name. // // Type: string // Required: Always // Stability: stable // Examples: 'myBucketName', 'myDBName' FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") // Describes the type of the operation that was performed on the data. // // Type: Enum // Required: Always // Stability: stable FaaSDocumentOperationKey = attribute.Key("faas.document.operation") // A string containing the time when the data was accessed in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed // in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // Required: Always // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSDocumentTimeKey = attribute.Key("faas.document.time") // The document name/table subjected to the operation. For example, in Cloud // Storage or S3 is the name of the file, and in Cosmos DB the table name. // // Type: string // Required: No // Stability: stable // Examples: 'myFile.txt', 'myTableName' FaaSDocumentNameKey = attribute.Key("faas.document.name") ) var ( // When a new object is created FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") // When an object is modified FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") // When an object is deleted FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") ) // Semantic Convention for FaaS scheduled to be executed regularly. const ( // A string containing the function invocation time in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed // in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // Required: Always // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSTimeKey = attribute.Key("faas.time") // A string containing the schedule period as [Cron Expression](https://docs.oracl // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). // // Type: string // Required: No // Stability: stable // Examples: '0/5 * * * ? *' FaaSCronKey = attribute.Key("faas.cron") ) // Contains additional attributes for incoming FaaS spans. const ( // A boolean that is true if the serverless function is executed for the first // time (aka cold-start). // // Type: boolean // Required: No // Stability: stable FaaSColdstartKey = attribute.Key("faas.coldstart") ) // Contains additional attributes for outgoing FaaS spans. const ( // The name of the invoked function. 
// // Type: string // Required: Always // Stability: stable // Examples: 'my-function' // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked // function. FaaSInvokedNameKey = attribute.Key("faas.invoked_name") // The cloud provider of the invoked function. // // Type: Enum // Required: Always // Stability: stable // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked // function. FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") // The cloud region of the invoked function. // // Type: string // Required: For some cloud providers, like AWS or GCP, the region in which a // function is hosted is essential to uniquely identify the function and also part // of its endpoint. Since it's part of the endpoint being called, the region is // always known to clients. In these cases, `faas.invoked_region` MUST be set // accordingly. If the region is unknown to the client or not required for // identifying the invoked function, setting `faas.invoked_region` is optional. // Stability: stable // Examples: 'eu-central-1' // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked // function. FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") ) var ( // Alibaba Cloud FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") // Amazon Web Services FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") // Microsoft Azure FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") // Google Cloud Platform FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") ) // These attributes may be used for any network related operation. const ( // Transport protocol used. See note below. // // Type: Enum // Required: No // Stability: stable NetTransportKey = attribute.Key("net.transport") // Remote address of the peer (dotted decimal for IPv4 or // [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6) // // Type: string // Required: No // Stability: stable // Examples: '127.0.0.1' NetPeerIPKey = attribute.Key("net.peer.ip") // Remote port number. // // Type: int // Required: No // Stability: stable // Examples: 80, 8080, 443 NetPeerPortKey = attribute.Key("net.peer.port") // Remote hostname or similar, see note below. // // Type: string // Required: No // Stability: stable // Examples: 'example.com' NetPeerNameKey = attribute.Key("net.peer.name") // Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host. // // Type: string // Required: No // Stability: stable // Examples: '192.168.0.1' NetHostIPKey = attribute.Key("net.host.ip") // Like `net.peer.port` but for the host port. // // Type: int // Required: No // Stability: stable // Examples: 35555 NetHostPortKey = attribute.Key("net.host.port") // Local hostname or similar, see note below. // // Type: string // Required: No // Stability: stable // Examples: 'localhost' NetHostNameKey = attribute.Key("net.host.name") // The internet connection type currently being used by the host. // // Type: Enum // Required: No // Stability: stable // Examples: 'wifi' NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") // This describes more details regarding the connection.type. It may be the type // of cell technology connection, but it could be used for describing details // about a wifi connection. // // Type: Enum // Required: No // Stability: stable // Examples: 'LTE' NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") // The name of the mobile carrier. 
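//
// Illustrative sketch (not part of the generated definitions): a client span
// for an outgoing function invocation could combine the faas.invoked_* and
// net.* attributes above. The span value is assumed; the endpoint host and
// region are hypothetical examples.
//
//	span.SetAttributes(
//		semconv.FaaSInvokedNameKey.String("my-function"),
//		semconv.FaaSInvokedProviderAWS,
//		semconv.FaaSInvokedRegionKey.String("eu-central-1"),
//		semconv.NetPeerNameKey.String("lambda.eu-central-1.amazonaws.com"),
//		semconv.NetPeerPortKey.Int(443),
//	)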
// // Type: string // Required: No // Stability: stable // Examples: 'sprint' NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") // The mobile carrier country code. // // Type: string // Required: No // Stability: stable // Examples: '310' NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") // The mobile carrier network code. // // Type: string // Required: No // Stability: stable // Examples: '001' NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") // The ISO 3166-1 alpha-2 2-character country code associated with the mobile // carrier network. // // Type: string // Required: No // Stability: stable // Examples: 'DE' NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") ) var ( // ip_tcp NetTransportTCP = NetTransportKey.String("ip_tcp") // ip_udp NetTransportUDP = NetTransportKey.String("ip_udp") // Another IP-based protocol NetTransportIP = NetTransportKey.String("ip") // Unix Domain socket. See below NetTransportUnix = NetTransportKey.String("unix") // Named or anonymous pipe. See note below NetTransportPipe = NetTransportKey.String("pipe") // In-process communication NetTransportInProc = NetTransportKey.String("inproc") // Something else (non IP-based) NetTransportOther = NetTransportKey.String("other") ) var ( // wifi NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") // wired NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") // cell NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") // unavailable NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") // unknown NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") ) var ( // GPRS NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") // EDGE NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") // UMTS NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") // CDMA NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") // EVDO Rel. 0 NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") // EVDO Rev. A NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") // CDMA2000 1XRTT NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") // HSDPA NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") // HSUPA NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") // HSPA NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") // IDEN NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") // EVDO Rev. 
B NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") // LTE NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") // EHRPD NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") // HSPAP NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") // GSM NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") // TD-SCDMA NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") // IWLAN NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") // 5G NR (New Radio) NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") // 5G NRNSA (New Radio Non-Standalone) NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") // LTE CA NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") ) // Operations that access some remote service. const ( // The [`service.name`](../../resource/semantic_conventions/README.md#service) of // the remote service. SHOULD be equal to the actual `service.name` resource // attribute of the remote service if any. // // Type: string // Required: No // Stability: stable // Examples: 'AuthTokenCache' PeerServiceKey = attribute.Key("peer.service") ) // These attributes may be used for any operation with an authenticated and/or authorized enduser. const ( // Username or client_id extracted from the access token or // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the // inbound request from outside the system. // // Type: string // Required: No // Stability: stable // Examples: 'username' EnduserIDKey = attribute.Key("enduser.id") // Actual/assumed role the client is making the request under extracted from token // or application security context. // // Type: string // Required: No // Stability: stable // Examples: 'admin' EnduserRoleKey = attribute.Key("enduser.role") // Scopes or granted authorities the client currently possesses extracted from // token or application security context. The value would come from the scope // associated with an [OAuth 2.0 Access // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value // in a [SAML 2.0 Assertion](http://docs.oasis- // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). // // Type: string // Required: No // Stability: stable // Examples: 'read:message, write:files' EnduserScopeKey = attribute.Key("enduser.scope") ) // These attributes may be used for any operation to store information about a thread that started a span. const ( // Current "managed" thread ID (as opposed to OS thread ID). // // Type: int // Required: No // Stability: stable // Examples: 42 ThreadIDKey = attribute.Key("thread.id") // Current thread name. // // Type: string // Required: No // Stability: stable // Examples: 'main' ThreadNameKey = attribute.Key("thread.name") ) // These attributes allow to report this unit of code and therefore to provide more context about the span. const ( // The method or function name, or equivalent (usually rightmost part of the code // unit's name). // // Type: string // Required: No // Stability: stable // Examples: 'serveRequest' CodeFunctionKey = attribute.Key("code.function") // The "namespace" within which `code.function` is defined. Usually the qualified // class or module name, such that `code.namespace` + some separator + // `code.function` form a unique identifier for the code unit. 
// // Type: string // Required: No // Stability: stable // Examples: 'com.example.MyHTTPService' CodeNamespaceKey = attribute.Key("code.namespace") // The source code file name that identifies the code unit as uniquely as possible // (preferably an absolute file path). // // Type: string // Required: No // Stability: stable // Examples: '/usr/local/MyApplication/content_root/app/index.php' CodeFilepathKey = attribute.Key("code.filepath") // The line number in `code.filepath` best representing the operation. It SHOULD // point within the code unit named in `code.function`. // // Type: int // Required: No // Stability: stable // Examples: 42 CodeLineNumberKey = attribute.Key("code.lineno") ) // This document defines semantic conventions for HTTP client and server Spans. const ( // HTTP request method. // // Type: string // Required: Always // Stability: stable // Examples: 'GET', 'POST', 'HEAD' HTTPMethodKey = attribute.Key("http.method") // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. // Usually the fragment is not transmitted over HTTP, but if it is known, it // should be included nevertheless. // // Type: string // Required: No // Stability: stable // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' // Note: `http.url` MUST NOT contain credentials passed via URL in form of // `https://username:password@www.example.com/`. In such case the attribute's // value should be `https://www.example.com/`. HTTPURLKey = attribute.Key("http.url") // The full request target as passed in a HTTP request line or equivalent. // // Type: string // Required: No // Stability: stable // Examples: '/path/12314/?q=ddds#123' HTTPTargetKey = attribute.Key("http.target") // The value of the [HTTP host // header](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is // empty or not present, this attribute should be the same. // // Type: string // Required: No // Stability: stable // Examples: 'www.example.org' HTTPHostKey = attribute.Key("http.host") // The URI scheme identifying the used protocol. // // Type: string // Required: No // Stability: stable // Examples: 'http', 'https' HTTPSchemeKey = attribute.Key("http.scheme") // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). // // Type: int // Required: If and only if one was received/sent. // Stability: stable // Examples: 200 HTTPStatusCodeKey = attribute.Key("http.status_code") // Kind of HTTP protocol used. // // Type: Enum // Required: No // Stability: stable // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP` // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed. HTTPFlavorKey = attribute.Key("http.flavor") // Value of the [HTTP User- // Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by the // client. // // Type: string // Required: No // Stability: stable // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' HTTPUserAgentKey = attribute.Key("http.user_agent") // The size of the request payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For // requests using transport encoding, this should be the compressed size. // // Type: int // Required: No // Stability: stable // Examples: 3495 HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") // The size of the uncompressed request payload body after transport decoding. 
Not // set if transport encoding not used. // // Type: int // Required: No // Stability: stable // Examples: 5493 HTTPRequestContentLengthUncompressedKey = attribute.Key("http.request_content_length_uncompressed") // The size of the response payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For // requests using transport encoding, this should be the compressed size. // // Type: int // Required: No // Stability: stable // Examples: 3495 HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") // The size of the uncompressed response payload body after transport decoding. // Not set if transport encoding not used. // // Type: int // Required: No // Stability: stable // Examples: 5493 HTTPResponseContentLengthUncompressedKey = attribute.Key("http.response_content_length_uncompressed") ) var ( // HTTP 1.0 HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") // HTTP 1.1 HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") // HTTP 2 HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") // SPDY protocol HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") // QUIC protocol HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") ) // Semantic Convention for HTTP Server const ( // The primary server name of the matched virtual host. This should be obtained // via configuration. If no such configuration can be obtained, this attribute // MUST NOT be set ( `net.host.name` should be used instead). // // Type: string // Required: No // Stability: stable // Examples: 'example.com' // Note: `http.url` is usually not readily available on the server side but would // have to be assembled in a cumbersome and sometimes lossy process from other // information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus // preferred to supply the raw data that is available. HTTPServerNameKey = attribute.Key("http.server_name") // The matched route (path template). // // Type: string // Required: No // Stability: stable // Examples: '/users/:userID?' HTTPRouteKey = attribute.Key("http.route") // The IP address of the original client behind all proxies, if known (e.g. from // [X-Forwarded-For](https://developer.mozilla.org/en- // US/docs/Web/HTTP/Headers/X-Forwarded-For)). // // Type: string // Required: No // Stability: stable // Examples: '83.164.160.102' // Note: This is not necessarily the same as `net.peer.ip`, which would identify // the network-level peer, which may be a proxy. HTTPClientIPKey = attribute.Key("http.client_ip") ) // Attributes that exist for multiple DynamoDB request types. const ( // The keys in the `RequestItems` object field. // // Type: string[] // Required: No // Stability: stable // Examples: 'Users', 'Cats' AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") // The JSON-serialized value of each item in the `ConsumedCapacity` response // field. 
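//
// Illustrative sketch (not part of the generated definitions): an HTTP client
// instrumentation could record the http.* attributes above on a CLIENT span
// as below. The tracer and ctx are assumed; URL and status code are examples.
//
//	ctx, span := tracer.Start(ctx, "HTTP GET",
//		trace.WithSpanKind(trace.SpanKindClient),
//		trace.WithAttributes(
//			semconv.HTTPMethodKey.String("GET"),
//			semconv.HTTPURLKey.String("https://www.example.com/search?q=OpenTelemetry"),
//			semconv.HTTPFlavorHTTP11,
//		),
//	)
//	// ... perform the request ...
//	span.SetAttributes(semconv.HTTPStatusCodeKey.Int(200))
//	span.End()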
// // Type: string[] // Required: No // Stability: stable // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": // "string", "WriteCapacityUnits": number }' AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") // The JSON-serialized value of the `ItemCollectionMetrics` response field. // // Type: string // Required: No // Stability: stable // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. // // Type: double // Required: No // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. // // Type: double // Required: No // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") // The value of the `ConsistentRead` request parameter. // // Type: boolean // Required: No // Stability: stable AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") // The value of the `ProjectionExpression` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, // ProductReviews' AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") // The value of the `Limit` request parameter. // // Type: int // Required: No // Stability: stable // Examples: 10 AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") // The value of the `AttributesToGet` request parameter. // // Type: string[] // Required: No // Stability: stable // Examples: 'lives', 'id' AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") // The value of the `IndexName` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'name_to_group' AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") // The value of the `Select` request parameter. 
// // Type: string // Required: No // Stability: stable // Examples: 'ALL_ATTRIBUTES', 'COUNT' AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") ) // DynamoDB.CreateTable const ( // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request // field // // Type: string[] // Required: No // Stability: stable // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": // number, "WriteCapacityUnits": number } }' AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request // field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], // "ProjectionType": "string" } }' AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") ) // DynamoDB.ListTables const ( // The value of the `ExclusiveStartTableName` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'Users', 'CatsTable' AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") // The number of items in the `TableNames` response parameter. // // Type: int // Required: No // Stability: stable // Examples: 20 AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") ) // DynamoDB.Query const ( // The value of the `ScanIndexForward` request parameter. // // Type: boolean // Required: No // Stability: stable AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") ) // DynamoDB.Scan const ( // The value of the `Segment` request parameter. // // Type: int // Required: No // Stability: stable // Examples: 10 AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") // The value of the `TotalSegments` request parameter. // // Type: int // Required: No // Stability: stable // Examples: 100 AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") // The value of the `Count` response parameter. // // Type: int // Required: No // Stability: stable // Examples: 10 AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") // The value of the `ScannedCount` response parameter. // // Type: int // Required: No // Stability: stable // Examples: 50 AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") ) // DynamoDB.UpdateTable const ( // The JSON-serialized value of each item in the `AttributeDefinitions` request // field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") // The JSON-serialized value of each item in the `GlobalSecondaryIndexUpdates` // request field. 
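//
// Illustrative sketch (not part of the generated definitions): a DynamoDB
// Query call could be annotated with the request-level attributes above. The
// span value is assumed; table, index, and parameter values are examples.
//
//	span.SetAttributes(
//		semconv.AWSDynamoDBTableNamesKey.StringSlice([]string{"Users"}),
//		semconv.AWSDynamoDBIndexNameKey.String("name_to_group"),
//		semconv.AWSDynamoDBLimitKey.Int(10),
//		semconv.AWSDynamoDBConsistentReadKey.Bool(true),
//	)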
// // Type: string[] // Required: No // Stability: stable // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { // "AttributeName": "string", "KeyType": "string" } ], "Projection": { // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": // number } }' AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") ) // This document defines the attributes used in messaging systems. const ( // A string identifying the messaging system. // // Type: string // Required: Always // Stability: stable // Examples: 'kafka', 'rabbitmq', 'activemq', 'AmazonSQS' MessagingSystemKey = attribute.Key("messaging.system") // The message destination name. This might be equal to the span name but is // required nevertheless. // // Type: string // Required: Always // Stability: stable // Examples: 'MyQueue', 'MyTopic' MessagingDestinationKey = attribute.Key("messaging.destination") // The kind of message destination // // Type: Enum // Required: Required only if the message destination is either a `queue` or // `topic`. // Stability: stable MessagingDestinationKindKey = attribute.Key("messaging.destination_kind") // A boolean that is true if the message destination is temporary. // // Type: boolean // Required: If missing, it is assumed to be false. // Stability: stable MessagingTempDestinationKey = attribute.Key("messaging.temp_destination") // The name of the transport protocol. // // Type: string // Required: No // Stability: stable // Examples: 'AMQP', 'MQTT' MessagingProtocolKey = attribute.Key("messaging.protocol") // The version of the transport protocol. // // Type: string // Required: No // Stability: stable // Examples: '0.9.1' MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version") // Connection string. // // Type: string // Required: No // Stability: stable // Examples: 'tibjmsnaming://localhost:7222', // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' MessagingURLKey = attribute.Key("messaging.url") // A value used by the messaging system as an identifier for the message, // represented as a string. // // Type: string // Required: No // Stability: stable // Examples: '452a7c7c7c7048c2f887f61572b18fc2' MessagingMessageIDKey = attribute.Key("messaging.message_id") // The [conversation ID](#conversations) identifying the conversation to which the // message belongs, represented as a string. Sometimes called "Correlation ID". // // Type: string // Required: No // Stability: stable // Examples: 'MyConversationID' MessagingConversationIDKey = attribute.Key("messaging.conversation_id") // The (uncompressed) size of the message payload in bytes. Also use this // attribute if it is unknown whether the compressed or uncompressed payload size // is reported. // // Type: int // Required: No // Stability: stable // Examples: 2738 MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes") // The compressed size of the message payload in bytes. 
// // Type: int // Required: No // Stability: stable // Examples: 2048 MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes") ) var ( // A message sent to a queue MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") // A message sent to a topic MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") ) // Semantic convention for a consumer of messages received from a messaging system const ( // A string identifying the kind of message consumption as defined in the // [Operation names](#operation-names) section above. If the operation is "send", // this attribute MUST NOT be set, since the operation can be inferred from the // span kind in that case. // // Type: Enum // Required: No // Stability: stable MessagingOperationKey = attribute.Key("messaging.operation") ) var ( // receive MessagingOperationReceive = MessagingOperationKey.String("receive") // process MessagingOperationProcess = MessagingOperationKey.String("process") ) // Attributes for RabbitMQ const ( // RabbitMQ message routing key. // // Type: string // Required: Unless it is empty. // Stability: stable // Examples: 'myKey' MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key") ) // Attributes for Apache Kafka const ( // Message keys in Kafka are used for grouping alike messages to ensure they're // processed on the same partition. They differ from `messaging.message_id` in // that they're not unique. If the key is `null`, the attribute MUST NOT be set. // // Type: string // Required: No // Stability: stable // Examples: 'myKey' // Note: If the key type is not string, it's string representation has to be // supplied for the attribute. If the key has no unambiguous, canonical string // form, don't include its value. MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key") // Name of the Kafka Consumer Group that is handling the message. Only applies to // consumers, not producers. // // Type: string // Required: No // Stability: stable // Examples: 'my-group' MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group") // Client ID for the Consumer or Producer that is handling the message. // // Type: string // Required: No // Stability: stable // Examples: 'client-5' MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") // Partition the message is sent to. // // Type: int // Required: No // Stability: stable // Examples: 2 MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition") // A boolean that is true if the message is a tombstone. // // Type: boolean // Required: If missing, it is assumed to be false. // Stability: stable MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone") ) // This document defines semantic conventions for remote procedure calls. const ( // A string identifying the remoting system. // // Type: string // Required: Always // Stability: stable // Examples: 'grpc', 'java_rmi', 'wcf' RPCSystemKey = attribute.Key("rpc.system") // The full (logical) name of the service being called, including its package // name, if applicable. // // Type: string // Required: No, but recommended // Stability: stable // Examples: 'myservice.EchoService' // Note: This is the logical name of the service from the RPC interface // perspective, which can be different from the name of any implementing class. 
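//
// Illustrative sketch (not part of the generated definitions): a Kafka
// consumer span could carry the messaging.* attributes above. The span value
// is assumed; topic, consumer group, and partition are examples.
//
//	span.SetAttributes(
//		semconv.MessagingSystemKey.String("kafka"),
//		semconv.MessagingDestinationKey.String("MyTopic"),
//		semconv.MessagingDestinationKindTopic,
//		semconv.MessagingOperationReceive,
//		semconv.MessagingKafkaConsumerGroupKey.String("my-group"),
//		semconv.MessagingKafkaPartitionKey.Int(2),
//	)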
// The `code.namespace` attribute may be used to store the latter (despite the // attribute name, it may include a class name; e.g., class with method actually // executing the call on the server side, RPC client stub class on the client // side). RPCServiceKey = attribute.Key("rpc.service") // The name of the (logical) method being called, must be equal to the $method // part in the span name. // // Type: string // Required: No, but recommended // Stability: stable // Examples: 'exampleMethod' // Note: This is the logical name of the method from the RPC interface // perspective, which can be different from the name of any implementing // method/function. The `code.function` attribute may be used to store the latter // (e.g., method actually executing the call on the server side, RPC client stub // method on the client side). RPCMethodKey = attribute.Key("rpc.method") ) // Tech-specific attributes for gRPC. const ( // The [numeric status // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC // request. // // Type: Enum // Required: Always // Stability: stable RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ) var ( // OK RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) // CANCELLED RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) // UNKNOWN RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) // INVALID_ARGUMENT RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) // DEADLINE_EXCEEDED RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) // NOT_FOUND RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) // ALREADY_EXISTS RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) // PERMISSION_DENIED RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) // RESOURCE_EXHAUSTED RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) // FAILED_PRECONDITION RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) // ABORTED RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) // OUT_OF_RANGE RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) // UNIMPLEMENTED RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) // INTERNAL RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) // UNAVAILABLE RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) // DATA_LOSS RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) // UNAUTHENTICATED RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) ) // Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). const ( // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC // 1.0 does not specify this, the value can be omitted. // // Type: string // Required: If missing, it is assumed to be "1.0". // Stability: stable // Examples: '2.0', '1.0' RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") // `id` property of request or response. Since protocol allows id to be int, // string, `null` or missing (for notifications), value is expected to be cast to // string for simplicity. Use empty string in case of `null` value. Omit entirely // if this is a notification. // // Type: string // Required: No // Stability: stable // Examples: '10', 'request-7', '' RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") // `error.code` property of response if it is an error response. // // Type: int // Required: If missing, response is assumed to be successful. 
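//
// Illustrative sketch (not part of the generated definitions): a gRPC client
// or server span could use the rpc.* attributes above. The span value is
// assumed; the service and method names are examples.
//
//	span.SetAttributes(
//		semconv.RPCSystemKey.String("grpc"),
//		semconv.RPCServiceKey.String("myservice.EchoService"),
//		semconv.RPCMethodKey.String("exampleMethod"),
//		semconv.RPCGRPCStatusCodeOk,
//	)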
// Stability: stable // Examples: -32700, 100 RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") // `error.message` property of response if it is an error response. // // Type: string // Required: No // Stability: stable // Examples: 'Parse error', 'User already exists' RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") ) opentelemetry-go-1.21.0/semconv/v1.7.0/000077500000000000000000000000001452547353200174075ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.7.0/doc.go000066400000000000000000000016621452547353200205100ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package semconv implements OpenTelemetry semantic conventions. // // OpenTelemetry semantic conventions are agreed standardized naming // patterns for OpenTelemetry things. This package represents the conventions // as of the v1.7.0 version of the OpenTelemetry specification. package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0" opentelemetry-go-1.21.0/semconv/v1.7.0/exception.go000066400000000000000000000014271452547353200217400ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0" const ( // ExceptionEventName is the name of the Span event representing an exception. ExceptionEventName = "exception" ) opentelemetry-go-1.21.0/semconv/v1.7.0/http.go000066400000000000000000000113111452547353200207120ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0" import ( "net/http" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/semconv/internal" "go.opentelemetry.io/otel/trace" ) // HTTP scheme attributes. 
var ( HTTPSchemeHTTP = HTTPSchemeKey.String("http") HTTPSchemeHTTPS = HTTPSchemeKey.String("https") ) var sc = &internal.SemanticConventions{ EnduserIDKey: EnduserIDKey, HTTPClientIPKey: HTTPClientIPKey, HTTPFlavorKey: HTTPFlavorKey, HTTPHostKey: HTTPHostKey, HTTPMethodKey: HTTPMethodKey, HTTPRequestContentLengthKey: HTTPRequestContentLengthKey, HTTPRouteKey: HTTPRouteKey, HTTPSchemeHTTP: HTTPSchemeHTTP, HTTPSchemeHTTPS: HTTPSchemeHTTPS, HTTPServerNameKey: HTTPServerNameKey, HTTPStatusCodeKey: HTTPStatusCodeKey, HTTPTargetKey: HTTPTargetKey, HTTPURLKey: HTTPURLKey, HTTPUserAgentKey: HTTPUserAgentKey, NetHostIPKey: NetHostIPKey, NetHostNameKey: NetHostNameKey, NetHostPortKey: NetHostPortKey, NetPeerIPKey: NetPeerIPKey, NetPeerNameKey: NetPeerNameKey, NetPeerPortKey: NetPeerPortKey, NetTransportIP: NetTransportIP, NetTransportOther: NetTransportOther, NetTransportTCP: NetTransportTCP, NetTransportUDP: NetTransportUDP, NetTransportUnix: NetTransportUnix, } // NetAttributesFromHTTPRequest generates attributes of the net // namespace as specified by the OpenTelemetry specification for a // span. The network parameter is a string that net.Dial function // from standard library can understand. func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { return sc.NetAttributesFromHTTPRequest(network, request) } // EndUserAttributesFromHTTPRequest generates attributes of the // enduser namespace as specified by the OpenTelemetry specification // for a span. func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { return sc.EndUserAttributesFromHTTPRequest(request) } // HTTPClientAttributesFromHTTPRequest generates attributes of the // http namespace as specified by the OpenTelemetry specification for // a span on the client side. func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { return sc.HTTPClientAttributesFromHTTPRequest(request) } // HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes // to be used with server-side HTTP metrics. func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { return sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, request) } // HTTPServerAttributesFromHTTPRequest generates attributes of the // http namespace as specified by the OpenTelemetry specification for // a span on the server side. Currently, only basic authentication is // supported. func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { return sc.HTTPServerAttributesFromHTTPRequest(serverName, route, request) } // HTTPAttributesFromHTTPStatusCode generates attributes of the http // namespace as specified by the OpenTelemetry specification for a // span. func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { return sc.HTTPAttributesFromHTTPStatusCode(code) } // SpanStatusFromHTTPStatusCode generates a status code and a message // as specified by the OpenTelemetry specification for a span. func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { return internal.SpanStatusFromHTTPStatusCode(code) } // SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message // as specified by the OpenTelemetry specification for a span. // Exclude 4xx for SERVER to set the appropriate status. 
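//
// Illustrative sketch (not part of the generated helpers): a net/http server
// handler could combine the helper functions above roughly as below. The
// tracer, req (an *http.Request), and status values are assumed for the
// example; the server name and route are hypothetical.
//
//	attrs := semconv.HTTPServerAttributesFromHTTPRequest("example.com", "/users/:userID?", req)
//	ctx, span := tracer.Start(req.Context(), "/users/:userID?",
//		trace.WithSpanKind(trace.SpanKindServer),
//		trace.WithAttributes(attrs...),
//	)
//	// ... serve the request using ctx, writing status ...
//	code, msg := semconv.SpanStatusFromHTTPStatusCodeAndSpanKind(status, trace.SpanKindServer)
//	span.SetStatus(code, msg)
//	span.End()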
func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { return internal.SpanStatusFromHTTPStatusCodeAndSpanKind(code, spanKind) } opentelemetry-go-1.21.0/semconv/v1.7.0/resource.go000066400000000000000000000761521452547353200216000ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0" import "go.opentelemetry.io/otel/attribute" // A cloud environment (e.g. GCP, Azure, AWS) const ( // Name of the cloud provider. // // Type: Enum // Required: No // Stability: stable CloudProviderKey = attribute.Key("cloud.provider") // The cloud account ID the resource is assigned to. // // Type: string // Required: No // Stability: stable // Examples: '111111111111', 'opentelemetry' CloudAccountIDKey = attribute.Key("cloud.account.id") // The geographical region the resource is running. Refer to your provider's docs // to see the available regions, for example [Alibaba Cloud // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), // [Azure regions](https://azure.microsoft.com/en-us/global- // infrastructure/geographies/), or [Google Cloud // regions](https://cloud.google.com/about/locations). // // Type: string // Required: No // Stability: stable // Examples: 'us-central1', 'us-east-1' CloudRegionKey = attribute.Key("cloud.region") // Cloud regions often have multiple, isolated locations known as zones to // increase availability. Availability zone represents the zone where the resource // is running. // // Type: string // Required: No // Stability: stable // Examples: 'us-east-1c' // Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud. CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") // The cloud platform in use. // // Type: Enum // Required: No // Stability: stable // Note: The prefix of the service SHOULD match the one specified in // `cloud.provider`. 
CloudPlatformKey = attribute.Key("cloud.platform") ) var ( // Alibaba Cloud CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") // Amazon Web Services CloudProviderAWS = CloudProviderKey.String("aws") // Microsoft Azure CloudProviderAzure = CloudProviderKey.String("azure") // Google Cloud Platform CloudProviderGCP = CloudProviderKey.String("gcp") ) var ( // Alibaba Cloud Elastic Compute Service CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") // Alibaba Cloud Function Compute CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") // AWS Elastic Compute Cloud CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") // AWS Elastic Container Service CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") // AWS Elastic Kubernetes Service CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") // AWS Lambda CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") // AWS Elastic Beanstalk CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") // Azure Virtual Machines CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") // Azure Container Instances CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") // Azure Kubernetes Service CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") // Azure Functions CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") // Azure App Service CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") // Google Cloud Compute Engine (GCE) CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") // Google Cloud Run CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") // Google Cloud Kubernetes Engine (GKE) CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") // Google Cloud Functions (GCF) CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") // Google Cloud App Engine (GAE) CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") ) // Resources used by AWS Elastic Container Service (ECS). const ( // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws. // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us- // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo // perguide/clusters.html). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l // aunch_types.html) for an ECS task. // // Type: Enum // Required: No // Stability: stable AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates // t/developerguide/task_definitions.html). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us- // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") // The task definition family this task definition is a member of. 
// // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-family' AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") // The revision for this task definition. // // Type: string // Required: No // Stability: stable // Examples: '8', '26' AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") ) var ( // ec2 AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") // fargate AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") ) // Resources used by AWS Elastic Kubernetes Service (EKS). const ( // The ARN of an EKS cluster. // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") ) // Resources specific to Amazon Web Services. const ( // The name(s) of the AWS log group(s) an application is writing to. // // Type: string[] // Required: No // Stability: stable // Examples: '/aws/lambda/my-function', 'opentelemetry-service' // Note: Multiple log groups must be supported for cases like multi-container // applications, where a single application has sidecar containers, and each write // to their own log group. AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") // The Amazon Resource Name(s) (ARN) of the AWS log group(s). // // Type: string[] // Required: No // Stability: stable // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' // Note: See the [log group ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- // access-control-overview-cwl.html#CWL_ARN_Format). AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") // The name(s) of the AWS log stream(s) an application is writing to. // // Type: string[] // Required: No // Stability: stable // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") // The ARN(s) of the AWS log stream(s). // // Type: string[] // Required: No // Stability: stable // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' // Note: See the [log stream ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain // several log streams, so these ARNs necessarily identify both a log group and a // log stream. AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") ) // A container instance. const ( // Container name. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-autoconf' ContainerNameKey = attribute.Key("container.name") // Container ID. Usually a UUID, as for example used to [identify Docker // containers](https://docs.docker.com/engine/reference/run/#container- // identification). The UUID might be abbreviated. // // Type: string // Required: No // Stability: stable // Examples: 'a3bf90e006b2' ContainerIDKey = attribute.Key("container.id") // The container runtime managing this container. // // Type: string // Required: No // Stability: stable // Examples: 'docker', 'containerd', 'rkt' ContainerRuntimeKey = attribute.Key("container.runtime") // Name of the image the container was built on. // // Type: string // Required: No // Stability: stable // Examples: 'gcr.io/opentelemetry/operator' ContainerImageNameKey = attribute.Key("container.image.name") // Container image tag. 
// // Type: string // Required: No // Stability: stable // Examples: '0.1' ContainerImageTagKey = attribute.Key("container.image.tag") ) // The software deployment. const ( // Name of the [deployment // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka // deployment tier). // // Type: string // Required: No // Stability: stable // Examples: 'staging', 'production' DeploymentEnvironmentKey = attribute.Key("deployment.environment") ) // The device on which the process represented by this resource is running. const ( // A unique identifier representing the device // // Type: string // Required: No // Stability: stable // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' // Note: The device identifier MUST only be defined using the values outlined // below. This value is not an advertising identifier and MUST NOT be used as // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the // Firebase Installation ID or a globally unique UUID which is persisted across // sessions in your application. More information can be found // [here](https://developer.android.com/training/articles/user-data-ids) on best // practices and exact implementation details. Caution should be taken when // storing personal data or anything which can identify a user. GDPR and data // protection laws may apply, ensure you do your own due diligence. DeviceIDKey = attribute.Key("device.id") // The model identifier for the device // // Type: string // Required: No // Stability: stable // Examples: 'iPhone3,4', 'SM-G920F' // Note: It's recommended this value represents a machine readable version of the // model identifier rather than the market or consumer-friendly name of the // device. DeviceModelIdentifierKey = attribute.Key("device.model.identifier") // The marketing name for the device model // // Type: string // Required: No // Stability: stable // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' // Note: It's recommended this value represents a human readable version of the // device model rather than a machine readable alternative. DeviceModelNameKey = attribute.Key("device.model.name") ) // A serverless instance. const ( // The name of the single function that this runtime instance executes. // // Type: string // Required: Always // Stability: stable // Examples: 'my-function' // Note: This is the name of the function as configured/deployed on the FaaS // platform and is usually different from the name of the callback function (which // may be stored in the // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span- // general.md#source-code-attributes) span attributes). FaaSNameKey = attribute.Key("faas.name") // The unique ID of the single function that this runtime instance executes. // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' // Note: Depending on the cloud provider, use: // * **AWS Lambda:** The function // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and- // namespaces.html). // Take care not to use the "invoked ARN" directly but replace any // [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration- // aliases.html) with the resolved function version, as the same runtime instance // may be invokable with multiple // different aliases. 
// * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full- // resource-names) // * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en- // us/rest/api/resources/resources/get-by-id). // On some providers, it may not be possible to determine the full ID at startup, // which is why this field cannot be made required. For example, on AWS the // account ID // part of the ARN is not available without calling another AWS API // which may be deemed too slow for a short-running lambda function. // As an alternative, consider setting `faas.id` as a span attribute instead. FaaSIDKey = attribute.Key("faas.id") // The immutable version of the function being executed. // // Type: string // Required: No // Stability: stable // Examples: '26', 'pinkfroid-00002' // Note: Depending on the cloud provider and platform, use: // * **AWS Lambda:** The [function // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration- // versions.html) // (an integer represented as a decimal string). // * **Google Cloud Run:** The // [revision](https://cloud.google.com/run/docs/managing/revisions) // (i.e., the function name plus the revision suffix). // * **Google Cloud Functions:** The value of the // [`K_REVISION` environment // variable](https://cloud.google.com/functions/docs/env- // var#runtime_environment_variables_set_automatically). // * **Azure Functions:** Not applicable. Do not set this attribute. FaaSVersionKey = attribute.Key("faas.version") // The execution environment ID as a string, that will be potentially reused for // other invocations to the same function/function version. // // Type: string // Required: No // Stability: stable // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' // Note: * **AWS Lambda:** Use the (full) log stream name. FaaSInstanceKey = attribute.Key("faas.instance") // The amount of memory available to the serverless function in MiB. // // Type: int // Required: No // Stability: stable // Examples: 128 // Note: It's recommended to set this attribute since e.g. too little memory can // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this // information. FaaSMaxMemoryKey = attribute.Key("faas.max_memory") ) // A host is defined as a general computing instance. const ( // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud // provider. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-test' HostIDKey = attribute.Key("host.id") // Name of the host. On Unix systems, it may contain what the hostname command // returns, or the fully qualified hostname, or another name specified by the // user. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-test' HostNameKey = attribute.Key("host.name") // Type of host. For Cloud, this must be the machine type. // // Type: string // Required: No // Stability: stable // Examples: 'n1-standard-1' HostTypeKey = attribute.Key("host.type") // The CPU architecture the host system is running on. // // Type: Enum // Required: No // Stability: stable HostArchKey = attribute.Key("host.arch") // Name of the VM image or OS install the host was instantiated from. // // Type: string // Required: No // Stability: stable // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' HostImageNameKey = attribute.Key("host.image.name") // VM image ID. For Cloud, this value is from the provider. 
// // Type: string // Required: No // Stability: stable // Examples: 'ami-07b06b442921831e5' HostImageIDKey = attribute.Key("host.image.id") // The version string of the VM image as defined in [Version // Attributes](README.md#version-attributes). // // Type: string // Required: No // Stability: stable // Examples: '0.1' HostImageVersionKey = attribute.Key("host.image.version") ) var ( // AMD64 HostArchAMD64 = HostArchKey.String("amd64") // ARM32 HostArchARM32 = HostArchKey.String("arm32") // ARM64 HostArchARM64 = HostArchKey.String("arm64") // Itanium HostArchIA64 = HostArchKey.String("ia64") // 32-bit PowerPC HostArchPPC32 = HostArchKey.String("ppc32") // 64-bit PowerPC HostArchPPC64 = HostArchKey.String("ppc64") // 32-bit x86 HostArchX86 = HostArchKey.String("x86") ) // A Kubernetes Cluster. const ( // The name of the cluster. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-cluster' K8SClusterNameKey = attribute.Key("k8s.cluster.name") ) // A Kubernetes Node object. const ( // The name of the Node. // // Type: string // Required: No // Stability: stable // Examples: 'node-1' K8SNodeNameKey = attribute.Key("k8s.node.name") // The UID of the Node. // // Type: string // Required: No // Stability: stable // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' K8SNodeUIDKey = attribute.Key("k8s.node.uid") ) // A Kubernetes Namespace. const ( // The name of the namespace that the pod is running in. // // Type: string // Required: No // Stability: stable // Examples: 'default' K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") ) // A Kubernetes Pod object. const ( // The UID of the Pod. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SPodUIDKey = attribute.Key("k8s.pod.uid") // The name of the Pod. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-pod-autoconf' K8SPodNameKey = attribute.Key("k8s.pod.name") ) // A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). const ( // The name of the Container in a Pod template. // // Type: string // Required: No // Stability: stable // Examples: 'redis' K8SContainerNameKey = attribute.Key("k8s.container.name") ) // A Kubernetes ReplicaSet object. const ( // The UID of the ReplicaSet. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") // The name of the ReplicaSet. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") ) // A Kubernetes Deployment object. const ( // The UID of the Deployment. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") // The name of the Deployment. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") ) // A Kubernetes StatefulSet object. const ( // The UID of the StatefulSet. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") // The name of the StatefulSet. 
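// A minimal usage sketch (illustrative, not generated from the specification):
// declaring the Kubernetes and container resource attributes defined in this
// file on an SDK resource. All values are placeholders that would normally
// come from the environment or the Kubernetes downward API.
package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
)

func podResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.K8SClusterNameKey.String("opentelemetry-cluster"),
		semconv.K8SNamespaceNameKey.String("default"),
		semconv.K8SDeploymentNameKey.String("checkout"),
		semconv.K8SPodNameKey.String("checkout-6d9c7f7b9d-abcde"),
		semconv.ContainerNameKey.String("checkout"),
		semconv.ContainerImageNameKey.String("gcr.io/example/checkout"),
		semconv.ContainerImageTagKey.String("0.1"),
	)
}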
// // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") ) // A Kubernetes DaemonSet object. const ( // The UID of the DaemonSet. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") // The name of the DaemonSet. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") ) // A Kubernetes Job object. const ( // The UID of the Job. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SJobUIDKey = attribute.Key("k8s.job.uid") // The name of the Job. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SJobNameKey = attribute.Key("k8s.job.name") ) // A Kubernetes CronJob object. const ( // The UID of the CronJob. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") // The name of the CronJob. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") ) // The operating system (OS) on which the process represented by this resource is running. const ( // The operating system type. // // Type: Enum // Required: Always // Stability: stable OSTypeKey = attribute.Key("os.type") // Human readable (not intended to be parsed) OS version information, like e.g. // reported by `ver` or `lsb_release -a` commands. // // Type: string // Required: No // Stability: stable // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' OSDescriptionKey = attribute.Key("os.description") // Human readable operating system name. // // Type: string // Required: No // Stability: stable // Examples: 'iOS', 'Android', 'Ubuntu' OSNameKey = attribute.Key("os.name") // The version string of the operating system as defined in [Version // Attributes](../../resource/semantic_conventions/README.md#version-attributes). // // Type: string // Required: No // Stability: stable // Examples: '14.2.1', '18.04.1' OSVersionKey = attribute.Key("os.version") ) var ( // Microsoft Windows OSTypeWindows = OSTypeKey.String("windows") // Linux OSTypeLinux = OSTypeKey.String("linux") // Apple Darwin OSTypeDarwin = OSTypeKey.String("darwin") // FreeBSD OSTypeFreeBSD = OSTypeKey.String("freebsd") // NetBSD OSTypeNetBSD = OSTypeKey.String("netbsd") // OpenBSD OSTypeOpenBSD = OSTypeKey.String("openbsd") // DragonFly BSD OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") // HP-UX (Hewlett Packard Unix) OSTypeHPUX = OSTypeKey.String("hpux") // AIX (Advanced Interactive eXecutive) OSTypeAIX = OSTypeKey.String("aix") // Oracle Solaris OSTypeSolaris = OSTypeKey.String("solaris") // IBM z/OS OSTypeZOS = OSTypeKey.String("z_os") ) // An operating system process. const ( // Process identifier (PID). // // Type: int // Required: No // Stability: stable // Examples: 1234 ProcessPIDKey = attribute.Key("process.pid") // The name of the process executable. On Linux based systems, can be set to the // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of // `GetProcessImageFileNameW`. 
// // Type: string // Required: See below // Stability: stable // Examples: 'otelcol' ProcessExecutableNameKey = attribute.Key("process.executable.name") // The full path to the process executable. On Linux based systems, can be set to // the target of `proc/[pid]/exe`. On Windows, can be set to the result of // `GetProcessImageFileNameW`. // // Type: string // Required: See below // Stability: stable // Examples: '/usr/bin/cmd/otelcol' ProcessExecutablePathKey = attribute.Key("process.executable.path") // The command used to launch the process (i.e. the command name). On Linux based // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, // can be set to the first parameter extracted from `GetCommandLineW`. // // Type: string // Required: See below // Stability: stable // Examples: 'cmd/otelcol' ProcessCommandKey = attribute.Key("process.command") // The full command used to launch the process as a single string representing the // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not // set this if you have to assemble it just for monitoring; use // `process.command_args` instead. // // Type: string // Required: See below // Stability: stable // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' ProcessCommandLineKey = attribute.Key("process.command_line") // All the command arguments (including the command/executable itself) as received // by the process. On Linux-based systems (and some other Unixoid systems // supporting procfs), can be set according to the list of null-delimited strings // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be // the full argv vector passed to `main`. // // Type: string[] // Required: See below // Stability: stable // Examples: 'cmd/otecol', '--config=config.yaml' ProcessCommandArgsKey = attribute.Key("process.command_args") // The username of the user that owns the process. // // Type: string // Required: No // Stability: stable // Examples: 'root' ProcessOwnerKey = attribute.Key("process.owner") ) // The single (language) runtime instance which is monitored. const ( // The name of the runtime of this process. For compiled native binaries, this // SHOULD be the name of the compiler. // // Type: string // Required: No // Stability: stable // Examples: 'OpenJDK Runtime Environment' ProcessRuntimeNameKey = attribute.Key("process.runtime.name") // The version of the runtime of this process, as returned by the runtime without // modification. // // Type: string // Required: No // Stability: stable // Examples: '14.0.2' ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") // An additional description about the runtime of the process, for example a // specific vendor customization of the runtime environment. // // Type: string // Required: No // Stability: stable // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") ) // A service instance. const ( // Logical name of the service. // // Type: string // Required: Always // Stability: stable // Examples: 'shoppingcart' // Note: MUST be the same for all instances of horizontally scaled services. If // the value was not specified, SDKs MUST fallback to `unknown_service:` // concatenated with [`process.executable.name`](process.md#process), e.g. // `unknown_service:bash`. If `process.executable.name` is not available, the // value MUST be set to `unknown_service`. 
ServiceNameKey = attribute.Key("service.name") // A namespace for `service.name`. // // Type: string // Required: No // Stability: stable // Examples: 'Shop' // Note: A string value having a meaning that helps to distinguish a group of // services, for example the team name that owns a group of services. // `service.name` is expected to be unique within the same namespace. If // `service.namespace` is not specified in the Resource then `service.name` is // expected to be unique for all services that have no explicit namespace defined // (so the empty/unspecified namespace is simply one more valid namespace). Zero- // length namespace string is assumed equal to unspecified namespace. ServiceNamespaceKey = attribute.Key("service.namespace") // The string ID of the service instance. // // Type: string // Required: No // Stability: stable // Examples: '627cc493-f310-47de-96bd-71410b7dec09' // Note: MUST be unique for each instance of the same // `service.namespace,service.name` pair (in other words // `service.namespace,service.name,service.instance.id` triplet MUST be globally // unique). The ID helps to distinguish instances of the same service that exist // at the same time (e.g. instances of a horizontally scaled service). It is // preferable for the ID to be persistent and stay the same for the lifetime of // the service instance, however it is acceptable that the ID is ephemeral and // changes during important lifetime events for the service (e.g. service // restarts). If the service has no inherent unique ID that can be used as the // value of this attribute it is recommended to generate a random Version 1 or // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use // Version 5, see RFC 4122 for more recommendations). ServiceInstanceIDKey = attribute.Key("service.instance.id") // The version string of the service API or implementation. // // Type: string // Required: No // Stability: stable // Examples: '2.0.0' ServiceVersionKey = attribute.Key("service.version") ) // The telemetry SDK used to capture data recorded by the instrumentation libraries. const ( // The name of the telemetry SDK as defined above. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") // The language of the telemetry SDK. // // Type: Enum // Required: No // Stability: stable TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") // The version string of the telemetry SDK. // // Type: string // Required: No // Stability: stable // Examples: '1.2.3' TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") // The version string of the auto instrumentation agent, if used. 
// // Type: string // Required: No // Stability: stable // Examples: '1.2.3' TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") ) var ( // cpp TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") // dotnet TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") // erlang TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") // go TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") // java TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") // nodejs TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") // php TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") // python TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") // ruby TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") // webjs TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") ) // Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. const ( // The name of the web engine. // // Type: string // Required: Always // Stability: stable // Examples: 'WildFly' WebEngineNameKey = attribute.Key("webengine.name") // The version of the web engine. // // Type: string // Required: No // Stability: stable // Examples: '21.0.0' WebEngineVersionKey = attribute.Key("webengine.version") // Additional description of the web engine (e.g. detailed version and edition // information). // // Type: string // Required: No // Stability: stable // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' WebEngineDescriptionKey = attribute.Key("webengine.description") ) opentelemetry-go-1.21.0/semconv/v1.7.0/schema.go000066400000000000000000000017121452547353200211770ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ const SchemaURL = "https://opentelemetry.io/schemas/1.7.0" opentelemetry-go-1.21.0/semconv/v1.7.0/trace.go000066400000000000000000001546311452547353200210460ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. 
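// A minimal usage sketch (illustrative, not generated from the specification):
// the service.* and deployment.environment attributes above, together with the
// SchemaURL constant from schema.go, attached to a TracerProvider. The service
// names and versions are placeholders taken from the examples in the comments.
package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
)

func newTracerProvider() *sdktrace.TracerProvider {
	res := resource.NewWithAttributes(
		semconv.SchemaURL, // "https://opentelemetry.io/schemas/1.7.0"
		semconv.ServiceNameKey.String("shoppingcart"),
		semconv.ServiceNamespaceKey.String("Shop"),
		semconv.ServiceVersionKey.String("2.0.0"),
		semconv.DeploymentEnvironmentKey.String("staging"),
	)
	return sdktrace.NewTracerProvider(sdktrace.WithResource(res))
}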
DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0" import "go.opentelemetry.io/otel/attribute" // Span attributes used by AWS Lambda (in addition to general `faas` attributes). const ( // The full invoked ARN as provided on the `Context` passed to the function // (`Lambda-Runtime-Invoked-Function-ARN` header on the `/runtime/invocation/next` // applicable). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' // Note: This may be different from `faas.id` if an alias is involved. AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") ) // This document defines the attributes used to perform database client calls. const ( // An identifier for the database management system (DBMS) product being used. See // below for a list of well-known identifiers. // // Type: Enum // Required: Always // Stability: stable DBSystemKey = attribute.Key("db.system") // The connection string used to connect to the database. It is recommended to // remove embedded credentials. // // Type: string // Required: No // Stability: stable // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' DBConnectionStringKey = attribute.Key("db.connection_string") // Username for accessing the database. // // Type: string // Required: No // Stability: stable // Examples: 'readonly_user', 'reporting_user' DBUserKey = attribute.Key("db.user") // The fully-qualified class name of the [Java Database Connectivity // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver // used to connect. // // Type: string // Required: No // Stability: stable // Examples: 'org.postgresql.Driver', // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") // If no [tech-specific attribute](#call-level-attributes-for-specific- // technologies) is defined, this attribute is used to report the name of the // database being accessed. For commands that switch the database, this should be // set to the target database (even if the command fails). // // Type: string // Required: Required, if applicable and no more-specific attribute is defined. // Stability: stable // Examples: 'customers', 'main' // Note: In some SQL databases, the database name to be used is called "schema // name". DBNameKey = attribute.Key("db.name") // The database statement being executed. // // Type: string // Required: Required if applicable and not explicitly disabled via // instrumentation configuration. // Stability: stable // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' // Note: The value may be sanitized to exclude sensitive information. DBStatementKey = attribute.Key("db.statement") // The name of the operation being executed, e.g. the [MongoDB command // name](https://docs.mongodb.com/manual/reference/command/#database-operations) // such as `findAndModify`, or the SQL keyword. // // Type: string // Required: Required, if `db.statement` is not applicable. // Stability: stable // Examples: 'findAndModify', 'HMSET', 'SELECT' // Note: When setting this to an SQL keyword, it is not recommended to attempt any // client-side parsing of `db.statement` just to get this property, but it should // be set if the operation name is provided by the library being instrumented. If // the SQL statement has an ambiguous operation, or performs more than one // operation, this value may be omitted. 
DBOperationKey = attribute.Key("db.operation") ) var ( // Some other SQL database. Fallback only. See notes DBSystemOtherSQL = DBSystemKey.String("other_sql") // Microsoft SQL Server DBSystemMSSQL = DBSystemKey.String("mssql") // MySQL DBSystemMySQL = DBSystemKey.String("mysql") // Oracle Database DBSystemOracle = DBSystemKey.String("oracle") // IBM DB2 DBSystemDB2 = DBSystemKey.String("db2") // PostgreSQL DBSystemPostgreSQL = DBSystemKey.String("postgresql") // Amazon Redshift DBSystemRedshift = DBSystemKey.String("redshift") // Apache Hive DBSystemHive = DBSystemKey.String("hive") // Cloudscape DBSystemCloudscape = DBSystemKey.String("cloudscape") // HyperSQL DataBase DBSystemHSQLDB = DBSystemKey.String("hsqldb") // Progress Database DBSystemProgress = DBSystemKey.String("progress") // SAP MaxDB DBSystemMaxDB = DBSystemKey.String("maxdb") // SAP HANA DBSystemHanaDB = DBSystemKey.String("hanadb") // Ingres DBSystemIngres = DBSystemKey.String("ingres") // FirstSQL DBSystemFirstSQL = DBSystemKey.String("firstsql") // EnterpriseDB DBSystemEDB = DBSystemKey.String("edb") // InterSystems Caché DBSystemCache = DBSystemKey.String("cache") // Adabas (Adaptable Database System) DBSystemAdabas = DBSystemKey.String("adabas") // Firebird DBSystemFirebird = DBSystemKey.String("firebird") // Apache Derby DBSystemDerby = DBSystemKey.String("derby") // FileMaker DBSystemFilemaker = DBSystemKey.String("filemaker") // Informix DBSystemInformix = DBSystemKey.String("informix") // InstantDB DBSystemInstantDB = DBSystemKey.String("instantdb") // InterBase DBSystemInterbase = DBSystemKey.String("interbase") // MariaDB DBSystemMariaDB = DBSystemKey.String("mariadb") // Netezza DBSystemNetezza = DBSystemKey.String("netezza") // Pervasive PSQL DBSystemPervasive = DBSystemKey.String("pervasive") // PointBase DBSystemPointbase = DBSystemKey.String("pointbase") // SQLite DBSystemSqlite = DBSystemKey.String("sqlite") // Sybase DBSystemSybase = DBSystemKey.String("sybase") // Teradata DBSystemTeradata = DBSystemKey.String("teradata") // Vertica DBSystemVertica = DBSystemKey.String("vertica") // H2 DBSystemH2 = DBSystemKey.String("h2") // ColdFusion IMQ DBSystemColdfusion = DBSystemKey.String("coldfusion") // Apache Cassandra DBSystemCassandra = DBSystemKey.String("cassandra") // Apache HBase DBSystemHBase = DBSystemKey.String("hbase") // MongoDB DBSystemMongoDB = DBSystemKey.String("mongodb") // Redis DBSystemRedis = DBSystemKey.String("redis") // Couchbase DBSystemCouchbase = DBSystemKey.String("couchbase") // CouchDB DBSystemCouchDB = DBSystemKey.String("couchdb") // Microsoft Azure Cosmos DB DBSystemCosmosDB = DBSystemKey.String("cosmosdb") // Amazon DynamoDB DBSystemDynamoDB = DBSystemKey.String("dynamodb") // Neo4j DBSystemNeo4j = DBSystemKey.String("neo4j") // Apache Geode DBSystemGeode = DBSystemKey.String("geode") // Elasticsearch DBSystemElasticsearch = DBSystemKey.String("elasticsearch") // Memcached DBSystemMemcached = DBSystemKey.String("memcached") // CockroachDB DBSystemCockroachdb = DBSystemKey.String("cockroachdb") ) // Connection-level attributes for Microsoft SQL Server const ( // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en- // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) // connecting to. This name is used to determine the port of a named instance. 
// // Type: string // Required: No // Stability: stable // Examples: 'MSSQLSERVER' // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer // required (but still recommended if non-standard). DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") ) // Call-level attributes for Cassandra const ( // The name of the keyspace being accessed. To be used instead of the generic // `db.name` attribute. // // Type: string // Required: Always // Stability: stable // Examples: 'mykeyspace' DBCassandraKeyspaceKey = attribute.Key("db.cassandra.keyspace") // The fetch size used for paging, i.e. how many rows will be returned at once. // // Type: int // Required: No // Stability: stable // Examples: 5000 DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") // The consistency level of the query. Based on consistency values from // [CQL](https://docs.datastax.com/en/cassandra- // oss/3.0/cassandra/dml/dmlConfigConsistency.html). // // Type: Enum // Required: No // Stability: stable DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") // The name of the primary table that the operation is acting upon, including the // schema name (if applicable). // // Type: string // Required: Recommended if available. // Stability: stable // Examples: 'mytable' // Note: This mirrors the db.sql.table attribute but references cassandra rather // than sql. It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting upon an // anonymous table, or more than one table, this value MUST NOT be set. DBCassandraTableKey = attribute.Key("db.cassandra.table") // Whether or not the query is idempotent. // // Type: boolean // Required: No // Stability: stable DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") // The number of times a query was speculatively executed. Not set or `0` if the // query was not executed speculatively. // // Type: int // Required: No // Stability: stable // Examples: 0, 2 DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") // The ID of the coordinating node for a query. // // Type: string // Required: No // Stability: stable // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") // The data center of the coordinating node for a query. 
// // Type: string // Required: No // Stability: stable // Examples: 'us-west-2' DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") ) var ( // all DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") // each_quorum DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") // quorum DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") // local_quorum DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") // one DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") // two DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") // three DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") // local_one DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") // any DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") // serial DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") // local_serial DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") ) // Call-level attributes for Apache HBase const ( // The [HBase namespace](https://hbase.apache.org/book.html#_namespace) being // accessed. To be used instead of the generic `db.name` attribute. // // Type: string // Required: Always // Stability: stable // Examples: 'default' DBHBaseNamespaceKey = attribute.Key("db.hbase.namespace") ) // Call-level attributes for Redis const ( // The index of the database being accessed as used in the [`SELECT` // command](https://redis.io/commands/select), provided as an integer. To be used // instead of the generic `db.name` attribute. // // Type: int // Required: Required, if other than the default database (`0`). // Stability: stable // Examples: 0, 1, 15 DBRedisDBIndexKey = attribute.Key("db.redis.database_index") ) // Call-level attributes for MongoDB const ( // The collection being accessed within the database stated in `db.name`. // // Type: string // Required: Always // Stability: stable // Examples: 'customers', 'products' DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") ) // Call-level attrbiutes for SQL databases const ( // The name of the primary table that the operation is acting upon, including the // schema name (if applicable). // // Type: string // Required: Recommended if available. // Stability: stable // Examples: 'public.users', 'customers' // Note: It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting upon an // anonymous table, or more than one table, this value MUST NOT be set. DBSQLTableKey = attribute.Key("db.sql.table") ) // This document defines the attributes used to report a single exception associated with a span. const ( // The type of the exception (its fully-qualified class name, if applicable). The // dynamic type of the exception should be preferred over the static type in // languages that support it. // // Type: string // Required: No // Stability: stable // Examples: 'java.net.ConnectException', 'OSError' ExceptionTypeKey = attribute.Key("exception.type") // The exception message. 
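// A minimal usage sketch (illustrative, not generated from the specification):
// a database CLIENT span annotated with the db.* attributes above plus the
// net.peer.* keys from this package. The database name, statement, and
// endpoint are placeholder assumptions.
package example

import (
	"context"
	"database/sql"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
	"go.opentelemetry.io/otel/trace"
)

func queryUsers(ctx context.Context, db *sql.DB) error {
	tracer := otel.Tracer("example/db-client")
	ctx, span := tracer.Start(ctx, "SELECT shop.users",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(
			semconv.DBSystemPostgreSQL,
			semconv.DBNameKey.String("shop"),
			semconv.DBUserKey.String("readonly_user"),
			semconv.DBOperationKey.String("SELECT"),
			semconv.DBSQLTableKey.String("users"),
			semconv.DBStatementKey.String("SELECT * FROM users WHERE id = $1"),
			semconv.NetPeerNameKey.String("db.example.com"),
			semconv.NetPeerPortKey.Int(5432),
		),
	)
	defer span.End()

	rows, err := db.QueryContext(ctx, "SELECT * FROM users WHERE id = $1", 42)
	if err != nil {
		span.RecordError(err)
		return err
	}
	defer rows.Close()
	return rows.Err()
}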
// // Type: string // Required: No // Stability: stable // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" ExceptionMessageKey = attribute.Key("exception.message") // A stacktrace as a string in the natural representation for the language // runtime. The representation is to be determined and documented by each language // SIG. // // Type: string // Required: No // Stability: stable // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test // exception\\n at ' // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' ExceptionStacktraceKey = attribute.Key("exception.stacktrace") // SHOULD be set to true if the exception event is recorded at a point where it is // known that the exception is escaping the scope of the span. // // Type: boolean // Required: No // Stability: stable // Note: An exception is considered to have escaped (or left) the scope of a span, // if that span is ended while the exception is still logically "in flight". // This may be actually "in flight" in some languages (e.g. if the exception // is passed to a Context manager's `__exit__` method in Python) but will // usually be caught at the point of recording the exception in most languages. // It is usually not possible to determine at the point where an exception is // thrown // whether it will escape the scope of a span. // However, it is trivial to know that an exception // will escape, if one checks for an active exception just before ending the span, // as done in the [example above](#exception-end-example). // It follows that an exception may still escape the scope of the span // even if the `exception.escaped` attribute was not set or set to false, // since the event might have been recorded at a time where it was not // clear whether the exception will escape. ExceptionEscapedKey = attribute.Key("exception.escaped") ) // This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. const ( // Type of the trigger on which the function is executed. // // Type: Enum // Required: On FaaS instances, faas.trigger MUST be set on incoming invocations. // Clients invoking FaaS instances MUST set `faas.trigger` on outgoing // invocations, if it is known to the client. This is, for example, not the case, // when the transport layer is abstracted in a FaaS client framework without // access to its configuration. // Stability: stable FaaSTriggerKey = attribute.Key("faas.trigger") // The execution ID of the current function execution. 
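// A minimal usage sketch (illustrative, not generated from the specification):
// recording an exception event by hand with the exception.* attributes defined
// above. In most code span.RecordError(err) emits an equivalent event; the
// exception type string here is a placeholder.
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
	"go.opentelemetry.io/otel/trace"
)

func recordEscapedError(span trace.Span, err error) {
	span.AddEvent("exception", trace.WithAttributes(
		semconv.ExceptionTypeKey.String("*os.PathError"),
		semconv.ExceptionMessageKey.String(err.Error()),
		semconv.ExceptionEscapedKey.Bool(true),
	))
}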
// // Type: string // Required: No // Stability: stable // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' FaaSExecutionKey = attribute.Key("faas.execution") ) var ( // A response to some data source operation such as a database or filesystem read/write FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") // To provide an answer to an inbound HTTP request FaaSTriggerHTTP = FaaSTriggerKey.String("http") // A function is set to be executed when messages are sent to a messaging system FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") // A function is scheduled to be executed regularly FaaSTriggerTimer = FaaSTriggerKey.String("timer") // If none of the others apply FaaSTriggerOther = FaaSTriggerKey.String("other") ) // Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. const ( // The name of the source on which the triggering operation was performed. For // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos // DB to the database name. // // Type: string // Required: Always // Stability: stable // Examples: 'myBucketName', 'myDBName' FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") // Describes the type of the operation that was performed on the data. // // Type: Enum // Required: Always // Stability: stable FaaSDocumentOperationKey = attribute.Key("faas.document.operation") // A string containing the time when the data was accessed in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed // in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // Required: Always // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSDocumentTimeKey = attribute.Key("faas.document.time") // The document name/table subjected to the operation. For example, in Cloud // Storage or S3 is the name of the file, and in Cosmos DB the table name. // // Type: string // Required: No // Stability: stable // Examples: 'myFile.txt', 'myTableName' FaaSDocumentNameKey = attribute.Key("faas.document.name") ) var ( // When a new object is created FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") // When an object is modified FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") // When an object is deleted FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") ) // Semantic Convention for FaaS scheduled to be executed regularly. const ( // A string containing the function invocation time in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed // in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // Required: Always // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSTimeKey = attribute.Key("faas.time") // A string containing the schedule period as [Cron Expression](https://docs.oracl // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). // // Type: string // Required: No // Stability: stable // Examples: '0/5 * * * ? *' FaaSCronKey = attribute.Key("faas.cron") ) // Contains additional attributes for incoming FaaS spans. const ( // A boolean that is true if the serverless function is executed for the first // time (aka cold-start). // // Type: boolean // Required: No // Stability: stable FaaSColdstartKey = attribute.Key("faas.coldstart") ) // Contains additional attributes for outgoing FaaS spans. const ( // The name of the invoked function. 
// // Type: string // Required: Always // Stability: stable // Examples: 'my-function' // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked // function. FaaSInvokedNameKey = attribute.Key("faas.invoked_name") // The cloud provider of the invoked function. // // Type: Enum // Required: Always // Stability: stable // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked // function. FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") // The cloud region of the invoked function. // // Type: string // Required: For some cloud providers, like AWS or GCP, the region in which a // function is hosted is essential to uniquely identify the function and also part // of its endpoint. Since it's part of the endpoint being called, the region is // always known to clients. In these cases, `faas.invoked_region` MUST be set // accordingly. If the region is unknown to the client or not required for // identifying the invoked function, setting `faas.invoked_region` is optional. // Stability: stable // Examples: 'eu-central-1' // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked // function. FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") ) var ( // Alibaba Cloud FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") // Amazon Web Services FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") // Microsoft Azure FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") // Google Cloud Platform FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") ) // These attributes may be used for any network related operation. const ( // Transport protocol used. See note below. // // Type: Enum // Required: No // Stability: stable NetTransportKey = attribute.Key("net.transport") // Remote address of the peer (dotted decimal for IPv4 or // [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6) // // Type: string // Required: No // Stability: stable // Examples: '127.0.0.1' NetPeerIPKey = attribute.Key("net.peer.ip") // Remote port number. // // Type: int // Required: No // Stability: stable // Examples: 80, 8080, 443 NetPeerPortKey = attribute.Key("net.peer.port") // Remote hostname or similar, see note below. // // Type: string // Required: No // Stability: stable // Examples: 'example.com' NetPeerNameKey = attribute.Key("net.peer.name") // Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host. // // Type: string // Required: No // Stability: stable // Examples: '192.168.0.1' NetHostIPKey = attribute.Key("net.host.ip") // Like `net.peer.port` but for the host port. // // Type: int // Required: No // Stability: stable // Examples: 35555 NetHostPortKey = attribute.Key("net.host.port") // Local hostname or similar, see note below. // // Type: string // Required: No // Stability: stable // Examples: 'localhost' NetHostNameKey = attribute.Key("net.host.name") // The internet connection type currently being used by the host. // // Type: Enum // Required: No // Stability: stable // Examples: 'wifi' NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") // This describes more details regarding the connection.type. It may be the type // of cell technology connection, but it could be used for describing details // about a wifi connection. // // Type: Enum // Required: No // Stability: stable // Examples: 'LTE' NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") // The name of the mobile carrier. 
// // Type: string // Required: No // Stability: stable // Examples: 'sprint' NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") // The mobile carrier country code. // // Type: string // Required: No // Stability: stable // Examples: '310' NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") // The mobile carrier network code. // // Type: string // Required: No // Stability: stable // Examples: '001' NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") // The ISO 3166-1 alpha-2 2-character country code associated with the mobile // carrier network. // // Type: string // Required: No // Stability: stable // Examples: 'DE' NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") ) var ( // ip_tcp NetTransportTCP = NetTransportKey.String("ip_tcp") // ip_udp NetTransportUDP = NetTransportKey.String("ip_udp") // Another IP-based protocol NetTransportIP = NetTransportKey.String("ip") // Unix Domain socket. See below NetTransportUnix = NetTransportKey.String("unix") // Named or anonymous pipe. See note below NetTransportPipe = NetTransportKey.String("pipe") // In-process communication NetTransportInProc = NetTransportKey.String("inproc") // Something else (non IP-based) NetTransportOther = NetTransportKey.String("other") ) var ( // wifi NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") // wired NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") // cell NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") // unavailable NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") // unknown NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") ) var ( // GPRS NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") // EDGE NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") // UMTS NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") // CDMA NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") // EVDO Rel. 0 NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") // EVDO Rev. A NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") // CDMA2000 1XRTT NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") // HSDPA NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") // HSUPA NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") // HSPA NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") // IDEN NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") // EVDO Rev. 
B NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") // LTE NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") // EHRPD NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") // HSPAP NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") // GSM NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") // TD-SCDMA NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") // IWLAN NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") // 5G NR (New Radio) NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") // 5G NRNSA (New Radio Non-Standalone) NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") // LTE CA NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") ) // Operations that access some remote service. const ( // The [`service.name`](../../resource/semantic_conventions/README.md#service) of // the remote service. SHOULD be equal to the actual `service.name` resource // attribute of the remote service if any. // // Type: string // Required: No // Stability: stable // Examples: 'AuthTokenCache' PeerServiceKey = attribute.Key("peer.service") ) // These attributes may be used for any operation with an authenticated and/or authorized enduser. const ( // Username or client_id extracted from the access token or // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the // inbound request from outside the system. // // Type: string // Required: No // Stability: stable // Examples: 'username' EnduserIDKey = attribute.Key("enduser.id") // Actual/assumed role the client is making the request under extracted from token // or application security context. // // Type: string // Required: No // Stability: stable // Examples: 'admin' EnduserRoleKey = attribute.Key("enduser.role") // Scopes or granted authorities the client currently possesses extracted from // token or application security context. The value would come from the scope // associated with an [OAuth 2.0 Access // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value // in a [SAML 2.0 Assertion](http://docs.oasis- // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). // // Type: string // Required: No // Stability: stable // Examples: 'read:message, write:files' EnduserScopeKey = attribute.Key("enduser.scope") ) // These attributes may be used for any operation to store information about a thread that started a span. const ( // Current "managed" thread ID (as opposed to OS thread ID). // // Type: int // Required: No // Stability: stable // Examples: 42 ThreadIDKey = attribute.Key("thread.id") // Current thread name. // // Type: string // Required: No // Stability: stable // Examples: 'main' ThreadNameKey = attribute.Key("thread.name") ) // These attributes allow to report this unit of code and therefore to provide more context about the span. const ( // The method or function name, or equivalent (usually rightmost part of the code // unit's name). // // Type: string // Required: No // Stability: stable // Examples: 'serveRequest' CodeFunctionKey = attribute.Key("code.function") // The "namespace" within which `code.function` is defined. Usually the qualified // class or module name, such that `code.namespace` + some separator + // `code.function` form a unique identifier for the code unit. 
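// A minimal usage sketch (illustrative, not generated from the specification):
// the peer.service, enduser.*, and code.* attributes above attached when
// starting a span. All values, including the tracer name and code namespace,
// are placeholder assumptions.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
	"go.opentelemetry.io/otel/trace"
)

func fetchToken(ctx context.Context) {
	tracer := otel.Tracer("example/auth")
	ctx, span := tracer.Start(ctx, "AuthTokenCache.Get",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(
			semconv.PeerServiceKey.String("AuthTokenCache"),
			semconv.EnduserIDKey.String("username"),
			semconv.CodeNamespaceKey.String("example.com/auth/cache"),
			semconv.CodeFunctionKey.String("Get"),
		),
	)
	defer span.End()
	_ = ctx
}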
// // Type: string // Required: No // Stability: stable // Examples: 'com.example.MyHTTPService' CodeNamespaceKey = attribute.Key("code.namespace") // The source code file name that identifies the code unit as uniquely as possible // (preferably an absolute file path). // // Type: string // Required: No // Stability: stable // Examples: '/usr/local/MyApplication/content_root/app/index.php' CodeFilepathKey = attribute.Key("code.filepath") // The line number in `code.filepath` best representing the operation. It SHOULD // point within the code unit named in `code.function`. // // Type: int // Required: No // Stability: stable // Examples: 42 CodeLineNumberKey = attribute.Key("code.lineno") ) // This document defines semantic conventions for HTTP client and server Spans. const ( // HTTP request method. // // Type: string // Required: Always // Stability: stable // Examples: 'GET', 'POST', 'HEAD' HTTPMethodKey = attribute.Key("http.method") // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. // Usually the fragment is not transmitted over HTTP, but if it is known, it // should be included nevertheless. // // Type: string // Required: No // Stability: stable // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' // Note: `http.url` MUST NOT contain credentials passed via URL in form of // `https://username:password@www.example.com/`. In such case the attribute's // value should be `https://www.example.com/`. HTTPURLKey = attribute.Key("http.url") // The full request target as passed in a HTTP request line or equivalent. // // Type: string // Required: No // Stability: stable // Examples: '/path/12314/?q=ddds#123' HTTPTargetKey = attribute.Key("http.target") // The value of the [HTTP host // header](https://tools.ietf.org/html/rfc7230#section-5.4). An empty Host header // should also be reported, see note. // // Type: string // Required: No // Stability: stable // Examples: 'www.example.org' // Note: When the header is present but empty the attribute SHOULD be set to the // empty string. Note that this is a valid situation that is expected in certain // cases, according the aforementioned [section of RFC // 7230](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is not // set the attribute MUST NOT be set. HTTPHostKey = attribute.Key("http.host") // The URI scheme identifying the used protocol. // // Type: string // Required: No // Stability: stable // Examples: 'http', 'https' HTTPSchemeKey = attribute.Key("http.scheme") // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). // // Type: int // Required: If and only if one was received/sent. // Stability: stable // Examples: 200 HTTPStatusCodeKey = attribute.Key("http.status_code") // Kind of HTTP protocol used. // // Type: Enum // Required: No // Stability: stable // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP` // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed. HTTPFlavorKey = attribute.Key("http.flavor") // Value of the [HTTP User- // Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by the // client. // // Type: string // Required: No // Stability: stable // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' HTTPUserAgentKey = attribute.Key("http.user_agent") // The size of the request payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. 
For // requests using transport encoding, this should be the compressed size. // // Type: int // Required: No // Stability: stable // Examples: 3495 HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") // The size of the uncompressed request payload body after transport decoding. Not // set if transport encoding not used. // // Type: int // Required: No // Stability: stable // Examples: 5493 HTTPRequestContentLengthUncompressedKey = attribute.Key("http.request_content_length_uncompressed") // The size of the response payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For // requests using transport encoding, this should be the compressed size. // // Type: int // Required: No // Stability: stable // Examples: 3495 HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") // The size of the uncompressed response payload body after transport decoding. // Not set if transport encoding not used. // // Type: int // Required: No // Stability: stable // Examples: 5493 HTTPResponseContentLengthUncompressedKey = attribute.Key("http.response_content_length_uncompressed") ) var ( // HTTP 1.0 HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") // HTTP 1.1 HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") // HTTP 2 HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") // SPDY protocol HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") // QUIC protocol HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") ) // Semantic Convention for HTTP Server const ( // The primary server name of the matched virtual host. This should be obtained // via configuration. If no such configuration can be obtained, this attribute // MUST NOT be set ( `net.host.name` should be used instead). // // Type: string // Required: No // Stability: stable // Examples: 'example.com' // Note: `http.url` is usually not readily available on the server side but would // have to be assembled in a cumbersome and sometimes lossy process from other // information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus // preferred to supply the raw data that is available. HTTPServerNameKey = attribute.Key("http.server_name") // The matched route (path template). // // Type: string // Required: No // Stability: stable // Examples: '/users/:userID?' HTTPRouteKey = attribute.Key("http.route") // The IP address of the original client behind all proxies, if known (e.g. from // [X-Forwarded-For](https://developer.mozilla.org/en- // US/docs/Web/HTTP/Headers/X-Forwarded-For)). // // Type: string // Required: No // Stability: stable // Examples: '83.164.160.102' // Note: This is not necessarily the same as `net.peer.ip`, which would // identify the network-level peer, which may be a proxy. // This attribute should be set when a source of information different // from the one used for `net.peer.ip`, is available even if that other // source just confirms the same value as `net.peer.ip`. // Rationale: For `net.peer.ip`, one typically does not know if it // comes from a proxy, reverse proxy, or the actual client. Setting // `http.client_ip` when it's the same as `net.peer.ip` means that // one is at least somewhat confident that the address is not that of // the closest proxy. HTTPClientIPKey = attribute.Key("http.client_ip") ) // Attributes that exist for multiple DynamoDB request types. const ( // The keys in the `RequestItems` object field. 
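// Illustrative sketch (not part of the generated conventions): the attribute
// set an HTTP client instrumentation might record for a single request, using
// the http.* keys defined above. All values are hypothetical; assumes imports
// of "go.opentelemetry.io/otel/attribute" and this package as semconv.
func exampleHTTPClientAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.HTTPMethodKey.String("GET"),
		semconv.HTTPURLKey.String("https://www.example.com/search?q=OpenTelemetry"),
		semconv.HTTPStatusCodeKey.Int(200),
		semconv.HTTPFlavorHTTP11, // protocol enum member defined above
		semconv.HTTPUserAgentKey.String("example-client/1.0"),
		semconv.HTTPResponseContentLengthKey.Int(3495),
	}
}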
// // Type: string[] // Required: No // Stability: stable // Examples: 'Users', 'Cats' AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") // The JSON-serialized value of each item in the `ConsumedCapacity` response // field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": // "string", "WriteCapacityUnits": number }' AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") // The JSON-serialized value of the `ItemCollectionMetrics` response field. // // Type: string // Required: No // Stability: stable // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. // // Type: double // Required: No // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. // // Type: double // Required: No // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") // The value of the `ConsistentRead` request parameter. // // Type: boolean // Required: No // Stability: stable AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") // The value of the `ProjectionExpression` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, // ProductReviews' AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") // The value of the `Limit` request parameter. // // Type: int // Required: No // Stability: stable // Examples: 10 AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") // The value of the `AttributesToGet` request parameter. // // Type: string[] // Required: No // Stability: stable // Examples: 'lives', 'id' AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") // The value of the `IndexName` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'name_to_group' AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") // The value of the `Select` request parameter. 
// // Type: string // Required: No // Stability: stable // Examples: 'ALL_ATTRIBUTES', 'COUNT' AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") ) // DynamoDB.CreateTable const ( // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request // field // // Type: string[] // Required: No // Stability: stable // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": // number, "WriteCapacityUnits": number } }' AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request // field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], // "ProjectionType": "string" } }' AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") ) // DynamoDB.ListTables const ( // The value of the `ExclusiveStartTableName` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'Users', 'CatsTable' AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") // The the number of items in the `TableNames` response parameter. // // Type: int // Required: No // Stability: stable // Examples: 20 AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") ) // DynamoDB.Query const ( // The value of the `ScanIndexForward` request parameter. // // Type: boolean // Required: No // Stability: stable AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") ) // DynamoDB.Scan const ( // The value of the `Segment` request parameter. // // Type: int // Required: No // Stability: stable // Examples: 10 AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") // The value of the `TotalSegments` request parameter. // // Type: int // Required: No // Stability: stable // Examples: 100 AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") // The value of the `Count` response parameter. // // Type: int // Required: No // Stability: stable // Examples: 10 AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") // The value of the `ScannedCount` response parameter. // // Type: int // Required: No // Stability: stable // Examples: 50 AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") ) // DynamoDB.UpdateTable const ( // The JSON-serialized value of each item in the `AttributeDefinitions` request // field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") // The JSON-serialized value of each item in the the `GlobalSecondaryIndexUpdates` // request field. 
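// Illustrative sketch (not part of the generated conventions): attributes a
// DynamoDB client span for a Query call might carry, built from the keys
// above. Table and index names are hypothetical; assumes imports of
// "go.opentelemetry.io/otel/attribute" and this package as semconv.
func exampleDynamoDBQueryAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.AWSDynamoDBTableNamesKey.StringSlice([]string{"Users"}),
		semconv.AWSDynamoDBIndexNameKey.String("name_to_group"),
		semconv.AWSDynamoDBSelectKey.String("ALL_ATTRIBUTES"),
		semconv.AWSDynamoDBLimitKey.Int(10),
		semconv.AWSDynamoDBConsistentReadKey.Bool(true),
		semconv.AWSDynamoDBScanForwardKey.Bool(false),
	}
}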
// // Type: string[] // Required: No // Stability: stable // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { // "AttributeName": "string", "KeyType": "string" } ], "Projection": { // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": // number } }' AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") ) // This document defines the attributes used in messaging systems. const ( // A string identifying the messaging system. // // Type: string // Required: Always // Stability: stable // Examples: 'kafka', 'rabbitmq', 'activemq', 'AmazonSQS' MessagingSystemKey = attribute.Key("messaging.system") // The message destination name. This might be equal to the span name but is // required nevertheless. // // Type: string // Required: Always // Stability: stable // Examples: 'MyQueue', 'MyTopic' MessagingDestinationKey = attribute.Key("messaging.destination") // The kind of message destination // // Type: Enum // Required: Required only if the message destination is either a `queue` or // `topic`. // Stability: stable MessagingDestinationKindKey = attribute.Key("messaging.destination_kind") // A boolean that is true if the message destination is temporary. // // Type: boolean // Required: If missing, it is assumed to be false. // Stability: stable MessagingTempDestinationKey = attribute.Key("messaging.temp_destination") // The name of the transport protocol. // // Type: string // Required: No // Stability: stable // Examples: 'AMQP', 'MQTT' MessagingProtocolKey = attribute.Key("messaging.protocol") // The version of the transport protocol. // // Type: string // Required: No // Stability: stable // Examples: '0.9.1' MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version") // Connection string. // // Type: string // Required: No // Stability: stable // Examples: 'tibjmsnaming://localhost:7222', // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' MessagingURLKey = attribute.Key("messaging.url") // A value used by the messaging system as an identifier for the message, // represented as a string. // // Type: string // Required: No // Stability: stable // Examples: '452a7c7c7c7048c2f887f61572b18fc2' MessagingMessageIDKey = attribute.Key("messaging.message_id") // The [conversation ID](#conversations) identifying the conversation to which the // message belongs, represented as a string. Sometimes called "Correlation ID". // // Type: string // Required: No // Stability: stable // Examples: 'MyConversationID' MessagingConversationIDKey = attribute.Key("messaging.conversation_id") // The (uncompressed) size of the message payload in bytes. Also use this // attribute if it is unknown whether the compressed or uncompressed payload size // is reported. // // Type: int // Required: No // Stability: stable // Examples: 2738 MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes") // The compressed size of the message payload in bytes. 
// // Type: int // Required: No // Stability: stable // Examples: 2048 MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes") ) var ( // A message sent to a queue MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") // A message sent to a topic MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") ) // Semantic convention for a consumer of messages received from a messaging system const ( // A string identifying the kind of message consumption as defined in the // [Operation names](#operation-names) section above. If the operation is "send", // this attribute MUST NOT be set, since the operation can be inferred from the // span kind in that case. // // Type: Enum // Required: No // Stability: stable MessagingOperationKey = attribute.Key("messaging.operation") // The identifier for the consumer receiving a message. For Kafka, set it to // `{messaging.kafka.consumer_group} - {messaging.kafka.client_id}`, if both are // present, or only `messaging.kafka.consumer_group`. For brokers, such as // RabbitMQ and Artemis, set it to the `client_id` of the client consuming the // message. // // Type: string // Required: No // Stability: stable // Examples: 'mygroup - client-6' MessagingConsumerIDKey = attribute.Key("messaging.consumer_id") ) var ( // receive MessagingOperationReceive = MessagingOperationKey.String("receive") // process MessagingOperationProcess = MessagingOperationKey.String("process") ) // Attributes for RabbitMQ const ( // RabbitMQ message routing key. // // Type: string // Required: Unless it is empty. // Stability: stable // Examples: 'myKey' MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key") ) // Attributes for Apache Kafka const ( // Message keys in Kafka are used for grouping alike messages to ensure they're // processed on the same partition. They differ from `messaging.message_id` in // that they're not unique. If the key is `null`, the attribute MUST NOT be set. // // Type: string // Required: No // Stability: stable // Examples: 'myKey' // Note: If the key type is not string, it's string representation has to be // supplied for the attribute. If the key has no unambiguous, canonical string // form, don't include its value. MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key") // Name of the Kafka Consumer Group that is handling the message. Only applies to // consumers, not producers. // // Type: string // Required: No // Stability: stable // Examples: 'my-group' MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group") // Client ID for the Consumer or Producer that is handling the message. // // Type: string // Required: No // Stability: stable // Examples: 'client-5' MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") // Partition the message is sent to. // // Type: int // Required: No // Stability: stable // Examples: 2 MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition") // A boolean that is true if the message is a tombstone. // // Type: boolean // Required: If missing, it is assumed to be false. // Stability: stable MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone") ) // This document defines semantic conventions for remote procedure calls. const ( // A string identifying the remoting system. 
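// Illustrative sketch (not part of the generated conventions): attributes for
// a span describing a Kafka message being processed by a consumer, combining
// the messaging.* keys above. Topic, group, and client IDs are hypothetical;
// assumes "go.opentelemetry.io/otel/attribute" and this package as semconv.
func exampleKafkaProcessAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.MessagingSystemKey.String("kafka"),
		semconv.MessagingDestinationKey.String("MyTopic"),
		semconv.MessagingDestinationKindTopic,
		semconv.MessagingOperationProcess,
		semconv.MessagingKafkaConsumerGroupKey.String("my-group"),
		semconv.MessagingKafkaClientIDKey.String("client-5"),
		semconv.MessagingKafkaPartitionKey.Int(2),
		semconv.MessagingMessageIDKey.String("452a7c7c7c7048c2f887f61572b18fc2"),
	}
}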
// // Type: string // Required: Always // Stability: stable // Examples: 'grpc', 'java_rmi', 'wcf' RPCSystemKey = attribute.Key("rpc.system") // The full (logical) name of the service being called, including its package // name, if applicable. // // Type: string // Required: No, but recommended // Stability: stable // Examples: 'myservice.EchoService' // Note: This is the logical name of the service from the RPC interface // perspective, which can be different from the name of any implementing class. // The `code.namespace` attribute may be used to store the latter (despite the // attribute name, it may include a class name; e.g., class with method actually // executing the call on the server side, RPC client stub class on the client // side). RPCServiceKey = attribute.Key("rpc.service") // The name of the (logical) method being called, must be equal to the $method // part in the span name. // // Type: string // Required: No, but recommended // Stability: stable // Examples: 'exampleMethod' // Note: This is the logical name of the method from the RPC interface // perspective, which can be different from the name of any implementing // method/function. The `code.function` attribute may be used to store the latter // (e.g., method actually executing the call on the server side, RPC client stub // method on the client side). RPCMethodKey = attribute.Key("rpc.method") ) // Tech-specific attributes for gRPC. const ( // The [numeric status // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC // request. // // Type: Enum // Required: Always // Stability: stable RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ) var ( // OK RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) // CANCELLED RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) // UNKNOWN RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) // INVALID_ARGUMENT RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) // DEADLINE_EXCEEDED RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) // NOT_FOUND RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) // ALREADY_EXISTS RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) // PERMISSION_DENIED RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) // RESOURCE_EXHAUSTED RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) // FAILED_PRECONDITION RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) // ABORTED RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) // OUT_OF_RANGE RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) // UNIMPLEMENTED RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) // INTERNAL RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) // UNAVAILABLE RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) // DATA_LOSS RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) // UNAUTHENTICATED RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) ) // Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). const ( // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC // 1.0 does not specify this, the value can be omitted. // // Type: string // Required: If missing, it is assumed to be "1.0". // Stability: stable // Examples: '2.0', '1.0' RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") // `id` property of request or response. Since protocol allows id to be int, // string, `null` or missing (for notifications), value is expected to be cast to // string for simplicity. 
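// Illustrative sketch (not part of the generated conventions): attributes for
// a gRPC client span, combining the rpc.* keys and the gRPC status-code enum
// values defined above. The service and method names are hypothetical; assumes
// "go.opentelemetry.io/otel/attribute" and this package imported as semconv.
func exampleGRPCClientAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.RPCSystemKey.String("grpc"),
		semconv.RPCServiceKey.String("myservice.EchoService"),
		semconv.RPCMethodKey.String("Echo"),
		semconv.RPCGRPCStatusCodeOk, // numeric gRPC status code 0 (OK)
	}
}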
Use empty string in case of `null` value. Omit entirely // if this is a notification. // // Type: string // Required: No // Stability: stable // Examples: '10', 'request-7', '' RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") // `error.code` property of response if it is an error response. // // Type: int // Required: If missing, response is assumed to be successful. // Stability: stable // Examples: -32700, 100 RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") // `error.message` property of response if it is an error response. // // Type: string // Required: No // Stability: stable // Examples: 'Parse error', 'User already exists' RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") ) // RPC received/sent message. const ( // Whether this is a received or sent message. // // Type: Enum // Required: No // Stability: stable MessageTypeKey = attribute.Key("message.type") // MUST be calculated as two different counters starting from `1` one for sent // messages and one for received message. // // Type: int // Required: No // Stability: stable // Note: This way we guarantee that the values will be consistent between // different implementations. MessageIDKey = attribute.Key("message.id") // Compressed size of the message in bytes. // // Type: int // Required: No // Stability: stable MessageCompressedSizeKey = attribute.Key("message.compressed_size") // Uncompressed size of the message in bytes. // // Type: int // Required: No // Stability: stable MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") ) var ( // sent MessageTypeSent = MessageTypeKey.String("SENT") // received MessageTypeReceived = MessageTypeKey.String("RECEIVED") ) opentelemetry-go-1.21.0/semconv/v1.8.0/000077500000000000000000000000001452547353200174105ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.8.0/doc.go000066400000000000000000000016621452547353200205110ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package semconv implements OpenTelemetry semantic conventions. // // OpenTelemetry semantic conventions are agreed standardized naming // patterns for OpenTelemetry things. This package represents the conventions // as of the v1.8.0 version of the OpenTelemetry specification. package semconv // import "go.opentelemetry.io/otel/semconv/v1.8.0" opentelemetry-go-1.21.0/semconv/v1.8.0/exception.go000066400000000000000000000014271452547353200217410ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.8.0" const ( // ExceptionEventName is the name of the Span event representing an exception. ExceptionEventName = "exception" ) opentelemetry-go-1.21.0/semconv/v1.8.0/http.go000066400000000000000000000113111452547353200207130ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.8.0" import ( "net/http" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/semconv/internal" "go.opentelemetry.io/otel/trace" ) // HTTP scheme attributes. var ( HTTPSchemeHTTP = HTTPSchemeKey.String("http") HTTPSchemeHTTPS = HTTPSchemeKey.String("https") ) var sc = &internal.SemanticConventions{ EnduserIDKey: EnduserIDKey, HTTPClientIPKey: HTTPClientIPKey, HTTPFlavorKey: HTTPFlavorKey, HTTPHostKey: HTTPHostKey, HTTPMethodKey: HTTPMethodKey, HTTPRequestContentLengthKey: HTTPRequestContentLengthKey, HTTPRouteKey: HTTPRouteKey, HTTPSchemeHTTP: HTTPSchemeHTTP, HTTPSchemeHTTPS: HTTPSchemeHTTPS, HTTPServerNameKey: HTTPServerNameKey, HTTPStatusCodeKey: HTTPStatusCodeKey, HTTPTargetKey: HTTPTargetKey, HTTPURLKey: HTTPURLKey, HTTPUserAgentKey: HTTPUserAgentKey, NetHostIPKey: NetHostIPKey, NetHostNameKey: NetHostNameKey, NetHostPortKey: NetHostPortKey, NetPeerIPKey: NetPeerIPKey, NetPeerNameKey: NetPeerNameKey, NetPeerPortKey: NetPeerPortKey, NetTransportIP: NetTransportIP, NetTransportOther: NetTransportOther, NetTransportTCP: NetTransportTCP, NetTransportUDP: NetTransportUDP, NetTransportUnix: NetTransportUnix, } // NetAttributesFromHTTPRequest generates attributes of the net // namespace as specified by the OpenTelemetry specification for a // span. The network parameter is a string that net.Dial function // from standard library can understand. func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { return sc.NetAttributesFromHTTPRequest(network, request) } // EndUserAttributesFromHTTPRequest generates attributes of the // enduser namespace as specified by the OpenTelemetry specification // for a span. func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { return sc.EndUserAttributesFromHTTPRequest(request) } // HTTPClientAttributesFromHTTPRequest generates attributes of the // http namespace as specified by the OpenTelemetry specification for // a span on the client side. func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { return sc.HTTPClientAttributesFromHTTPRequest(request) } // HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes // to be used with server-side HTTP metrics. 
func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { return sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, request) } // HTTPServerAttributesFromHTTPRequest generates attributes of the // http namespace as specified by the OpenTelemetry specification for // a span on the server side. Currently, only basic authentication is // supported. func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { return sc.HTTPServerAttributesFromHTTPRequest(serverName, route, request) } // HTTPAttributesFromHTTPStatusCode generates attributes of the http // namespace as specified by the OpenTelemetry specification for a // span. func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { return sc.HTTPAttributesFromHTTPStatusCode(code) } // SpanStatusFromHTTPStatusCode generates a status code and a message // as specified by the OpenTelemetry specification for a span. func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { return internal.SpanStatusFromHTTPStatusCode(code) } // SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message // as specified by the OpenTelemetry specification for a span. // Exclude 4xx for SERVER to set the appropriate status. func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { return internal.SpanStatusFromHTTPStatusCodeAndSpanKind(code, spanKind) } opentelemetry-go-1.21.0/semconv/v1.8.0/resource.go000066400000000000000000001005271452547353200215730ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.8.0" import "go.opentelemetry.io/otel/attribute" // A cloud environment (e.g. GCP, Azure, AWS) const ( // Name of the cloud provider. // // Type: Enum // Required: No // Stability: stable CloudProviderKey = attribute.Key("cloud.provider") // The cloud account ID the resource is assigned to. // // Type: string // Required: No // Stability: stable // Examples: '111111111111', 'opentelemetry' CloudAccountIDKey = attribute.Key("cloud.account.id") // The geographical region the resource is running. // // Type: string // Required: No // Stability: stable // Examples: 'us-central1', 'us-east-1' // Note: Refer to your provider's docs to see the available regions, for example // [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc- // detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global- // infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en- // us/global-infrastructure/geographies/), [Google Cloud // regions](https://cloud.google.com/about/locations), or [Tencent Cloud // regions](https://intl.cloud.tencent.com/document/product/213/6091). 
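// Illustrative sketch (not part of this file): how a net/http handler might
// use the helper functions defined above to annotate a server span. The
// tracer name, server name, and route are hypothetical; assumes imports of
// "net/http", "go.opentelemetry.io/otel", "go.opentelemetry.io/otel/trace",
// and this package imported as semconv.
func exampleHandler(w http.ResponseWriter, r *http.Request) {
	// Start a server span; in real code the returned context would be passed
	// to downstream work instead of being discarded.
	_, span := otel.Tracer("example/http").Start(r.Context(), "GET /users/:userID",
		trace.WithSpanKind(trace.SpanKindServer),
		trace.WithAttributes(semconv.NetAttributesFromHTTPRequest("tcp", r)...),
		trace.WithAttributes(semconv.HTTPServerAttributesFromHTTPRequest("example.com", "/users/:userID", r)...),
	)
	defer span.End()
	w.WriteHeader(http.StatusOK)
	span.SetAttributes(semconv.HTTPAttributesFromHTTPStatusCode(http.StatusOK)...)
	span.SetStatus(semconv.SpanStatusFromHTTPStatusCode(http.StatusOK))
}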
CloudRegionKey = attribute.Key("cloud.region") // Cloud regions often have multiple, isolated locations known as zones to // increase availability. Availability zone represents the zone where the resource // is running. // // Type: string // Required: No // Stability: stable // Examples: 'us-east-1c' // Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud. CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") // The cloud platform in use. // // Type: Enum // Required: No // Stability: stable // Note: The prefix of the service SHOULD match the one specified in // `cloud.provider`. CloudPlatformKey = attribute.Key("cloud.platform") ) var ( // Alibaba Cloud CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") // Amazon Web Services CloudProviderAWS = CloudProviderKey.String("aws") // Microsoft Azure CloudProviderAzure = CloudProviderKey.String("azure") // Google Cloud Platform CloudProviderGCP = CloudProviderKey.String("gcp") // Tencent Cloud CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") ) var ( // Alibaba Cloud Elastic Compute Service CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") // Alibaba Cloud Function Compute CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") // AWS Elastic Compute Cloud CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") // AWS Elastic Container Service CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") // AWS Elastic Kubernetes Service CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") // AWS Lambda CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") // AWS Elastic Beanstalk CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") // AWS App Runner CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") // Azure Virtual Machines CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") // Azure Container Instances CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") // Azure Kubernetes Service CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") // Azure Functions CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") // Azure App Service CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") // Google Cloud Compute Engine (GCE) CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") // Google Cloud Run CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") // Google Cloud Kubernetes Engine (GKE) CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") // Google Cloud Functions (GCF) CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") // Google Cloud App Engine (GAE) CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") // Tencent Cloud Cloud Virtual Machine (CVM) CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") // Tencent Cloud Elastic Kubernetes Service (EKS) CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") // Tencent Cloud Serverless Cloud Function (SCF) CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") ) // Resources used by AWS Elastic Container Service (ECS). const ( // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws. // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). 
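// Illustrative sketch (not part of the generated conventions): describing the
// cloud environment of a workload as an SDK resource, using the cloud.* keys
// and enum values above. Region and account values are hypothetical; assumes
// "go.opentelemetry.io/otel/sdk/resource" imported as resource and this
// package imported as semconv.
func exampleCloudResource() *resource.Resource {
	return resource.NewWithAttributes(semconv.SchemaURL,
		semconv.CloudProviderAWS,
		semconv.CloudPlatformAWSEC2,
		semconv.CloudRegionKey.String("us-east-1"),
		semconv.CloudAvailabilityZoneKey.String("us-east-1c"),
		semconv.CloudAccountIDKey.String("111111111111"),
	)
}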
// // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us- // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo // perguide/clusters.html). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l // aunch_types.html) for an ECS task. // // Type: Enum // Required: No // Stability: stable AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates // t/developerguide/task_definitions.html). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us- // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") // The task definition family this task definition is a member of. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-family' AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") // The revision for this task definition. // // Type: string // Required: No // Stability: stable // Examples: '8', '26' AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") ) var ( // ec2 AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") // fargate AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") ) // Resources used by AWS Elastic Kubernetes Service (EKS). const ( // The ARN of an EKS cluster. // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") ) // Resources specific to Amazon Web Services. const ( // The name(s) of the AWS log group(s) an application is writing to. // // Type: string[] // Required: No // Stability: stable // Examples: '/aws/lambda/my-function', 'opentelemetry-service' // Note: Multiple log groups must be supported for cases like multi-container // applications, where a single application has sidecar containers, and each write // to their own log group. AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") // The Amazon Resource Name(s) (ARN) of the AWS log group(s). // // Type: string[] // Required: No // Stability: stable // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' // Note: See the [log group ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- // access-control-overview-cwl.html#CWL_ARN_Format). AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") // The name(s) of the AWS log stream(s) an application is writing to. // // Type: string[] // Required: No // Stability: stable // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") // The ARN(s) of the AWS log stream(s). // // Type: string[] // Required: No // Stability: stable // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' // Note: See the [log stream ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- // access-control-overview-cwl.html#CWL_ARN_Format). 
One log group can contain // several log streams, so these ARNs necessarily identify both a log group and a // log stream. AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") ) // A container instance. const ( // Container name used by container runtime. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-autoconf' ContainerNameKey = attribute.Key("container.name") // Container ID. Usually a UUID, as for example used to [identify Docker // containers](https://docs.docker.com/engine/reference/run/#container- // identification). The UUID might be abbreviated. // // Type: string // Required: No // Stability: stable // Examples: 'a3bf90e006b2' ContainerIDKey = attribute.Key("container.id") // The container runtime managing this container. // // Type: string // Required: No // Stability: stable // Examples: 'docker', 'containerd', 'rkt' ContainerRuntimeKey = attribute.Key("container.runtime") // Name of the image the container was built on. // // Type: string // Required: No // Stability: stable // Examples: 'gcr.io/opentelemetry/operator' ContainerImageNameKey = attribute.Key("container.image.name") // Container image tag. // // Type: string // Required: No // Stability: stable // Examples: '0.1' ContainerImageTagKey = attribute.Key("container.image.tag") ) // The software deployment. const ( // Name of the [deployment // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka // deployment tier). // // Type: string // Required: No // Stability: stable // Examples: 'staging', 'production' DeploymentEnvironmentKey = attribute.Key("deployment.environment") ) // The device on which the process represented by this resource is running. const ( // A unique identifier representing the device // // Type: string // Required: No // Stability: stable // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' // Note: The device identifier MUST only be defined using the values outlined // below. This value is not an advertising identifier and MUST NOT be used as // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the // Firebase Installation ID or a globally unique UUID which is persisted across // sessions in your application. More information can be found // [here](https://developer.android.com/training/articles/user-data-ids) on best // practices and exact implementation details. Caution should be taken when // storing personal data or anything which can identify a user. GDPR and data // protection laws may apply, ensure you do your own due diligence. DeviceIDKey = attribute.Key("device.id") // The model identifier for the device // // Type: string // Required: No // Stability: stable // Examples: 'iPhone3,4', 'SM-G920F' // Note: It's recommended this value represents a machine readable version of the // model identifier rather than the market or consumer-friendly name of the // device. DeviceModelIdentifierKey = attribute.Key("device.model.identifier") // The marketing name for the device model // // Type: string // Required: No // Stability: stable // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' // Note: It's recommended this value represents a human readable version of the // device model rather than a machine readable alternative. DeviceModelNameKey = attribute.Key("device.model.name") ) // A serverless instance. 
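// Illustrative sketch (not part of the generated conventions): a resource
// describing a container and its deployment environment, built from the
// container.* and deployment.* keys above. Names, IDs, and the image tag are
// hypothetical; assumes "go.opentelemetry.io/otel/sdk/resource" and this
// package imported as semconv.
func exampleContainerResource() *resource.Resource {
	return resource.NewWithAttributes(semconv.SchemaURL,
		semconv.ContainerNameKey.String("opentelemetry-autoconf"),
		semconv.ContainerIDKey.String("a3bf90e006b2"),
		semconv.ContainerRuntimeKey.String("docker"),
		semconv.ContainerImageNameKey.String("gcr.io/opentelemetry/operator"),
		semconv.ContainerImageTagKey.String("0.1"),
		semconv.DeploymentEnvironmentKey.String("production"),
	)
}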
const ( // The name of the single function that this runtime instance executes. // // Type: string // Required: Always // Stability: stable // Examples: 'my-function' // Note: This is the name of the function as configured/deployed on the FaaS // platform and is usually different from the name of the callback function (which // may be stored in the // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span- // general.md#source-code-attributes) span attributes). FaaSNameKey = attribute.Key("faas.name") // The unique ID of the single function that this runtime instance executes. // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' // Note: Depending on the cloud provider, use: // * **AWS Lambda:** The function // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and- // namespaces.html). // Take care not to use the "invoked ARN" directly but replace any // [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration- // aliases.html) with the resolved function version, as the same runtime instance // may be invokable with multiple // different aliases. // * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full- // resource-names) // * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en- // us/rest/api/resources/resources/get-by-id). // On some providers, it may not be possible to determine the full ID at startup, // which is why this field cannot be made required. For example, on AWS the // account ID // part of the ARN is not available without calling another AWS API // which may be deemed too slow for a short-running lambda function. // As an alternative, consider setting `faas.id` as a span attribute instead. FaaSIDKey = attribute.Key("faas.id") // The immutable version of the function being executed. // // Type: string // Required: No // Stability: stable // Examples: '26', 'pinkfroid-00002' // Note: Depending on the cloud provider and platform, use: // * **AWS Lambda:** The [function // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration- // versions.html) // (an integer represented as a decimal string). // * **Google Cloud Run:** The // [revision](https://cloud.google.com/run/docs/managing/revisions) // (i.e., the function name plus the revision suffix). // * **Google Cloud Functions:** The value of the // [`K_REVISION` environment // variable](https://cloud.google.com/functions/docs/env- // var#runtime_environment_variables_set_automatically). // * **Azure Functions:** Not applicable. Do not set this attribute. FaaSVersionKey = attribute.Key("faas.version") // The execution environment ID as a string, that will be potentially reused for // other invocations to the same function/function version. // // Type: string // Required: No // Stability: stable // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' // Note: * **AWS Lambda:** Use the (full) log stream name. FaaSInstanceKey = attribute.Key("faas.instance") // The amount of memory available to the serverless function in MiB. // // Type: int // Required: No // Stability: stable // Examples: 128 // Note: It's recommended to set this attribute since e.g. too little memory can // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this // information. 
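// Illustrative sketch (not part of the generated conventions): a resource for
// an AWS Lambda-style function instance, using the faas.* keys above. The
// function name, ARN, version, and instance values are hypothetical examples
// from the documentation above; assumes "go.opentelemetry.io/otel/sdk/resource"
// and this package imported as semconv.
func exampleFaaSResource() *resource.Resource {
	return resource.NewWithAttributes(semconv.SchemaURL,
		semconv.CloudProviderAWS,
		semconv.CloudPlatformAWSLambda,
		semconv.FaaSNameKey.String("my-function"),
		semconv.FaaSIDKey.String("arn:aws:lambda:us-west-2:123456789012:function:my-function"),
		semconv.FaaSVersionKey.String("26"),
		semconv.FaaSInstanceKey.String("2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de"),
		semconv.FaaSMaxMemoryKey.Int(128),
	)
}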
FaaSMaxMemoryKey = attribute.Key("faas.max_memory") ) // A host is defined as a general computing instance. const ( // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud // provider. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-test' HostIDKey = attribute.Key("host.id") // Name of the host. On Unix systems, it may contain what the hostname command // returns, or the fully qualified hostname, or another name specified by the // user. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-test' HostNameKey = attribute.Key("host.name") // Type of host. For Cloud, this must be the machine type. // // Type: string // Required: No // Stability: stable // Examples: 'n1-standard-1' HostTypeKey = attribute.Key("host.type") // The CPU architecture the host system is running on. // // Type: Enum // Required: No // Stability: stable HostArchKey = attribute.Key("host.arch") // Name of the VM image or OS install the host was instantiated from. // // Type: string // Required: No // Stability: stable // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' HostImageNameKey = attribute.Key("host.image.name") // VM image ID. For Cloud, this value is from the provider. // // Type: string // Required: No // Stability: stable // Examples: 'ami-07b06b442921831e5' HostImageIDKey = attribute.Key("host.image.id") // The version string of the VM image as defined in [Version // Attributes](README.md#version-attributes). // // Type: string // Required: No // Stability: stable // Examples: '0.1' HostImageVersionKey = attribute.Key("host.image.version") ) var ( // AMD64 HostArchAMD64 = HostArchKey.String("amd64") // ARM32 HostArchARM32 = HostArchKey.String("arm32") // ARM64 HostArchARM64 = HostArchKey.String("arm64") // Itanium HostArchIA64 = HostArchKey.String("ia64") // 32-bit PowerPC HostArchPPC32 = HostArchKey.String("ppc32") // 64-bit PowerPC HostArchPPC64 = HostArchKey.String("ppc64") // IBM z/Architecture HostArchS390x = HostArchKey.String("s390x") // 32-bit x86 HostArchX86 = HostArchKey.String("x86") ) // A Kubernetes Cluster. const ( // The name of the cluster. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-cluster' K8SClusterNameKey = attribute.Key("k8s.cluster.name") ) // A Kubernetes Node object. const ( // The name of the Node. // // Type: string // Required: No // Stability: stable // Examples: 'node-1' K8SNodeNameKey = attribute.Key("k8s.node.name") // The UID of the Node. // // Type: string // Required: No // Stability: stable // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' K8SNodeUIDKey = attribute.Key("k8s.node.uid") ) // A Kubernetes Namespace. const ( // The name of the namespace that the pod is running in. // // Type: string // Required: No // Stability: stable // Examples: 'default' K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") ) // A Kubernetes Pod object. const ( // The UID of the Pod. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SPodUIDKey = attribute.Key("k8s.pod.uid") // The name of the Pod. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-pod-autoconf' K8SPodNameKey = attribute.Key("k8s.pod.name") ) // A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). const ( // The name of the Container from Pod specification, must be unique within a Pod. 
// Container runtime usually uses different globally unique name // (`container.name`). // // Type: string // Required: No // Stability: stable // Examples: 'redis' K8SContainerNameKey = attribute.Key("k8s.container.name") // Number of times the container was restarted. This attribute can be used to // identify a particular container (running or stopped) within a container spec. // // Type: int // Required: No // Stability: stable // Examples: 0, 2 K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") ) // A Kubernetes ReplicaSet object. const ( // The UID of the ReplicaSet. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") // The name of the ReplicaSet. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") ) // A Kubernetes Deployment object. const ( // The UID of the Deployment. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") // The name of the Deployment. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") ) // A Kubernetes StatefulSet object. const ( // The UID of the StatefulSet. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") // The name of the StatefulSet. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") ) // A Kubernetes DaemonSet object. const ( // The UID of the DaemonSet. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") // The name of the DaemonSet. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") ) // A Kubernetes Job object. const ( // The UID of the Job. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SJobUIDKey = attribute.Key("k8s.job.uid") // The name of the Job. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SJobNameKey = attribute.Key("k8s.job.name") ) // A Kubernetes CronJob object. const ( // The UID of the CronJob. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") // The name of the CronJob. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") ) // The operating system (OS) on which the process represented by this resource is running. const ( // The operating system type. // // Type: Enum // Required: Always // Stability: stable OSTypeKey = attribute.Key("os.type") // Human readable (not intended to be parsed) OS version information, like e.g. // reported by `ver` or `lsb_release -a` commands. // // Type: string // Required: No // Stability: stable // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' OSDescriptionKey = attribute.Key("os.description") // Human readable operating system name. 
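// Illustrative sketch (not part of the generated conventions): a resource
// identifying a workload running in Kubernetes, combining the k8s.* keys
// above. Cluster, namespace, pod, and container names are hypothetical;
// assumes "go.opentelemetry.io/otel/sdk/resource" and this package as semconv.
func exampleKubernetesResource() *resource.Resource {
	return resource.NewWithAttributes(semconv.SchemaURL,
		semconv.K8SClusterNameKey.String("opentelemetry-cluster"),
		semconv.K8SNamespaceNameKey.String("default"),
		semconv.K8SDeploymentNameKey.String("opentelemetry"),
		semconv.K8SPodNameKey.String("opentelemetry-pod-autoconf"),
		semconv.K8SPodUIDKey.String("275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"),
		semconv.K8SContainerNameKey.String("redis"),
	)
}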
// // Type: string // Required: No // Stability: stable // Examples: 'iOS', 'Android', 'Ubuntu' OSNameKey = attribute.Key("os.name") // The version string of the operating system as defined in [Version // Attributes](../../resource/semantic_conventions/README.md#version-attributes). // // Type: string // Required: No // Stability: stable // Examples: '14.2.1', '18.04.1' OSVersionKey = attribute.Key("os.version") ) var ( // Microsoft Windows OSTypeWindows = OSTypeKey.String("windows") // Linux OSTypeLinux = OSTypeKey.String("linux") // Apple Darwin OSTypeDarwin = OSTypeKey.String("darwin") // FreeBSD OSTypeFreeBSD = OSTypeKey.String("freebsd") // NetBSD OSTypeNetBSD = OSTypeKey.String("netbsd") // OpenBSD OSTypeOpenBSD = OSTypeKey.String("openbsd") // DragonFly BSD OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") // HP-UX (Hewlett Packard Unix) OSTypeHPUX = OSTypeKey.String("hpux") // AIX (Advanced Interactive eXecutive) OSTypeAIX = OSTypeKey.String("aix") // Oracle Solaris OSTypeSolaris = OSTypeKey.String("solaris") // IBM z/OS OSTypeZOS = OSTypeKey.String("z_os") ) // An operating system process. const ( // Process identifier (PID). // // Type: int // Required: No // Stability: stable // Examples: 1234 ProcessPIDKey = attribute.Key("process.pid") // The name of the process executable. On Linux based systems, can be set to the // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of // `GetProcessImageFileNameW`. // // Type: string // Required: See below // Stability: stable // Examples: 'otelcol' ProcessExecutableNameKey = attribute.Key("process.executable.name") // The full path to the process executable. On Linux based systems, can be set to // the target of `proc/[pid]/exe`. On Windows, can be set to the result of // `GetProcessImageFileNameW`. // // Type: string // Required: See below // Stability: stable // Examples: '/usr/bin/cmd/otelcol' ProcessExecutablePathKey = attribute.Key("process.executable.path") // The command used to launch the process (i.e. the command name). On Linux based // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, // can be set to the first parameter extracted from `GetCommandLineW`. // // Type: string // Required: See below // Stability: stable // Examples: 'cmd/otelcol' ProcessCommandKey = attribute.Key("process.command") // The full command used to launch the process as a single string representing the // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not // set this if you have to assemble it just for monitoring; use // `process.command_args` instead. // // Type: string // Required: See below // Stability: stable // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' ProcessCommandLineKey = attribute.Key("process.command_line") // All the command arguments (including the command/executable itself) as received // by the process. On Linux-based systems (and some other Unixoid systems // supporting procfs), can be set according to the list of null-delimited strings // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be // the full argv vector passed to `main`. // // Type: string[] // Required: See below // Stability: stable // Examples: 'cmd/otecol', '--config=config.yaml' ProcessCommandArgsKey = attribute.Key("process.command_args") // The username of the user that owns the process. 
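// Illustrative sketch (not part of the generated conventions): a resource
// describing the operating system and process, using the os.* and process.*
// keys above. The description, PID, executable, and runtime values are
// hypothetical; assumes "go.opentelemetry.io/otel/sdk/resource" and this
// package imported as semconv.
func exampleProcessResource() *resource.Resource {
	return resource.NewWithAttributes(semconv.SchemaURL,
		semconv.OSTypeLinux,
		semconv.OSDescriptionKey.String("Ubuntu 18.04.1 LTS"),
		semconv.ProcessPIDKey.Int(1234),
		semconv.ProcessExecutableNameKey.String("otelcol"),
		semconv.ProcessRuntimeNameKey.String("go"),
		semconv.ProcessRuntimeVersionKey.String("1.20.5"),
	)
}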
// // Type: string // Required: No // Stability: stable // Examples: 'root' ProcessOwnerKey = attribute.Key("process.owner") ) // The single (language) runtime instance which is monitored. const ( // The name of the runtime of this process. For compiled native binaries, this // SHOULD be the name of the compiler. // // Type: string // Required: No // Stability: stable // Examples: 'OpenJDK Runtime Environment' ProcessRuntimeNameKey = attribute.Key("process.runtime.name") // The version of the runtime of this process, as returned by the runtime without // modification. // // Type: string // Required: No // Stability: stable // Examples: '14.0.2' ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") // An additional description about the runtime of the process, for example a // specific vendor customization of the runtime environment. // // Type: string // Required: No // Stability: stable // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") ) // A service instance. const ( // Logical name of the service. // // Type: string // Required: Always // Stability: stable // Examples: 'shoppingcart' // Note: MUST be the same for all instances of horizontally scaled services. If // the value was not specified, SDKs MUST fallback to `unknown_service:` // concatenated with [`process.executable.name`](process.md#process), e.g. // `unknown_service:bash`. If `process.executable.name` is not available, the // value MUST be set to `unknown_service`. ServiceNameKey = attribute.Key("service.name") // A namespace for `service.name`. // // Type: string // Required: No // Stability: stable // Examples: 'Shop' // Note: A string value having a meaning that helps to distinguish a group of // services, for example the team name that owns a group of services. // `service.name` is expected to be unique within the same namespace. If // `service.namespace` is not specified in the Resource then `service.name` is // expected to be unique for all services that have no explicit namespace defined // (so the empty/unspecified namespace is simply one more valid namespace). Zero- // length namespace string is assumed equal to unspecified namespace. ServiceNamespaceKey = attribute.Key("service.namespace") // The string ID of the service instance. // // Type: string // Required: No // Stability: stable // Examples: '627cc493-f310-47de-96bd-71410b7dec09' // Note: MUST be unique for each instance of the same // `service.namespace,service.name` pair (in other words // `service.namespace,service.name,service.instance.id` triplet MUST be globally // unique). The ID helps to distinguish instances of the same service that exist // at the same time (e.g. instances of a horizontally scaled service). It is // preferable for the ID to be persistent and stay the same for the lifetime of // the service instance, however it is acceptable that the ID is ephemeral and // changes during important lifetime events for the service (e.g. service // restarts). If the service has no inherent unique ID that can be used as the // value of this attribute it is recommended to generate a random Version 1 or // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use // Version 5, see RFC 4122 for more recommendations). ServiceInstanceIDKey = attribute.Key("service.instance.id") // The version string of the service API or implementation. 
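//
// Usage sketch (illustrative): service identity resource attributes might be
// assembled with resource.NewWithAttributes from
// go.opentelemetry.io/otel/sdk/resource; the concrete values here are the
// documented examples plus a hypothetical runtime name.
//
//	res := resource.NewWithAttributes(
//		SchemaURL,
//		ServiceNameKey.String("shoppingcart"),
//		ServiceNamespaceKey.String("Shop"),
//		ServiceInstanceIDKey.String("627cc493-f310-47de-96bd-71410b7dec09"),
//		ProcessRuntimeNameKey.String("go"),
//	)
//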
// // Type: string // Required: No // Stability: stable // Examples: '2.0.0' ServiceVersionKey = attribute.Key("service.version") ) // The telemetry SDK used to capture data recorded by the instrumentation libraries. const ( // The name of the telemetry SDK as defined above. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") // The language of the telemetry SDK. // // Type: Enum // Required: No // Stability: stable TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") // The version string of the telemetry SDK. // // Type: string // Required: No // Stability: stable // Examples: '1.2.3' TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") // The version string of the auto instrumentation agent, if used. // // Type: string // Required: No // Stability: stable // Examples: '1.2.3' TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") ) var ( // cpp TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") // dotnet TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") // erlang TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") // go TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") // java TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") // nodejs TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") // php TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") // python TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") // ruby TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") // webjs TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") // swift TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") ) // Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. const ( // The name of the web engine. // // Type: string // Required: Always // Stability: stable // Examples: 'WildFly' WebEngineNameKey = attribute.Key("webengine.name") // The version of the web engine. // // Type: string // Required: No // Stability: stable // Examples: '21.0.0' WebEngineVersionKey = attribute.Key("webengine.version") // Additional description of the web engine (e.g. detailed version and edition // information). // // Type: string // Required: No // Stability: stable // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' WebEngineDescriptionKey = attribute.Key("webengine.description") ) opentelemetry-go-1.21.0/semconv/v1.8.0/schema.go000066400000000000000000000017121452547353200212000ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.8.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. 
Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ const SchemaURL = "https://opentelemetry.io/schemas/1.8.0" opentelemetry-go-1.21.0/semconv/v1.8.0/trace.go000066400000000000000000001610141452547353200210400ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.8.0" import "go.opentelemetry.io/otel/attribute" // Span attributes used by AWS Lambda (in addition to general `faas` attributes). const ( // The full invoked ARN as provided on the `Context` passed to the function // (`Lambda-Runtime-Invoked-Function-ARN` header on the `/runtime/invocation/next` // applicable). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' // Note: This may be different from `faas.id` if an alias is involved. AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") ) // This document defines the attributes used to perform database client calls. const ( // An identifier for the database management system (DBMS) product being used. See // below for a list of well-known identifiers. // // Type: Enum // Required: Always // Stability: stable DBSystemKey = attribute.Key("db.system") // The connection string used to connect to the database. It is recommended to // remove embedded credentials. // // Type: string // Required: No // Stability: stable // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' DBConnectionStringKey = attribute.Key("db.connection_string") // Username for accessing the database. // // Type: string // Required: No // Stability: stable // Examples: 'readonly_user', 'reporting_user' DBUserKey = attribute.Key("db.user") // The fully-qualified class name of the [Java Database Connectivity // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver // used to connect. // // Type: string // Required: No // Stability: stable // Examples: 'org.postgresql.Driver', // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") // This attribute is used to report the name of the database being accessed. For // commands that switch the database, this should be set to the target database // (even if the command fails). // // Type: string // Required: Required, if applicable. // Stability: stable // Examples: 'customers', 'main' // Note: In some SQL databases, the database name to be used is called "schema // name". In case there are multiple layers that could be considered for database // name (e.g. Oracle instance name and schema name), the database name to be used // is the more specific layer (e.g. Oracle schema name). DBNameKey = attribute.Key("db.name") // The database statement being executed. 
// // Type: string // Required: Required if applicable and not explicitly disabled via // instrumentation configuration. // Stability: stable // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' // Note: The value may be sanitized to exclude sensitive information. DBStatementKey = attribute.Key("db.statement") // The name of the operation being executed, e.g. the [MongoDB command // name](https://docs.mongodb.com/manual/reference/command/#database-operations) // such as `findAndModify`, or the SQL keyword. // // Type: string // Required: Required, if `db.statement` is not applicable. // Stability: stable // Examples: 'findAndModify', 'HMSET', 'SELECT' // Note: When setting this to an SQL keyword, it is not recommended to attempt any // client-side parsing of `db.statement` just to get this property, but it should // be set if the operation name is provided by the library being instrumented. If // the SQL statement has an ambiguous operation, or performs more than one // operation, this value may be omitted. DBOperationKey = attribute.Key("db.operation") ) var ( // Some other SQL database. Fallback only. See notes DBSystemOtherSQL = DBSystemKey.String("other_sql") // Microsoft SQL Server DBSystemMSSQL = DBSystemKey.String("mssql") // MySQL DBSystemMySQL = DBSystemKey.String("mysql") // Oracle Database DBSystemOracle = DBSystemKey.String("oracle") // IBM DB2 DBSystemDB2 = DBSystemKey.String("db2") // PostgreSQL DBSystemPostgreSQL = DBSystemKey.String("postgresql") // Amazon Redshift DBSystemRedshift = DBSystemKey.String("redshift") // Apache Hive DBSystemHive = DBSystemKey.String("hive") // Cloudscape DBSystemCloudscape = DBSystemKey.String("cloudscape") // HyperSQL DataBase DBSystemHSQLDB = DBSystemKey.String("hsqldb") // Progress Database DBSystemProgress = DBSystemKey.String("progress") // SAP MaxDB DBSystemMaxDB = DBSystemKey.String("maxdb") // SAP HANA DBSystemHanaDB = DBSystemKey.String("hanadb") // Ingres DBSystemIngres = DBSystemKey.String("ingres") // FirstSQL DBSystemFirstSQL = DBSystemKey.String("firstsql") // EnterpriseDB DBSystemEDB = DBSystemKey.String("edb") // InterSystems Caché DBSystemCache = DBSystemKey.String("cache") // Adabas (Adaptable Database System) DBSystemAdabas = DBSystemKey.String("adabas") // Firebird DBSystemFirebird = DBSystemKey.String("firebird") // Apache Derby DBSystemDerby = DBSystemKey.String("derby") // FileMaker DBSystemFilemaker = DBSystemKey.String("filemaker") // Informix DBSystemInformix = DBSystemKey.String("informix") // InstantDB DBSystemInstantDB = DBSystemKey.String("instantdb") // InterBase DBSystemInterbase = DBSystemKey.String("interbase") // MariaDB DBSystemMariaDB = DBSystemKey.String("mariadb") // Netezza DBSystemNetezza = DBSystemKey.String("netezza") // Pervasive PSQL DBSystemPervasive = DBSystemKey.String("pervasive") // PointBase DBSystemPointbase = DBSystemKey.String("pointbase") // SQLite DBSystemSqlite = DBSystemKey.String("sqlite") // Sybase DBSystemSybase = DBSystemKey.String("sybase") // Teradata DBSystemTeradata = DBSystemKey.String("teradata") // Vertica DBSystemVertica = DBSystemKey.String("vertica") // H2 DBSystemH2 = DBSystemKey.String("h2") // ColdFusion IMQ DBSystemColdfusion = DBSystemKey.String("coldfusion") // Apache Cassandra DBSystemCassandra = DBSystemKey.String("cassandra") // Apache HBase DBSystemHBase = DBSystemKey.String("hbase") // MongoDB DBSystemMongoDB = DBSystemKey.String("mongodb") // Redis DBSystemRedis = DBSystemKey.String("redis") // Couchbase DBSystemCouchbase = 
DBSystemKey.String("couchbase") // CouchDB DBSystemCouchDB = DBSystemKey.String("couchdb") // Microsoft Azure Cosmos DB DBSystemCosmosDB = DBSystemKey.String("cosmosdb") // Amazon DynamoDB DBSystemDynamoDB = DBSystemKey.String("dynamodb") // Neo4j DBSystemNeo4j = DBSystemKey.String("neo4j") // Apache Geode DBSystemGeode = DBSystemKey.String("geode") // Elasticsearch DBSystemElasticsearch = DBSystemKey.String("elasticsearch") // Memcached DBSystemMemcached = DBSystemKey.String("memcached") // CockroachDB DBSystemCockroachdb = DBSystemKey.String("cockroachdb") ) // Connection-level attributes for Microsoft SQL Server const ( // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en- // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) // connecting to. This name is used to determine the port of a named instance. // // Type: string // Required: No // Stability: stable // Examples: 'MSSQLSERVER' // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer // required (but still recommended if non-standard). DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") ) // Call-level attributes for Cassandra const ( // The fetch size used for paging, i.e. how many rows will be returned at once. // // Type: int // Required: No // Stability: stable // Examples: 5000 DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") // The consistency level of the query. Based on consistency values from // [CQL](https://docs.datastax.com/en/cassandra- // oss/3.0/cassandra/dml/dmlConfigConsistency.html). // // Type: Enum // Required: No // Stability: stable DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") // The name of the primary table that the operation is acting upon, including the // keyspace name (if applicable). // // Type: string // Required: Recommended if available. // Stability: stable // Examples: 'mytable' // Note: This mirrors the db.sql.table attribute but references cassandra rather // than sql. It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting upon an // anonymous table, or more than one table, this value MUST NOT be set. DBCassandraTableKey = attribute.Key("db.cassandra.table") // Whether or not the query is idempotent. // // Type: boolean // Required: No // Stability: stable DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") // The number of times a query was speculatively executed. Not set or `0` if the // query was not executed speculatively. // // Type: int // Required: No // Stability: stable // Examples: 0, 2 DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") // The ID of the coordinating node for a query. // // Type: string // Required: No // Stability: stable // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") // The data center of the coordinating node for a query. 
// // Type: string // Required: No // Stability: stable // Examples: 'us-west-2' DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") ) var ( // all DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") // each_quorum DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") // quorum DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") // local_quorum DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") // one DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") // two DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") // three DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") // local_one DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") // any DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") // serial DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") // local_serial DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") ) // Call-level attributes for Redis const ( // The index of the database being accessed as used in the [`SELECT` // command](https://redis.io/commands/select), provided as an integer. To be used // instead of the generic `db.name` attribute. // // Type: int // Required: Required, if other than the default database (`0`). // Stability: stable // Examples: 0, 1, 15 DBRedisDBIndexKey = attribute.Key("db.redis.database_index") ) // Call-level attributes for MongoDB const ( // The collection being accessed within the database stated in `db.name`. // // Type: string // Required: Always // Stability: stable // Examples: 'customers', 'products' DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") ) // Call-level attributes for SQL databases const ( // The name of the primary table that the operation is acting upon, including the // database name (if applicable). // // Type: string // Required: Recommended if available. // Stability: stable // Examples: 'public.users', 'customers' // Note: It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting upon an // anonymous table, or more than one table, this value MUST NOT be set. DBSQLTableKey = attribute.Key("db.sql.table") ) // This document defines the attributes used to report a single exception associated with a span. const ( // The type of the exception (its fully-qualified class name, if applicable). The // dynamic type of the exception should be preferred over the static type in // languages that support it. // // Type: string // Required: No // Stability: stable // Examples: 'java.net.ConnectException', 'OSError' ExceptionTypeKey = attribute.Key("exception.type") // The exception message. // // Type: string // Required: No // Stability: stable // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" ExceptionMessageKey = attribute.Key("exception.message") // A stacktrace as a string in the natural representation for the language // runtime. The representation is to be determined and documented by each language // SIG. 
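//
// Usage sketch (illustrative): an exception event might be recorded on an
// existing span with these keys; span is a placeholder for a
// go.opentelemetry.io/otel/trace Span, and the values are the documented
// examples. (The SDK's RecordError helper records a comparable event.)
//
//	span.AddEvent("exception", trace.WithAttributes(
//		ExceptionTypeKey.String("OSError"),
//		ExceptionMessageKey.String("Division by zero"),
//	))
//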
// // Type: string // Required: No // Stability: stable // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test // exception\\n at ' // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' ExceptionStacktraceKey = attribute.Key("exception.stacktrace") // SHOULD be set to true if the exception event is recorded at a point where it is // known that the exception is escaping the scope of the span. // // Type: boolean // Required: No // Stability: stable // Note: An exception is considered to have escaped (or left) the scope of a span, // if that span is ended while the exception is still logically "in flight". // This may be actually "in flight" in some languages (e.g. if the exception // is passed to a Context manager's `__exit__` method in Python) but will // usually be caught at the point of recording the exception in most languages. // It is usually not possible to determine at the point where an exception is // thrown // whether it will escape the scope of a span. // However, it is trivial to know that an exception // will escape, if one checks for an active exception just before ending the span, // as done in the [example above](#exception-end-example). // It follows that an exception may still escape the scope of the span // even if the `exception.escaped` attribute was not set or set to false, // since the event might have been recorded at a time where it was not // clear whether the exception will escape. ExceptionEscapedKey = attribute.Key("exception.escaped") ) // This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. const ( // Type of the trigger which caused this function execution. // // Type: Enum // Required: No // Stability: stable // Note: For the server/consumer span on the incoming side, // `faas.trigger` MUST be set. // Clients invoking FaaS instances usually cannot set `faas.trigger`, // since they would typically need to look in the payload to determine // the event type. If clients set it, it should be the same as the // trigger that corresponding incoming would have (i.e., this has // nothing to do with the underlying transport used to make the API // call to invoke the lambda, which is often HTTP). FaaSTriggerKey = attribute.Key("faas.trigger") // The execution ID of the current function execution. // // Type: string // Required: No // Stability: stable // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' FaaSExecutionKey = attribute.Key("faas.execution") ) var ( // A response to some data source operation such as a database or filesystem read/write FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") // To provide an answer to an inbound HTTP request FaaSTriggerHTTP = FaaSTriggerKey.String("http") // A function is set to be executed when messages are sent to a messaging system FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") // A function is scheduled to be executed regularly FaaSTriggerTimer = FaaSTriggerKey.String("timer") // If none of the others apply FaaSTriggerOther = FaaSTriggerKey.String("other") ) // Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. const ( // The name of the source on which the triggering operation was performed. 
For // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos // DB to the database name. // // Type: string // Required: Always // Stability: stable // Examples: 'myBucketName', 'myDBName' FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") // Describes the type of the operation that was performed on the data. // // Type: Enum // Required: Always // Stability: stable FaaSDocumentOperationKey = attribute.Key("faas.document.operation") // A string containing the time when the data was accessed in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed // in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // Required: Always // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSDocumentTimeKey = attribute.Key("faas.document.time") // The document name/table subjected to the operation. For example, in Cloud // Storage or S3 is the name of the file, and in Cosmos DB the table name. // // Type: string // Required: No // Stability: stable // Examples: 'myFile.txt', 'myTableName' FaaSDocumentNameKey = attribute.Key("faas.document.name") ) var ( // When a new object is created FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") // When an object is modified FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") // When an object is deleted FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") ) // Semantic Convention for FaaS scheduled to be executed regularly. const ( // A string containing the function invocation time in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed // in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // Required: Always // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSTimeKey = attribute.Key("faas.time") // A string containing the schedule period as [Cron Expression](https://docs.oracl // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). // // Type: string // Required: No // Stability: stable // Examples: '0/5 * * * ? *' FaaSCronKey = attribute.Key("faas.cron") ) // Contains additional attributes for incoming FaaS spans. const ( // A boolean that is true if the serverless function is executed for the first // time (aka cold-start). // // Type: boolean // Required: No // Stability: stable FaaSColdstartKey = attribute.Key("faas.coldstart") ) // Contains additional attributes for outgoing FaaS spans. const ( // The name of the invoked function. // // Type: string // Required: Always // Stability: stable // Examples: 'my-function' // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked // function. FaaSInvokedNameKey = attribute.Key("faas.invoked_name") // The cloud provider of the invoked function. // // Type: Enum // Required: Always // Stability: stable // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked // function. FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") // The cloud region of the invoked function. // // Type: string // Required: For some cloud providers, like AWS or GCP, the region in which a // function is hosted is essential to uniquely identify the function and also part // of its endpoint. Since it's part of the endpoint being called, the region is // always known to clients. In these cases, `faas.invoked_region` MUST be set // accordingly. 
If the region is unknown to the client or not required for // identifying the invoked function, setting `faas.invoked_region` is optional. // Stability: stable // Examples: 'eu-central-1' // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked // function. FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") ) var ( // Alibaba Cloud FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") // Amazon Web Services FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") // Microsoft Azure FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") // Google Cloud Platform FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") // Tencent Cloud FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") ) // These attributes may be used for any network related operation. const ( // Transport protocol used. See note below. // // Type: Enum // Required: No // Stability: stable NetTransportKey = attribute.Key("net.transport") // Remote address of the peer (dotted decimal for IPv4 or // [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6) // // Type: string // Required: No // Stability: stable // Examples: '127.0.0.1' NetPeerIPKey = attribute.Key("net.peer.ip") // Remote port number. // // Type: int // Required: No // Stability: stable // Examples: 80, 8080, 443 NetPeerPortKey = attribute.Key("net.peer.port") // Remote hostname or similar, see note below. // // Type: string // Required: No // Stability: stable // Examples: 'example.com' NetPeerNameKey = attribute.Key("net.peer.name") // Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host. // // Type: string // Required: No // Stability: stable // Examples: '192.168.0.1' NetHostIPKey = attribute.Key("net.host.ip") // Like `net.peer.port` but for the host port. // // Type: int // Required: No // Stability: stable // Examples: 35555 NetHostPortKey = attribute.Key("net.host.port") // Local hostname or similar, see note below. // // Type: string // Required: No // Stability: stable // Examples: 'localhost' NetHostNameKey = attribute.Key("net.host.name") // The internet connection type currently being used by the host. // // Type: Enum // Required: No // Stability: stable // Examples: 'wifi' NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") // This describes more details regarding the connection.type. It may be the type // of cell technology connection, but it could be used for describing details // about a wifi connection. // // Type: Enum // Required: No // Stability: stable // Examples: 'LTE' NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") // The name of the mobile carrier. // // Type: string // Required: No // Stability: stable // Examples: 'sprint' NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") // The mobile carrier country code. // // Type: string // Required: No // Stability: stable // Examples: '310' NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") // The mobile carrier network code. // // Type: string // Required: No // Stability: stable // Examples: '001' NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") // The ISO 3166-1 alpha-2 2-character country code associated with the mobile // carrier network. 
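//
// Usage sketch (illustrative): network-level peer attributes could be attached
// to any span; span is assumed to be an active go.opentelemetry.io/otel/trace
// Span, and the values come from the documented examples.
//
//	span.SetAttributes(
//		NetPeerNameKey.String("example.com"),
//		NetPeerIPKey.String("127.0.0.1"),
//		NetPeerPortKey.Int(443),
//	)
//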
// // Type: string // Required: No // Stability: stable // Examples: 'DE' NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") ) var ( // ip_tcp NetTransportTCP = NetTransportKey.String("ip_tcp") // ip_udp NetTransportUDP = NetTransportKey.String("ip_udp") // Another IP-based protocol NetTransportIP = NetTransportKey.String("ip") // Unix Domain socket. See below NetTransportUnix = NetTransportKey.String("unix") // Named or anonymous pipe. See note below NetTransportPipe = NetTransportKey.String("pipe") // In-process communication NetTransportInProc = NetTransportKey.String("inproc") // Something else (non IP-based) NetTransportOther = NetTransportKey.String("other") ) var ( // wifi NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") // wired NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") // cell NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") // unavailable NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") // unknown NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") ) var ( // GPRS NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") // EDGE NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") // UMTS NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") // CDMA NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") // EVDO Rel. 0 NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") // EVDO Rev. A NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") // CDMA2000 1XRTT NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") // HSDPA NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") // HSUPA NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") // HSPA NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") // IDEN NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") // EVDO Rev. B NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") // LTE NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") // EHRPD NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") // HSPAP NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") // GSM NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") // TD-SCDMA NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") // IWLAN NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") // 5G NR (New Radio) NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") // 5G NRNSA (New Radio Non-Standalone) NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") // LTE CA NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") ) // Operations that access some remote service. const ( // The [`service.name`](../../resource/semantic_conventions/README.md#service) of // the remote service. SHOULD be equal to the actual `service.name` resource // attribute of the remote service if any. // // Type: string // Required: No // Stability: stable // Examples: 'AuthTokenCache' PeerServiceKey = attribute.Key("peer.service") ) // These attributes may be used for any operation with an authenticated and/or authorized enduser. 
const ( // Username or client_id extracted from the access token or // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the // inbound request from outside the system. // // Type: string // Required: No // Stability: stable // Examples: 'username' EnduserIDKey = attribute.Key("enduser.id") // Actual/assumed role the client is making the request under extracted from token // or application security context. // // Type: string // Required: No // Stability: stable // Examples: 'admin' EnduserRoleKey = attribute.Key("enduser.role") // Scopes or granted authorities the client currently possesses extracted from // token or application security context. The value would come from the scope // associated with an [OAuth 2.0 Access // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value // in a [SAML 2.0 Assertion](http://docs.oasis- // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). // // Type: string // Required: No // Stability: stable // Examples: 'read:message, write:files' EnduserScopeKey = attribute.Key("enduser.scope") ) // These attributes may be used for any operation to store information about a thread that started a span. const ( // Current "managed" thread ID (as opposed to OS thread ID). // // Type: int // Required: No // Stability: stable // Examples: 42 ThreadIDKey = attribute.Key("thread.id") // Current thread name. // // Type: string // Required: No // Stability: stable // Examples: 'main' ThreadNameKey = attribute.Key("thread.name") ) // These attributes allow to report this unit of code and therefore to provide more context about the span. const ( // The method or function name, or equivalent (usually rightmost part of the code // unit's name). // // Type: string // Required: No // Stability: stable // Examples: 'serveRequest' CodeFunctionKey = attribute.Key("code.function") // The "namespace" within which `code.function` is defined. Usually the qualified // class or module name, such that `code.namespace` + some separator + // `code.function` form a unique identifier for the code unit. // // Type: string // Required: No // Stability: stable // Examples: 'com.example.MyHTTPService' CodeNamespaceKey = attribute.Key("code.namespace") // The source code file name that identifies the code unit as uniquely as possible // (preferably an absolute file path). // // Type: string // Required: No // Stability: stable // Examples: '/usr/local/MyApplication/content_root/app/index.php' CodeFilepathKey = attribute.Key("code.filepath") // The line number in `code.filepath` best representing the operation. It SHOULD // point within the code unit named in `code.function`. // // Type: int // Required: No // Stability: stable // Examples: 42 CodeLineNumberKey = attribute.Key("code.lineno") ) // This document defines semantic conventions for HTTP client and server Spans. const ( // HTTP request method. // // Type: string // Required: Always // Stability: stable // Examples: 'GET', 'POST', 'HEAD' HTTPMethodKey = attribute.Key("http.method") // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. // Usually the fragment is not transmitted over HTTP, but if it is known, it // should be included nevertheless. // // Type: string // Required: No // Stability: stable // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' // Note: `http.url` MUST NOT contain credentials passed via URL in form of // `https://username:password@www.example.com/`. 
In such case the attribute's // value should be `https://www.example.com/`. HTTPURLKey = attribute.Key("http.url") // The full request target as passed in a HTTP request line or equivalent. // // Type: string // Required: No // Stability: stable // Examples: '/path/12314/?q=ddds#123' HTTPTargetKey = attribute.Key("http.target") // The value of the [HTTP host // header](https://tools.ietf.org/html/rfc7230#section-5.4). An empty Host header // should also be reported, see note. // // Type: string // Required: No // Stability: stable // Examples: 'www.example.org' // Note: When the header is present but empty the attribute SHOULD be set to the // empty string. Note that this is a valid situation that is expected in certain // cases, according the aforementioned [section of RFC // 7230](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is not // set the attribute MUST NOT be set. HTTPHostKey = attribute.Key("http.host") // The URI scheme identifying the used protocol. // // Type: string // Required: No // Stability: stable // Examples: 'http', 'https' HTTPSchemeKey = attribute.Key("http.scheme") // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). // // Type: int // Required: If and only if one was received/sent. // Stability: stable // Examples: 200 HTTPStatusCodeKey = attribute.Key("http.status_code") // Kind of HTTP protocol used. // // Type: Enum // Required: No // Stability: stable // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP` // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed. HTTPFlavorKey = attribute.Key("http.flavor") // Value of the [HTTP User- // Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by the // client. // // Type: string // Required: No // Stability: stable // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' HTTPUserAgentKey = attribute.Key("http.user_agent") // The size of the request payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For // requests using transport encoding, this should be the compressed size. // // Type: int // Required: No // Stability: stable // Examples: 3495 HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") // The size of the uncompressed request payload body after transport decoding. Not // set if transport encoding not used. // // Type: int // Required: No // Stability: stable // Examples: 5493 HTTPRequestContentLengthUncompressedKey = attribute.Key("http.request_content_length_uncompressed") // The size of the response payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For // requests using transport encoding, this should be the compressed size. // // Type: int // Required: No // Stability: stable // Examples: 3495 HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") // The size of the uncompressed response payload body after transport decoding. // Not set if transport encoding not used. 
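//
// Usage sketch (illustrative): an HTTP client instrumentation might set the
// request attributes above on its span once the response status is known;
// span and the literal values are placeholders taken from the examples.
//
//	span.SetAttributes(
//		HTTPMethodKey.String("GET"),
//		HTTPURLKey.String("https://www.foo.bar/search?q=OpenTelemetry#SemConv"),
//		HTTPStatusCodeKey.Int(200),
//	)
//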
// // Type: int // Required: No // Stability: stable // Examples: 5493 HTTPResponseContentLengthUncompressedKey = attribute.Key("http.response_content_length_uncompressed") ) var ( // HTTP 1.0 HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") // HTTP 1.1 HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") // HTTP 2 HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") // SPDY protocol HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") // QUIC protocol HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") ) // Semantic Convention for HTTP Server const ( // The primary server name of the matched virtual host. This should be obtained // via configuration. If no such configuration can be obtained, this attribute // MUST NOT be set ( `net.host.name` should be used instead). // // Type: string // Required: No // Stability: stable // Examples: 'example.com' // Note: `http.url` is usually not readily available on the server side but would // have to be assembled in a cumbersome and sometimes lossy process from other // information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus // preferred to supply the raw data that is available. HTTPServerNameKey = attribute.Key("http.server_name") // The matched route (path template). // // Type: string // Required: No // Stability: stable // Examples: '/users/:userID?' HTTPRouteKey = attribute.Key("http.route") // The IP address of the original client behind all proxies, if known (e.g. from // [X-Forwarded-For](https://developer.mozilla.org/en- // US/docs/Web/HTTP/Headers/X-Forwarded-For)). // // Type: string // Required: No // Stability: stable // Examples: '83.164.160.102' // Note: This is not necessarily the same as `net.peer.ip`, which would // identify the network-level peer, which may be a proxy. // This attribute should be set when a source of information different // from the one used for `net.peer.ip`, is available even if that other // source just confirms the same value as `net.peer.ip`. // Rationale: For `net.peer.ip`, one typically does not know if it // comes from a proxy, reverse proxy, or the actual client. Setting // `http.client_ip` when it's the same as `net.peer.ip` means that // one is at least somewhat confident that the address is not that of // the closest proxy. HTTPClientIPKey = attribute.Key("http.client_ip") ) // Attributes that exist for multiple DynamoDB request types. const ( // The keys in the `RequestItems` object field. // // Type: string[] // Required: No // Stability: stable // Examples: 'Users', 'Cats' AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") // The JSON-serialized value of each item in the `ConsumedCapacity` response // field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": // "string", "WriteCapacityUnits": number }' AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") // The JSON-serialized value of the `ItemCollectionMetrics` response field. 
// // Type: string // Required: No // Stability: stable // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. // // Type: double // Required: No // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. // // Type: double // Required: No // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") // The value of the `ConsistentRead` request parameter. // // Type: boolean // Required: No // Stability: stable AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") // The value of the `ProjectionExpression` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, // ProductReviews' AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") // The value of the `Limit` request parameter. // // Type: int // Required: No // Stability: stable // Examples: 10 AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") // The value of the `AttributesToGet` request parameter. // // Type: string[] // Required: No // Stability: stable // Examples: 'lives', 'id' AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") // The value of the `IndexName` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'name_to_group' AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") // The value of the `Select` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'ALL_ATTRIBUTES', 'COUNT' AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") ) // DynamoDB.CreateTable const ( // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request // field // // Type: string[] // Required: No // Stability: stable // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": // number, "WriteCapacityUnits": number } }' AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request // field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], // "ProjectionType": "string" } }' AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") ) // DynamoDB.ListTables const ( // The value of the `ExclusiveStartTableName` request parameter. 
// // Type: string // Required: No // Stability: stable // Examples: 'Users', 'CatsTable' AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") // The number of items in the `TableNames` response parameter. // // Type: int // Required: No // Stability: stable // Examples: 20 AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") ) // DynamoDB.Query const ( // The value of the `ScanIndexForward` request parameter. // // Type: boolean // Required: No // Stability: stable AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") ) // DynamoDB.Scan const ( // The value of the `Segment` request parameter. // // Type: int // Required: No // Stability: stable // Examples: 10 AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") // The value of the `TotalSegments` request parameter. // // Type: int // Required: No // Stability: stable // Examples: 100 AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") // The value of the `Count` response parameter. // // Type: int // Required: No // Stability: stable // Examples: 10 AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") // The value of the `ScannedCount` response parameter. // // Type: int // Required: No // Stability: stable // Examples: 50 AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") ) // DynamoDB.UpdateTable const ( // The JSON-serialized value of each item in the `AttributeDefinitions` request // field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") // The JSON-serialized value of each item in the `GlobalSecondaryIndexUpdates` // request field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { // "AttributeName": "string", "KeyType": "string" } ], "Projection": { // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": // number } }' AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") ) // This document defines the attributes used in messaging systems. const ( // A string identifying the messaging system. // // Type: string // Required: Always // Stability: stable // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' MessagingSystemKey = attribute.Key("messaging.system") // The message destination name. This might be equal to the span name but is // required nevertheless. // // Type: string // Required: Always // Stability: stable // Examples: 'MyQueue', 'MyTopic' MessagingDestinationKey = attribute.Key("messaging.destination") // The kind of message destination // // Type: Enum // Required: Required only if the message destination is either a `queue` or // `topic`. // Stability: stable MessagingDestinationKindKey = attribute.Key("messaging.destination_kind") // A boolean that is true if the message destination is temporary. // // Type: boolean // Required: If missing, it is assumed to be false. // Stability: stable MessagingTempDestinationKey = attribute.Key("messaging.temp_destination") // The name of the transport protocol. // // Type: string // Required: No // Stability: stable // Examples: 'AMQP', 'MQTT' MessagingProtocolKey = attribute.Key("messaging.protocol") // The version of the transport protocol.
// // Type: string // Required: No // Stability: stable // Examples: '0.9.1' MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version") // Connection string. // // Type: string // Required: No // Stability: stable // Examples: 'tibjmsnaming://localhost:7222', // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' MessagingURLKey = attribute.Key("messaging.url") // A value used by the messaging system as an identifier for the message, // represented as a string. // // Type: string // Required: No // Stability: stable // Examples: '452a7c7c7c7048c2f887f61572b18fc2' MessagingMessageIDKey = attribute.Key("messaging.message_id") // The [conversation ID](#conversations) identifying the conversation to which the // message belongs, represented as a string. Sometimes called "Correlation ID". // // Type: string // Required: No // Stability: stable // Examples: 'MyConversationID' MessagingConversationIDKey = attribute.Key("messaging.conversation_id") // The (uncompressed) size of the message payload in bytes. Also use this // attribute if it is unknown whether the compressed or uncompressed payload size // is reported. // // Type: int // Required: No // Stability: stable // Examples: 2738 MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes") // The compressed size of the message payload in bytes. // // Type: int // Required: No // Stability: stable // Examples: 2048 MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes") ) var ( // A message sent to a queue MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") // A message sent to a topic MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") ) // Semantic convention for a consumer of messages received from a messaging system const ( // A string identifying the kind of message consumption as defined in the // [Operation names](#operation-names) section above. If the operation is "send", // this attribute MUST NOT be set, since the operation can be inferred from the // span kind in that case. // // Type: Enum // Required: No // Stability: stable MessagingOperationKey = attribute.Key("messaging.operation") // The identifier for the consumer receiving a message. For Kafka, set it to // `{messaging.kafka.consumer_group} - {messaging.kafka.client_id}`, if both are // present, or only `messaging.kafka.consumer_group`. For brokers, such as // RabbitMQ and Artemis, set it to the `client_id` of the client consuming the // message. // // Type: string // Required: No // Stability: stable // Examples: 'mygroup - client-6' MessagingConsumerIDKey = attribute.Key("messaging.consumer_id") ) var ( // receive MessagingOperationReceive = MessagingOperationKey.String("receive") // process MessagingOperationProcess = MessagingOperationKey.String("process") ) // Attributes for RabbitMQ const ( // RabbitMQ message routing key. // // Type: string // Required: Unless it is empty. // Stability: stable // Examples: 'myKey' MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key") ) // Attributes for Apache Kafka const ( // Message keys in Kafka are used for grouping alike messages to ensure they're // processed on the same partition. They differ from `messaging.message_id` in // that they're not unique. If the key is `null`, the attribute MUST NOT be set. 
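//
// Usage sketch (illustrative): a messaging consumer span might combine the
// general messaging attributes with the enum values defined above. The tracer,
// context, span name, and broker values are placeholders; trace refers to
// go.opentelemetry.io/otel/trace.
//
//	ctx, span := tracer.Start(ctx, "MyTopic receive",
//		trace.WithSpanKind(trace.SpanKindConsumer),
//		trace.WithAttributes(
//			MessagingSystemKey.String("kafka"),
//			MessagingDestinationKey.String("MyTopic"),
//			MessagingDestinationKindTopic,
//			MessagingOperationReceive,
//		))
//	defer span.End()
//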
// // Type: string // Required: No // Stability: stable // Examples: 'myKey' // Note: If the key type is not string, its string representation has to be // supplied for the attribute. If the key has no unambiguous, canonical string // form, don't include its value. MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key") // Name of the Kafka Consumer Group that is handling the message. Only applies to // consumers, not producers. // // Type: string // Required: No // Stability: stable // Examples: 'my-group' MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group") // Client ID for the Consumer or Producer that is handling the message. // // Type: string // Required: No // Stability: stable // Examples: 'client-5' MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") // Partition the message is sent to. // // Type: int // Required: No // Stability: stable // Examples: 2 MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition") // A boolean that is true if the message is a tombstone. // // Type: boolean // Required: If missing, it is assumed to be false. // Stability: stable MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone") ) // Attributes for Apache RocketMQ const ( // Namespace of RocketMQ resources, resources in different namespaces are // individual. // // Type: string // Required: Always // Stability: stable // Examples: 'myNamespace' MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") // Name of the RocketMQ producer/consumer group that is handling the message. The // client type is identified by the SpanKind. // // Type: string // Required: Always // Stability: stable // Examples: 'myConsumerGroup' MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") // The unique identifier for each client. // // Type: string // Required: Always // Stability: stable // Examples: 'myhost@8742@s8083jm' MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") // Type of message. // // Type: Enum // Required: No // Stability: stable MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message_type") // The secondary classifier of message besides topic. // // Type: string // Required: No // Stability: stable // Examples: 'tagA' MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message_tag") // Key(s) of message, another way to mark message besides message id. // // Type: string[] // Required: No // Stability: stable // Examples: 'keyA', 'keyB' MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message_keys") // Model of message consumption. This only applies to consumer spans.
// // Type: Enum // Required: No // Stability: stable MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") ) var ( // Normal message MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") // FIFO message MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") // Delay message MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") // Transaction message MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") ) var ( // Clustering consumption model MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") // Broadcasting consumption model MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") ) // This document defines semantic conventions for remote procedure calls. const ( // A string identifying the remoting system. // // Type: string // Required: Always // Stability: stable // Examples: 'grpc', 'java_rmi', 'wcf' RPCSystemKey = attribute.Key("rpc.system") // The full (logical) name of the service being called, including its package // name, if applicable. // // Type: string // Required: No, but recommended // Stability: stable // Examples: 'myservice.EchoService' // Note: This is the logical name of the service from the RPC interface // perspective, which can be different from the name of any implementing class. // The `code.namespace` attribute may be used to store the latter (despite the // attribute name, it may include a class name; e.g., class with method actually // executing the call on the server side, RPC client stub class on the client // side). RPCServiceKey = attribute.Key("rpc.service") // The name of the (logical) method being called, must be equal to the $method // part in the span name. // // Type: string // Required: No, but recommended // Stability: stable // Examples: 'exampleMethod' // Note: This is the logical name of the method from the RPC interface // perspective, which can be different from the name of any implementing // method/function. The `code.function` attribute may be used to store the latter // (e.g., method actually executing the call on the server side, RPC client stub // method on the client side). RPCMethodKey = attribute.Key("rpc.method") ) // Tech-specific attributes for gRPC. const ( // The [numeric status // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC // request. 
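//
// Editorial note (illustrative sketch, not generated from the specification): a
// gRPC client instrumentation might describe a call roughly as follows, assuming
// a trace.Span value named span:
//
//	span.SetAttributes(
//		RPCSystemKey.String("grpc"),
//		RPCServiceKey.String("myservice.EchoService"),
//		RPCMethodKey.String("Echo"),
//		RPCGRPCStatusCodeOk,
//	)
//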
// // Type: Enum // Required: Always // Stability: stable RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ) var ( // OK RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) // CANCELLED RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) // UNKNOWN RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) // INVALID_ARGUMENT RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) // DEADLINE_EXCEEDED RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) // NOT_FOUND RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) // ALREADY_EXISTS RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) // PERMISSION_DENIED RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) // RESOURCE_EXHAUSTED RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) // FAILED_PRECONDITION RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) // ABORTED RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) // OUT_OF_RANGE RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) // UNIMPLEMENTED RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) // INTERNAL RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) // UNAVAILABLE RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) // DATA_LOSS RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) // UNAUTHENTICATED RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) ) // Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). const ( // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC // 1.0 does not specify this, the value can be omitted. // // Type: string // Required: If missing, it is assumed to be "1.0". // Stability: stable // Examples: '2.0', '1.0' RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") // `id` property of request or response. Since protocol allows id to be int, // string, `null` or missing (for notifications), value is expected to be cast to // string for simplicity. Use empty string in case of `null` value. Omit entirely // if this is a notification. // // Type: string // Required: No // Stability: stable // Examples: '10', 'request-7', '' RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") // `error.code` property of response if it is an error response. // // Type: int // Required: If missing, response is assumed to be successful. // Stability: stable // Examples: -32700, 100 RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") // `error.message` property of response if it is an error response. // // Type: string // Required: No // Stability: stable // Examples: 'Parse error', 'User already exists' RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") ) // RPC received/sent message. const ( // Whether this is a received or sent message. // // Type: Enum // Required: No // Stability: stable MessageTypeKey = attribute.Key("message.type") // MUST be calculated as two different counters starting from `1` one for sent // messages and one for received message. // // Type: int // Required: No // Stability: stable // Note: This way we guarantee that the values will be consistent between // different implementations. MessageIDKey = attribute.Key("message.id") // Compressed size of the message in bytes. // // Type: int // Required: No // Stability: stable MessageCompressedSizeKey = attribute.Key("message.compressed_size") // Uncompressed size of the message in bytes. 
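//
// Editorial note (illustrative sketch, not generated from the specification): an
// RPC instrumentation might record each sent message as a span event, keeping a
// per-span counter, assuming a trace.Span value named span, an int counter named
// sentCount, and a payload byte slice named payload:
//
//	sentCount++
//	span.AddEvent("message", trace.WithAttributes(
//		MessageTypeSent,
//		MessageIDKey.Int(sentCount),
//		MessageUncompressedSizeKey.Int(len(payload)),
//	))
//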
// // Type: int // Required: No // Stability: stable MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") ) var ( // sent MessageTypeSent = MessageTypeKey.String("SENT") // received MessageTypeReceived = MessageTypeKey.String("RECEIVED") ) opentelemetry-go-1.21.0/semconv/v1.9.0/000077500000000000000000000000001452547353200174115ustar00rootroot00000000000000opentelemetry-go-1.21.0/semconv/v1.9.0/doc.go000066400000000000000000000016621452547353200205120ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package semconv implements OpenTelemetry semantic conventions. // // OpenTelemetry semantic conventions are agreed standardized naming // patterns for OpenTelemetry things. This package represents the conventions // as of the v1.9.0 version of the OpenTelemetry specification. package semconv // import "go.opentelemetry.io/otel/semconv/v1.9.0" opentelemetry-go-1.21.0/semconv/v1.9.0/exception.go000066400000000000000000000014271452547353200217420ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.9.0" const ( // ExceptionEventName is the name of the Span event representing an exception. ExceptionEventName = "exception" ) opentelemetry-go-1.21.0/semconv/v1.9.0/http.go000066400000000000000000000113111452547353200207140ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.9.0" import ( "net/http" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/semconv/internal" "go.opentelemetry.io/otel/trace" ) // HTTP scheme attributes. 
var ( HTTPSchemeHTTP = HTTPSchemeKey.String("http") HTTPSchemeHTTPS = HTTPSchemeKey.String("https") ) var sc = &internal.SemanticConventions{ EnduserIDKey: EnduserIDKey, HTTPClientIPKey: HTTPClientIPKey, HTTPFlavorKey: HTTPFlavorKey, HTTPHostKey: HTTPHostKey, HTTPMethodKey: HTTPMethodKey, HTTPRequestContentLengthKey: HTTPRequestContentLengthKey, HTTPRouteKey: HTTPRouteKey, HTTPSchemeHTTP: HTTPSchemeHTTP, HTTPSchemeHTTPS: HTTPSchemeHTTPS, HTTPServerNameKey: HTTPServerNameKey, HTTPStatusCodeKey: HTTPStatusCodeKey, HTTPTargetKey: HTTPTargetKey, HTTPURLKey: HTTPURLKey, HTTPUserAgentKey: HTTPUserAgentKey, NetHostIPKey: NetHostIPKey, NetHostNameKey: NetHostNameKey, NetHostPortKey: NetHostPortKey, NetPeerIPKey: NetPeerIPKey, NetPeerNameKey: NetPeerNameKey, NetPeerPortKey: NetPeerPortKey, NetTransportIP: NetTransportIP, NetTransportOther: NetTransportOther, NetTransportTCP: NetTransportTCP, NetTransportUDP: NetTransportUDP, NetTransportUnix: NetTransportUnix, } // NetAttributesFromHTTPRequest generates attributes of the net // namespace as specified by the OpenTelemetry specification for a // span. The network parameter is a string that net.Dial function // from standard library can understand. func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { return sc.NetAttributesFromHTTPRequest(network, request) } // EndUserAttributesFromHTTPRequest generates attributes of the // enduser namespace as specified by the OpenTelemetry specification // for a span. func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { return sc.EndUserAttributesFromHTTPRequest(request) } // HTTPClientAttributesFromHTTPRequest generates attributes of the // http namespace as specified by the OpenTelemetry specification for // a span on the client side. func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { return sc.HTTPClientAttributesFromHTTPRequest(request) } // HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes // to be used with server-side HTTP metrics. func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { return sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, request) } // HTTPServerAttributesFromHTTPRequest generates attributes of the // http namespace as specified by the OpenTelemetry specification for // a span on the server side. Currently, only basic authentication is // supported. func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { return sc.HTTPServerAttributesFromHTTPRequest(serverName, route, request) } // HTTPAttributesFromHTTPStatusCode generates attributes of the http // namespace as specified by the OpenTelemetry specification for a // span. func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { return sc.HTTPAttributesFromHTTPStatusCode(code) } // SpanStatusFromHTTPStatusCode generates a status code and a message // as specified by the OpenTelemetry specification for a span. func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { return internal.SpanStatusFromHTTPStatusCode(code) } // SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message // as specified by the OpenTelemetry specification for a span. // Exclude 4xx for SERVER to set the appropriate status. 
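//
// Editorial note (illustrative sketch, not generated from the specification):
// server-side instrumentation might combine the helpers in this file roughly as
// follows, assuming an *http.Request named req, an HTTP status code named status,
// and a trace.Span value named span:
//
//	span.SetAttributes(HTTPServerAttributesFromHTTPRequest("my-server", "/users/:id", req)...)
//	code, msg := SpanStatusFromHTTPStatusCodeAndSpanKind(status, trace.SpanKindServer)
//	span.SetStatus(code, msg)
//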
func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { return internal.SpanStatusFromHTTPStatusCodeAndSpanKind(code, spanKind) } opentelemetry-go-1.21.0/semconv/v1.9.0/resource.go000066400000000000000000001013171452547353200215720ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.9.0" import "go.opentelemetry.io/otel/attribute" // A cloud environment (e.g. GCP, Azure, AWS) const ( // Name of the cloud provider. // // Type: Enum // Required: No // Stability: stable CloudProviderKey = attribute.Key("cloud.provider") // The cloud account ID the resource is assigned to. // // Type: string // Required: No // Stability: stable // Examples: '111111111111', 'opentelemetry' CloudAccountIDKey = attribute.Key("cloud.account.id") // The geographical region the resource is running. // // Type: string // Required: No // Stability: stable // Examples: 'us-central1', 'us-east-1' // Note: Refer to your provider's docs to see the available regions, for example // [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc- // detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global- // infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en- // us/global-infrastructure/geographies/), [Google Cloud // regions](https://cloud.google.com/about/locations), or [Tencent Cloud // regions](https://intl.cloud.tencent.com/document/product/213/6091). CloudRegionKey = attribute.Key("cloud.region") // Cloud regions often have multiple, isolated locations known as zones to // increase availability. Availability zone represents the zone where the resource // is running. // // Type: string // Required: No // Stability: stable // Examples: 'us-east-1c' // Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud. CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") // The cloud platform in use. // // Type: Enum // Required: No // Stability: stable // Note: The prefix of the service SHOULD match the one specified in // `cloud.provider`. 
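//
// Editorial note (illustrative sketch, not generated from the specification): a
// resource detector reporting a consistent provider/platform pair might do so
// roughly as follows, assuming go.opentelemetry.io/otel/sdk/resource is imported
// as resource:
//
//	res := resource.NewWithAttributes(
//		SchemaURL,
//		CloudProviderAWS,
//		CloudPlatformAWSEC2,
//		CloudRegionKey.String("us-east-1"),
//	)
//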
CloudPlatformKey = attribute.Key("cloud.platform") ) var ( // Alibaba Cloud CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") // Amazon Web Services CloudProviderAWS = CloudProviderKey.String("aws") // Microsoft Azure CloudProviderAzure = CloudProviderKey.String("azure") // Google Cloud Platform CloudProviderGCP = CloudProviderKey.String("gcp") // Tencent Cloud CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") ) var ( // Alibaba Cloud Elastic Compute Service CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") // Alibaba Cloud Function Compute CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") // AWS Elastic Compute Cloud CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") // AWS Elastic Container Service CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") // AWS Elastic Kubernetes Service CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") // AWS Lambda CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") // AWS Elastic Beanstalk CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") // AWS App Runner CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") // Azure Virtual Machines CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") // Azure Container Instances CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") // Azure Kubernetes Service CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") // Azure Functions CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") // Azure App Service CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") // Google Cloud Compute Engine (GCE) CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") // Google Cloud Run CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") // Google Cloud Kubernetes Engine (GKE) CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") // Google Cloud Functions (GCF) CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") // Google Cloud App Engine (GAE) CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") // Tencent Cloud Cloud Virtual Machine (CVM) CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") // Tencent Cloud Elastic Kubernetes Service (EKS) CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") // Tencent Cloud Serverless Cloud Function (SCF) CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") ) // Resources used by AWS Elastic Container Service (ECS). const ( // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws. // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us- // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo // perguide/clusters.html). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l // aunch_types.html) for an ECS task. 
// // Type: Enum // Required: No // Stability: stable AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates // t/developerguide/task_definitions.html). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us- // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") // The task definition family this task definition is a member of. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-family' AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") // The revision for this task definition. // // Type: string // Required: No // Stability: stable // Examples: '8', '26' AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") ) var ( // ec2 AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") // fargate AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") ) // Resources used by AWS Elastic Kubernetes Service (EKS). const ( // The ARN of an EKS cluster. // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") ) // Resources specific to Amazon Web Services. const ( // The name(s) of the AWS log group(s) an application is writing to. // // Type: string[] // Required: No // Stability: stable // Examples: '/aws/lambda/my-function', 'opentelemetry-service' // Note: Multiple log groups must be supported for cases like multi-container // applications, where a single application has sidecar containers, and each write // to their own log group. AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") // The Amazon Resource Name(s) (ARN) of the AWS log group(s). // // Type: string[] // Required: No // Stability: stable // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' // Note: See the [log group ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- // access-control-overview-cwl.html#CWL_ARN_Format). AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") // The name(s) of the AWS log stream(s) an application is writing to. // // Type: string[] // Required: No // Stability: stable // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") // The ARN(s) of the AWS log stream(s). // // Type: string[] // Required: No // Stability: stable // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' // Note: See the [log stream ARN format // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain // several log streams, so these ARNs necessarily identify both a log group and a // log stream. AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") ) // A container instance. const ( // Container name used by container runtime. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-autoconf' ContainerNameKey = attribute.Key("container.name") // Container ID. Usually a UUID, as for example used to [identify Docker // containers](https://docs.docker.com/engine/reference/run/#container- // identification). The UUID might be abbreviated. 
// // Type: string // Required: No // Stability: stable // Examples: 'a3bf90e006b2' ContainerIDKey = attribute.Key("container.id") // The container runtime managing this container. // // Type: string // Required: No // Stability: stable // Examples: 'docker', 'containerd', 'rkt' ContainerRuntimeKey = attribute.Key("container.runtime") // Name of the image the container was built on. // // Type: string // Required: No // Stability: stable // Examples: 'gcr.io/opentelemetry/operator' ContainerImageNameKey = attribute.Key("container.image.name") // Container image tag. // // Type: string // Required: No // Stability: stable // Examples: '0.1' ContainerImageTagKey = attribute.Key("container.image.tag") ) // The software deployment. const ( // Name of the [deployment // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka // deployment tier). // // Type: string // Required: No // Stability: stable // Examples: 'staging', 'production' DeploymentEnvironmentKey = attribute.Key("deployment.environment") ) // The device on which the process represented by this resource is running. const ( // A unique identifier representing the device // // Type: string // Required: No // Stability: stable // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' // Note: The device identifier MUST only be defined using the values outlined // below. This value is not an advertising identifier and MUST NOT be used as // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the // Firebase Installation ID or a globally unique UUID which is persisted across // sessions in your application. More information can be found // [here](https://developer.android.com/training/articles/user-data-ids) on best // practices and exact implementation details. Caution should be taken when // storing personal data or anything which can identify a user. GDPR and data // protection laws may apply, ensure you do your own due diligence. DeviceIDKey = attribute.Key("device.id") // The model identifier for the device // // Type: string // Required: No // Stability: stable // Examples: 'iPhone3,4', 'SM-G920F' // Note: It's recommended this value represents a machine readable version of the // model identifier rather than the market or consumer-friendly name of the // device. DeviceModelIdentifierKey = attribute.Key("device.model.identifier") // The marketing name for the device model // // Type: string // Required: No // Stability: stable // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' // Note: It's recommended this value represents a human readable version of the // device model rather than a machine readable alternative. DeviceModelNameKey = attribute.Key("device.model.name") // The name of the device manufacturer // // Type: string // Required: No // Stability: stable // Examples: 'Apple', 'Samsung' // Note: The Android OS provides this field via // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). // iOS apps SHOULD hardcode the value `Apple`. DeviceManufacturerKey = attribute.Key("device.manufacturer") ) // A serverless instance. const ( // The name of the single function that this runtime instance executes. 
// // Type: string // Required: Always // Stability: stable // Examples: 'my-function' // Note: This is the name of the function as configured/deployed on the FaaS // platform and is usually different from the name of the callback function (which // may be stored in the // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span- // general.md#source-code-attributes) span attributes). FaaSNameKey = attribute.Key("faas.name") // The unique ID of the single function that this runtime instance executes. // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' // Note: Depending on the cloud provider, use: // * **AWS Lambda:** The function // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and- // namespaces.html). // Take care not to use the "invoked ARN" directly but replace any // [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration- // aliases.html) with the resolved function version, as the same runtime instance // may be invokable with multiple // different aliases. // * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full- // resource-names) // * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en- // us/rest/api/resources/resources/get-by-id). // On some providers, it may not be possible to determine the full ID at startup, // which is why this field cannot be made required. For example, on AWS the // account ID // part of the ARN is not available without calling another AWS API // which may be deemed too slow for a short-running lambda function. // As an alternative, consider setting `faas.id` as a span attribute instead. FaaSIDKey = attribute.Key("faas.id") // The immutable version of the function being executed. // // Type: string // Required: No // Stability: stable // Examples: '26', 'pinkfroid-00002' // Note: Depending on the cloud provider and platform, use: // * **AWS Lambda:** The [function // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration- // versions.html) // (an integer represented as a decimal string). // * **Google Cloud Run:** The // [revision](https://cloud.google.com/run/docs/managing/revisions) // (i.e., the function name plus the revision suffix). // * **Google Cloud Functions:** The value of the // [`K_REVISION` environment // variable](https://cloud.google.com/functions/docs/env- // var#runtime_environment_variables_set_automatically). // * **Azure Functions:** Not applicable. Do not set this attribute. FaaSVersionKey = attribute.Key("faas.version") // The execution environment ID as a string, that will be potentially reused for // other invocations to the same function/function version. // // Type: string // Required: No // Stability: stable // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' // Note: * **AWS Lambda:** Use the (full) log stream name. FaaSInstanceKey = attribute.Key("faas.instance") // The amount of memory available to the serverless function in MiB. // // Type: int // Required: No // Stability: stable // Examples: 128 // Note: It's recommended to set this attribute since e.g. too little memory can // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this // information. FaaSMaxMemoryKey = attribute.Key("faas.max_memory") ) // A host is defined as a general computing instance. const ( // Unique host ID. 
For Cloud, this must be the instance_id assigned by the cloud // provider. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-test' HostIDKey = attribute.Key("host.id") // Name of the host. On Unix systems, it may contain what the hostname command // returns, or the fully qualified hostname, or another name specified by the // user. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-test' HostNameKey = attribute.Key("host.name") // Type of host. For Cloud, this must be the machine type. // // Type: string // Required: No // Stability: stable // Examples: 'n1-standard-1' HostTypeKey = attribute.Key("host.type") // The CPU architecture the host system is running on. // // Type: Enum // Required: No // Stability: stable HostArchKey = attribute.Key("host.arch") // Name of the VM image or OS install the host was instantiated from. // // Type: string // Required: No // Stability: stable // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' HostImageNameKey = attribute.Key("host.image.name") // VM image ID. For Cloud, this value is from the provider. // // Type: string // Required: No // Stability: stable // Examples: 'ami-07b06b442921831e5' HostImageIDKey = attribute.Key("host.image.id") // The version string of the VM image as defined in [Version // Attributes](README.md#version-attributes). // // Type: string // Required: No // Stability: stable // Examples: '0.1' HostImageVersionKey = attribute.Key("host.image.version") ) var ( // AMD64 HostArchAMD64 = HostArchKey.String("amd64") // ARM32 HostArchARM32 = HostArchKey.String("arm32") // ARM64 HostArchARM64 = HostArchKey.String("arm64") // Itanium HostArchIA64 = HostArchKey.String("ia64") // 32-bit PowerPC HostArchPPC32 = HostArchKey.String("ppc32") // 64-bit PowerPC HostArchPPC64 = HostArchKey.String("ppc64") // IBM z/Architecture HostArchS390x = HostArchKey.String("s390x") // 32-bit x86 HostArchX86 = HostArchKey.String("x86") ) // A Kubernetes Cluster. const ( // The name of the cluster. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-cluster' K8SClusterNameKey = attribute.Key("k8s.cluster.name") ) // A Kubernetes Node object. const ( // The name of the Node. // // Type: string // Required: No // Stability: stable // Examples: 'node-1' K8SNodeNameKey = attribute.Key("k8s.node.name") // The UID of the Node. // // Type: string // Required: No // Stability: stable // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' K8SNodeUIDKey = attribute.Key("k8s.node.uid") ) // A Kubernetes Namespace. const ( // The name of the namespace that the pod is running in. // // Type: string // Required: No // Stability: stable // Examples: 'default' K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") ) // A Kubernetes Pod object. const ( // The UID of the Pod. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SPodUIDKey = attribute.Key("k8s.pod.uid") // The name of the Pod. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry-pod-autoconf' K8SPodNameKey = attribute.Key("k8s.pod.name") ) // A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). const ( // The name of the Container from Pod specification, must be unique within a Pod. // Container runtime usually uses different globally unique name // (`container.name`). 
// // Type: string // Required: No // Stability: stable // Examples: 'redis' K8SContainerNameKey = attribute.Key("k8s.container.name") // Number of times the container was restarted. This attribute can be used to // identify a particular container (running or stopped) within a container spec. // // Type: int // Required: No // Stability: stable // Examples: 0, 2 K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") ) // A Kubernetes ReplicaSet object. const ( // The UID of the ReplicaSet. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") // The name of the ReplicaSet. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") ) // A Kubernetes Deployment object. const ( // The UID of the Deployment. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") // The name of the Deployment. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") ) // A Kubernetes StatefulSet object. const ( // The UID of the StatefulSet. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") // The name of the StatefulSet. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") ) // A Kubernetes DaemonSet object. const ( // The UID of the DaemonSet. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") // The name of the DaemonSet. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") ) // A Kubernetes Job object. const ( // The UID of the Job. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SJobUIDKey = attribute.Key("k8s.job.uid") // The name of the Job. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SJobNameKey = attribute.Key("k8s.job.name") ) // A Kubernetes CronJob object. const ( // The UID of the CronJob. // // Type: string // Required: No // Stability: stable // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") // The name of the CronJob. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") ) // The operating system (OS) on which the process represented by this resource is running. const ( // The operating system type. // // Type: Enum // Required: Always // Stability: stable OSTypeKey = attribute.Key("os.type") // Human readable (not intended to be parsed) OS version information, like e.g. // reported by `ver` or `lsb_release -a` commands. // // Type: string // Required: No // Stability: stable // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' OSDescriptionKey = attribute.Key("os.description") // Human readable operating system name. 
// // Type: string // Required: No // Stability: stable // Examples: 'iOS', 'Android', 'Ubuntu' OSNameKey = attribute.Key("os.name") // The version string of the operating system as defined in [Version // Attributes](../../resource/semantic_conventions/README.md#version-attributes). // // Type: string // Required: No // Stability: stable // Examples: '14.2.1', '18.04.1' OSVersionKey = attribute.Key("os.version") ) var ( // Microsoft Windows OSTypeWindows = OSTypeKey.String("windows") // Linux OSTypeLinux = OSTypeKey.String("linux") // Apple Darwin OSTypeDarwin = OSTypeKey.String("darwin") // FreeBSD OSTypeFreeBSD = OSTypeKey.String("freebsd") // NetBSD OSTypeNetBSD = OSTypeKey.String("netbsd") // OpenBSD OSTypeOpenBSD = OSTypeKey.String("openbsd") // DragonFly BSD OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") // HP-UX (Hewlett Packard Unix) OSTypeHPUX = OSTypeKey.String("hpux") // AIX (Advanced Interactive eXecutive) OSTypeAIX = OSTypeKey.String("aix") // Oracle Solaris OSTypeSolaris = OSTypeKey.String("solaris") // IBM z/OS OSTypeZOS = OSTypeKey.String("z_os") ) // An operating system process. const ( // Process identifier (PID). // // Type: int // Required: No // Stability: stable // Examples: 1234 ProcessPIDKey = attribute.Key("process.pid") // The name of the process executable. On Linux based systems, can be set to the // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of // `GetProcessImageFileNameW`. // // Type: string // Required: See below // Stability: stable // Examples: 'otelcol' ProcessExecutableNameKey = attribute.Key("process.executable.name") // The full path to the process executable. On Linux based systems, can be set to // the target of `proc/[pid]/exe`. On Windows, can be set to the result of // `GetProcessImageFileNameW`. // // Type: string // Required: See below // Stability: stable // Examples: '/usr/bin/cmd/otelcol' ProcessExecutablePathKey = attribute.Key("process.executable.path") // The command used to launch the process (i.e. the command name). On Linux based // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, // can be set to the first parameter extracted from `GetCommandLineW`. // // Type: string // Required: See below // Stability: stable // Examples: 'cmd/otelcol' ProcessCommandKey = attribute.Key("process.command") // The full command used to launch the process as a single string representing the // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not // set this if you have to assemble it just for monitoring; use // `process.command_args` instead. // // Type: string // Required: See below // Stability: stable // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' ProcessCommandLineKey = attribute.Key("process.command_line") // All the command arguments (including the command/executable itself) as received // by the process. On Linux-based systems (and some other Unixoid systems // supporting procfs), can be set according to the list of null-delimited strings // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be // the full argv vector passed to `main`. // // Type: string[] // Required: See below // Stability: stable // Examples: 'cmd/otecol', '--config=config.yaml' ProcessCommandArgsKey = attribute.Key("process.command_args") // The username of the user that owns the process. 
// // Type: string // Required: No // Stability: stable // Examples: 'root' ProcessOwnerKey = attribute.Key("process.owner") ) // The single (language) runtime instance which is monitored. const ( // The name of the runtime of this process. For compiled native binaries, this // SHOULD be the name of the compiler. // // Type: string // Required: No // Stability: stable // Examples: 'OpenJDK Runtime Environment' ProcessRuntimeNameKey = attribute.Key("process.runtime.name") // The version of the runtime of this process, as returned by the runtime without // modification. // // Type: string // Required: No // Stability: stable // Examples: '14.0.2' ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") // An additional description about the runtime of the process, for example a // specific vendor customization of the runtime environment. // // Type: string // Required: No // Stability: stable // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") ) // A service instance. const ( // Logical name of the service. // // Type: string // Required: Always // Stability: stable // Examples: 'shoppingcart' // Note: MUST be the same for all instances of horizontally scaled services. If // the value was not specified, SDKs MUST fallback to `unknown_service:` // concatenated with [`process.executable.name`](process.md#process), e.g. // `unknown_service:bash`. If `process.executable.name` is not available, the // value MUST be set to `unknown_service`. ServiceNameKey = attribute.Key("service.name") // A namespace for `service.name`. // // Type: string // Required: No // Stability: stable // Examples: 'Shop' // Note: A string value having a meaning that helps to distinguish a group of // services, for example the team name that owns a group of services. // `service.name` is expected to be unique within the same namespace. If // `service.namespace` is not specified in the Resource then `service.name` is // expected to be unique for all services that have no explicit namespace defined // (so the empty/unspecified namespace is simply one more valid namespace). Zero- // length namespace string is assumed equal to unspecified namespace. ServiceNamespaceKey = attribute.Key("service.namespace") // The string ID of the service instance. // // Type: string // Required: No // Stability: stable // Examples: '627cc493-f310-47de-96bd-71410b7dec09' // Note: MUST be unique for each instance of the same // `service.namespace,service.name` pair (in other words // `service.namespace,service.name,service.instance.id` triplet MUST be globally // unique). The ID helps to distinguish instances of the same service that exist // at the same time (e.g. instances of a horizontally scaled service). It is // preferable for the ID to be persistent and stay the same for the lifetime of // the service instance, however it is acceptable that the ID is ephemeral and // changes during important lifetime events for the service (e.g. service // restarts). If the service has no inherent unique ID that can be used as the // value of this attribute it is recommended to generate a random Version 1 or // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use // Version 5, see RFC 4122 for more recommendations). ServiceInstanceIDKey = attribute.Key("service.instance.id") // The version string of the service API or implementation. 
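//
// Editorial note (illustrative sketch, not generated from the specification):
// service identity is typically supplied when constructing an SDK resource,
// roughly as follows, assuming go.opentelemetry.io/otel/sdk/resource is imported
// as resource:
//
//	res := resource.NewWithAttributes(
//		SchemaURL,
//		ServiceNameKey.String("shoppingcart"),
//		ServiceNamespaceKey.String("Shop"),
//		ServiceInstanceIDKey.String("627cc493-f310-47de-96bd-71410b7dec09"),
//		ServiceVersionKey.String("2.0.0"),
//	)
//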
// // Type: string // Required: No // Stability: stable // Examples: '2.0.0' ServiceVersionKey = attribute.Key("service.version") ) // The telemetry SDK used to capture data recorded by the instrumentation libraries. const ( // The name of the telemetry SDK as defined above. // // Type: string // Required: No // Stability: stable // Examples: 'opentelemetry' TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") // The language of the telemetry SDK. // // Type: Enum // Required: No // Stability: stable TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") // The version string of the telemetry SDK. // // Type: string // Required: No // Stability: stable // Examples: '1.2.3' TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") // The version string of the auto instrumentation agent, if used. // // Type: string // Required: No // Stability: stable // Examples: '1.2.3' TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") ) var ( // cpp TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") // dotnet TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") // erlang TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") // go TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") // java TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") // nodejs TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") // php TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") // python TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") // ruby TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") // webjs TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") // swift TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") ) // Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. const ( // The name of the web engine. // // Type: string // Required: Always // Stability: stable // Examples: 'WildFly' WebEngineNameKey = attribute.Key("webengine.name") // The version of the web engine. // // Type: string // Required: No // Stability: stable // Examples: '21.0.0' WebEngineVersionKey = attribute.Key("webengine.version") // Additional description of the web engine (e.g. detailed version and edition // information). // // Type: string // Required: No // Stability: stable // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' WebEngineDescriptionKey = attribute.Key("webengine.description") ) opentelemetry-go-1.21.0/semconv/v1.9.0/schema.go000066400000000000000000000017121452547353200212010ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package semconv // import "go.opentelemetry.io/otel/semconv/v1.9.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. 
Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ const SchemaURL = "https://opentelemetry.io/schemas/1.9.0" opentelemetry-go-1.21.0/semconv/v1.9.0/trace.go000066400000000000000000001621541452547353200210470ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Code generated from semantic convention specification. DO NOT EDIT. package semconv // import "go.opentelemetry.io/otel/semconv/v1.9.0" import "go.opentelemetry.io/otel/attribute" // Span attributes used by AWS Lambda (in addition to general `faas` attributes). const ( // The full invoked ARN as provided on the `Context` passed to the function // (`Lambda-Runtime-Invoked-Function-ARN` header on the `/runtime/invocation/next` // applicable). // // Type: string // Required: No // Stability: stable // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' // Note: This may be different from `faas.id` if an alias is involved. AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") ) // This document defines semantic conventions for the OpenTracing Shim const ( // Parent-child Reference type // // Type: Enum // Required: No // Stability: stable // Note: The causal relationship between a child Span and a parent Span. OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") ) var ( // The parent Span depends on the child Span in some capacity OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") // The parent Span does not depend in any way on the result of the child Span OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") ) // This document defines the attributes used to perform database client calls. const ( // An identifier for the database management system (DBMS) product being used. See // below for a list of well-known identifiers. // // Type: Enum // Required: Always // Stability: stable DBSystemKey = attribute.Key("db.system") // The connection string used to connect to the database. It is recommended to // remove embedded credentials. // // Type: string // Required: No // Stability: stable // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' DBConnectionStringKey = attribute.Key("db.connection_string") // Username for accessing the database. // // Type: string // Required: No // Stability: stable // Examples: 'readonly_user', 'reporting_user' DBUserKey = attribute.Key("db.user") // The fully-qualified class name of the [Java Database Connectivity // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver // used to connect. // // Type: string // Required: No // Stability: stable // Examples: 'org.postgresql.Driver', // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") // This attribute is used to report the name of the database being accessed. 
For // commands that switch the database, this should be set to the target database // (even if the command fails). // // Type: string // Required: Required, if applicable. // Stability: stable // Examples: 'customers', 'main' // Note: In some SQL databases, the database name to be used is called "schema // name". In case there are multiple layers that could be considered for database // name (e.g. Oracle instance name and schema name), the database name to be used // is the more specific layer (e.g. Oracle schema name). DBNameKey = attribute.Key("db.name") // The database statement being executed. // // Type: string // Required: Required if applicable and not explicitly disabled via // instrumentation configuration. // Stability: stable // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' // Note: The value may be sanitized to exclude sensitive information. DBStatementKey = attribute.Key("db.statement") // The name of the operation being executed, e.g. the [MongoDB command // name](https://docs.mongodb.com/manual/reference/command/#database-operations) // such as `findAndModify`, or the SQL keyword. // // Type: string // Required: Required, if `db.statement` is not applicable. // Stability: stable // Examples: 'findAndModify', 'HMSET', 'SELECT' // Note: When setting this to an SQL keyword, it is not recommended to attempt any // client-side parsing of `db.statement` just to get this property, but it should // be set if the operation name is provided by the library being instrumented. If // the SQL statement has an ambiguous operation, or performs more than one // operation, this value may be omitted. DBOperationKey = attribute.Key("db.operation") ) var ( // Some other SQL database. Fallback only. See notes DBSystemOtherSQL = DBSystemKey.String("other_sql") // Microsoft SQL Server DBSystemMSSQL = DBSystemKey.String("mssql") // MySQL DBSystemMySQL = DBSystemKey.String("mysql") // Oracle Database DBSystemOracle = DBSystemKey.String("oracle") // IBM DB2 DBSystemDB2 = DBSystemKey.String("db2") // PostgreSQL DBSystemPostgreSQL = DBSystemKey.String("postgresql") // Amazon Redshift DBSystemRedshift = DBSystemKey.String("redshift") // Apache Hive DBSystemHive = DBSystemKey.String("hive") // Cloudscape DBSystemCloudscape = DBSystemKey.String("cloudscape") // HyperSQL DataBase DBSystemHSQLDB = DBSystemKey.String("hsqldb") // Progress Database DBSystemProgress = DBSystemKey.String("progress") // SAP MaxDB DBSystemMaxDB = DBSystemKey.String("maxdb") // SAP HANA DBSystemHanaDB = DBSystemKey.String("hanadb") // Ingres DBSystemIngres = DBSystemKey.String("ingres") // FirstSQL DBSystemFirstSQL = DBSystemKey.String("firstsql") // EnterpriseDB DBSystemEDB = DBSystemKey.String("edb") // InterSystems Caché DBSystemCache = DBSystemKey.String("cache") // Adabas (Adaptable Database System) DBSystemAdabas = DBSystemKey.String("adabas") // Firebird DBSystemFirebird = DBSystemKey.String("firebird") // Apache Derby DBSystemDerby = DBSystemKey.String("derby") // FileMaker DBSystemFilemaker = DBSystemKey.String("filemaker") // Informix DBSystemInformix = DBSystemKey.String("informix") // InstantDB DBSystemInstantDB = DBSystemKey.String("instantdb") // InterBase DBSystemInterbase = DBSystemKey.String("interbase") // MariaDB DBSystemMariaDB = DBSystemKey.String("mariadb") // Netezza DBSystemNetezza = DBSystemKey.String("netezza") // Pervasive PSQL DBSystemPervasive = DBSystemKey.String("pervasive") // PointBase DBSystemPointbase = DBSystemKey.String("pointbase") // SQLite DBSystemSqlite = 
DBSystemKey.String("sqlite") // Sybase DBSystemSybase = DBSystemKey.String("sybase") // Teradata DBSystemTeradata = DBSystemKey.String("teradata") // Vertica DBSystemVertica = DBSystemKey.String("vertica") // H2 DBSystemH2 = DBSystemKey.String("h2") // ColdFusion IMQ DBSystemColdfusion = DBSystemKey.String("coldfusion") // Apache Cassandra DBSystemCassandra = DBSystemKey.String("cassandra") // Apache HBase DBSystemHBase = DBSystemKey.String("hbase") // MongoDB DBSystemMongoDB = DBSystemKey.String("mongodb") // Redis DBSystemRedis = DBSystemKey.String("redis") // Couchbase DBSystemCouchbase = DBSystemKey.String("couchbase") // CouchDB DBSystemCouchDB = DBSystemKey.String("couchdb") // Microsoft Azure Cosmos DB DBSystemCosmosDB = DBSystemKey.String("cosmosdb") // Amazon DynamoDB DBSystemDynamoDB = DBSystemKey.String("dynamodb") // Neo4j DBSystemNeo4j = DBSystemKey.String("neo4j") // Apache Geode DBSystemGeode = DBSystemKey.String("geode") // Elasticsearch DBSystemElasticsearch = DBSystemKey.String("elasticsearch") // Memcached DBSystemMemcached = DBSystemKey.String("memcached") // CockroachDB DBSystemCockroachdb = DBSystemKey.String("cockroachdb") ) // Connection-level attributes for Microsoft SQL Server const ( // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en- // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) // connecting to. This name is used to determine the port of a named instance. // // Type: string // Required: No // Stability: stable // Examples: 'MSSQLSERVER' // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer // required (but still recommended if non-standard). DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") ) // Call-level attributes for Cassandra const ( // The fetch size used for paging, i.e. how many rows will be returned at once. // // Type: int // Required: No // Stability: stable // Examples: 5000 DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") // The consistency level of the query. Based on consistency values from // [CQL](https://docs.datastax.com/en/cassandra- // oss/3.0/cassandra/dml/dmlConfigConsistency.html). // // Type: Enum // Required: No // Stability: stable DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") // The name of the primary table that the operation is acting upon, including the // keyspace name (if applicable). // // Type: string // Required: Recommended if available. // Stability: stable // Examples: 'mytable' // Note: This mirrors the db.sql.table attribute but references cassandra rather // than sql. It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting upon an // anonymous table, or more than one table, this value MUST NOT be set. DBCassandraTableKey = attribute.Key("db.cassandra.table") // Whether or not the query is idempotent. // // Type: boolean // Required: No // Stability: stable DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") // The number of times a query was speculatively executed. Not set or `0` if the // query was not executed speculatively. // // Type: int // Required: No // Stability: stable // Examples: 0, 2 DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") // The ID of the coordinating node for a query. 
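//
// Editorial note (illustrative sketch, not generated from the specification): a
// Cassandra client instrumentation might describe a query roughly as follows,
// assuming a trace.Span value named span:
//
//	span.SetAttributes(
//		DBSystemCassandra,
//		DBNameKey.String("customers"),
//		DBStatementKey.String("SELECT * FROM mytable"),
//		DBCassandraTableKey.String("mytable"),
//		DBCassandraConsistencyLevelQuorum,
//	)
//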
// // Type: string // Required: No // Stability: stable // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") // The data center of the coordinating node for a query. // // Type: string // Required: No // Stability: stable // Examples: 'us-west-2' DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") ) var ( // all DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") // each_quorum DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") // quorum DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") // local_quorum DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") // one DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") // two DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") // three DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") // local_one DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") // any DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") // serial DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") // local_serial DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") ) // Call-level attributes for Redis const ( // The index of the database being accessed as used in the [`SELECT` // command](https://redis.io/commands/select), provided as an integer. To be used // instead of the generic `db.name` attribute. // // Type: int // Required: Required, if other than the default database (`0`). // Stability: stable // Examples: 0, 1, 15 DBRedisDBIndexKey = attribute.Key("db.redis.database_index") ) // Call-level attributes for MongoDB const ( // The collection being accessed within the database stated in `db.name`. // // Type: string // Required: Always // Stability: stable // Examples: 'customers', 'products' DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") ) // Call-level attributes for SQL databases const ( // The name of the primary table that the operation is acting upon, including the // database name (if applicable). // // Type: string // Required: Recommended if available. // Stability: stable // Examples: 'public.users', 'customers' // Note: It is not recommended to attempt any client-side parsing of // `db.statement` just to get this property, but it should be set if it is // provided by the library being instrumented. If the operation is acting upon an // anonymous table, or more than one table, this value MUST NOT be set. DBSQLTableKey = attribute.Key("db.sql.table") ) // This document defines the attributes used to report a single exception associated with a span. const ( // The type of the exception (its fully-qualified class name, if applicable). The // dynamic type of the exception should be preferred over the static type in // languages that support it. // // Type: string // Required: No // Stability: stable // Examples: 'java.net.ConnectException', 'OSError' ExceptionTypeKey = attribute.Key("exception.type") // The exception message. 
// // Type: string // Required: No // Stability: stable // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" ExceptionMessageKey = attribute.Key("exception.message") // A stacktrace as a string in the natural representation for the language // runtime. The representation is to be determined and documented by each language // SIG. // // Type: string // Required: No // Stability: stable // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test // exception\\n at ' // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' ExceptionStacktraceKey = attribute.Key("exception.stacktrace") // SHOULD be set to true if the exception event is recorded at a point where it is // known that the exception is escaping the scope of the span. // // Type: boolean // Required: No // Stability: stable // Note: An exception is considered to have escaped (or left) the scope of a span, // if that span is ended while the exception is still logically "in flight". // This may be actually "in flight" in some languages (e.g. if the exception // is passed to a Context manager's `__exit__` method in Python) but will // usually be caught at the point of recording the exception in most languages. // It is usually not possible to determine at the point where an exception is // thrown // whether it will escape the scope of a span. // However, it is trivial to know that an exception // will escape, if one checks for an active exception just before ending the span, // as done in the [example above](#exception-end-example). // It follows that an exception may still escape the scope of the span // even if the `exception.escaped` attribute was not set or set to false, // since the event might have been recorded at a time where it was not // clear whether the exception will escape. ExceptionEscapedKey = attribute.Key("exception.escaped") ) // This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. const ( // Type of the trigger which caused this function execution. // // Type: Enum // Required: No // Stability: stable // Note: For the server/consumer span on the incoming side, // `faas.trigger` MUST be set. // Clients invoking FaaS instances usually cannot set `faas.trigger`, // since they would typically need to look in the payload to determine // the event type. If clients set it, it should be the same as the // trigger that corresponding incoming would have (i.e., this has // nothing to do with the underlying transport used to make the API // call to invoke the lambda, which is often HTTP). FaaSTriggerKey = attribute.Key("faas.trigger") // The execution ID of the current function execution. 
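// Illustrative usage, not part of the generated definitions: the exception.*
// attributes above are normally attached to a span event named "exception".
// With the trace API this is most easily done via span.RecordError, which
// records err as an exception event; the event can also be added by hand.
// The type and message values below are placeholders from the examples, and
// semconv is the assumed import alias for this package.
//
//	span.RecordError(err, trace.WithStackTrace(true))
//
//	// or, manually:
//	span.AddEvent("exception", trace.WithAttributes(
//		semconv.ExceptionTypeKey.String("OSError"),
//		semconv.ExceptionMessageKey.String("Division by zero"),
//		semconv.ExceptionEscapedKey.Bool(true),
//	))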
// // Type: string // Required: No // Stability: stable // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' FaaSExecutionKey = attribute.Key("faas.execution") ) var ( // A response to some data source operation such as a database or filesystem read/write FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") // To provide an answer to an inbound HTTP request FaaSTriggerHTTP = FaaSTriggerKey.String("http") // A function is set to be executed when messages are sent to a messaging system FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") // A function is scheduled to be executed regularly FaaSTriggerTimer = FaaSTriggerKey.String("timer") // If none of the others apply FaaSTriggerOther = FaaSTriggerKey.String("other") ) // Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. const ( // The name of the source on which the triggering operation was performed. For // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos // DB to the database name. // // Type: string // Required: Always // Stability: stable // Examples: 'myBucketName', 'myDBName' FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") // Describes the type of the operation that was performed on the data. // // Type: Enum // Required: Always // Stability: stable FaaSDocumentOperationKey = attribute.Key("faas.document.operation") // A string containing the time when the data was accessed in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed // in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // Required: Always // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSDocumentTimeKey = attribute.Key("faas.document.time") // The document name/table subjected to the operation. For example, in Cloud // Storage or S3 is the name of the file, and in Cosmos DB the table name. // // Type: string // Required: No // Stability: stable // Examples: 'myFile.txt', 'myTableName' FaaSDocumentNameKey = attribute.Key("faas.document.name") ) var ( // When a new object is created FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") // When an object is modified FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") // When an object is deleted FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") ) // Semantic Convention for FaaS scheduled to be executed regularly. const ( // A string containing the function invocation time in the [ISO // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed // in [UTC](https://www.w3.org/TR/NOTE-datetime). // // Type: string // Required: Always // Stability: stable // Examples: '2020-01-23T13:47:06Z' FaaSTimeKey = attribute.Key("faas.time") // A string containing the schedule period as [Cron Expression](https://docs.oracl // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). // // Type: string // Required: No // Stability: stable // Examples: '0/5 * * * ? *' FaaSCronKey = attribute.Key("faas.cron") ) // Contains additional attributes for incoming FaaS spans. const ( // A boolean that is true if the serverless function is executed for the first // time (aka cold-start). // // Type: boolean // Required: No // Stability: stable FaaSColdstartKey = attribute.Key("faas.coldstart") ) // Contains additional attributes for outgoing FaaS spans. const ( // The name of the invoked function. 
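// Illustrative usage, not part of the generated definitions: an incoming,
// HTTP-triggered function invocation could carry the FaaS attributes above
// on a SERVER span. The tracer name and span name are placeholders, the
// execution ID is the example value above, and semconv is the assumed
// import alias for this package.
//
//	ctx, span := otel.Tracer("faas").Start(ctx, "handle-invocation",
//		trace.WithSpanKind(trace.SpanKindServer),
//		trace.WithAttributes(
//			semconv.FaaSTriggerHTTP,
//			semconv.FaaSExecutionKey.String("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
//			semconv.FaaSColdstartKey.Bool(true),
//		),
//	)
//	defer span.End()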
// // Type: string // Required: Always // Stability: stable // Examples: 'my-function' // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked // function. FaaSInvokedNameKey = attribute.Key("faas.invoked_name") // The cloud provider of the invoked function. // // Type: Enum // Required: Always // Stability: stable // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked // function. FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") // The cloud region of the invoked function. // // Type: string // Required: For some cloud providers, like AWS or GCP, the region in which a // function is hosted is essential to uniquely identify the function and also part // of its endpoint. Since it's part of the endpoint being called, the region is // always known to clients. In these cases, `faas.invoked_region` MUST be set // accordingly. If the region is unknown to the client or not required for // identifying the invoked function, setting `faas.invoked_region` is optional. // Stability: stable // Examples: 'eu-central-1' // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked // function. FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") ) var ( // Alibaba Cloud FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") // Amazon Web Services FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") // Microsoft Azure FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") // Google Cloud Platform FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") // Tencent Cloud FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") ) // These attributes may be used for any network related operation. const ( // Transport protocol used. See note below. // // Type: Enum // Required: No // Stability: stable NetTransportKey = attribute.Key("net.transport") // Remote address of the peer (dotted decimal for IPv4 or // [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6) // // Type: string // Required: No // Stability: stable // Examples: '127.0.0.1' NetPeerIPKey = attribute.Key("net.peer.ip") // Remote port number. // // Type: int // Required: No // Stability: stable // Examples: 80, 8080, 443 NetPeerPortKey = attribute.Key("net.peer.port") // Remote hostname or similar, see note below. // // Type: string // Required: No // Stability: stable // Examples: 'example.com' NetPeerNameKey = attribute.Key("net.peer.name") // Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host. // // Type: string // Required: No // Stability: stable // Examples: '192.168.0.1' NetHostIPKey = attribute.Key("net.host.ip") // Like `net.peer.port` but for the host port. // // Type: int // Required: No // Stability: stable // Examples: 35555 NetHostPortKey = attribute.Key("net.host.port") // Local hostname or similar, see note below. // // Type: string // Required: No // Stability: stable // Examples: 'localhost' NetHostNameKey = attribute.Key("net.host.name") // The internet connection type currently being used by the host. // // Type: Enum // Required: No // Stability: stable // Examples: 'wifi' NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") // This describes more details regarding the connection.type. It may be the type // of cell technology connection, but it could be used for describing details // about a wifi connection. 
// // Type: Enum // Required: No // Stability: stable // Examples: 'LTE' NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") // The name of the mobile carrier. // // Type: string // Required: No // Stability: stable // Examples: 'sprint' NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") // The mobile carrier country code. // // Type: string // Required: No // Stability: stable // Examples: '310' NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") // The mobile carrier network code. // // Type: string // Required: No // Stability: stable // Examples: '001' NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") // The ISO 3166-1 alpha-2 2-character country code associated with the mobile // carrier network. // // Type: string // Required: No // Stability: stable // Examples: 'DE' NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") ) var ( // ip_tcp NetTransportTCP = NetTransportKey.String("ip_tcp") // ip_udp NetTransportUDP = NetTransportKey.String("ip_udp") // Another IP-based protocol NetTransportIP = NetTransportKey.String("ip") // Unix Domain socket. See below NetTransportUnix = NetTransportKey.String("unix") // Named or anonymous pipe. See note below NetTransportPipe = NetTransportKey.String("pipe") // In-process communication NetTransportInProc = NetTransportKey.String("inproc") // Something else (non IP-based) NetTransportOther = NetTransportKey.String("other") ) var ( // wifi NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") // wired NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") // cell NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") // unavailable NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") // unknown NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") ) var ( // GPRS NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") // EDGE NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") // UMTS NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") // CDMA NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") // EVDO Rel. 0 NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") // EVDO Rev. A NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") // CDMA2000 1XRTT NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") // HSDPA NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") // HSUPA NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") // HSPA NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") // IDEN NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") // EVDO Rev. 
B NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") // LTE NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") // EHRPD NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") // HSPAP NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") // GSM NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") // TD-SCDMA NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") // IWLAN NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") // 5G NR (New Radio) NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") // 5G NRNSA (New Radio Non-Standalone) NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") // LTE CA NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") ) // Operations that access some remote service. const ( // The [`service.name`](../../resource/semantic_conventions/README.md#service) of // the remote service. SHOULD be equal to the actual `service.name` resource // attribute of the remote service if any. // // Type: string // Required: No // Stability: stable // Examples: 'AuthTokenCache' PeerServiceKey = attribute.Key("peer.service") ) // These attributes may be used for any operation with an authenticated and/or authorized enduser. const ( // Username or client_id extracted from the access token or // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the // inbound request from outside the system. // // Type: string // Required: No // Stability: stable // Examples: 'username' EnduserIDKey = attribute.Key("enduser.id") // Actual/assumed role the client is making the request under extracted from token // or application security context. // // Type: string // Required: No // Stability: stable // Examples: 'admin' EnduserRoleKey = attribute.Key("enduser.role") // Scopes or granted authorities the client currently possesses extracted from // token or application security context. The value would come from the scope // associated with an [OAuth 2.0 Access // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value // in a [SAML 2.0 Assertion](http://docs.oasis- // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). // // Type: string // Required: No // Stability: stable // Examples: 'read:message, write:files' EnduserScopeKey = attribute.Key("enduser.scope") ) // These attributes may be used for any operation to store information about a thread that started a span. const ( // Current "managed" thread ID (as opposed to OS thread ID). // // Type: int // Required: No // Stability: stable // Examples: 42 ThreadIDKey = attribute.Key("thread.id") // Current thread name. // // Type: string // Required: No // Stability: stable // Examples: 'main' ThreadNameKey = attribute.Key("thread.name") ) // These attributes allow to report this unit of code and therefore to provide more context about the span. const ( // The method or function name, or equivalent (usually rightmost part of the code // unit's name). // // Type: string // Required: No // Stability: stable // Examples: 'serveRequest' CodeFunctionKey = attribute.Key("code.function") // The "namespace" within which `code.function` is defined. Usually the qualified // class or module name, such that `code.namespace` + some separator + // `code.function` form a unique identifier for the code unit. 
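// Illustrative usage, not part of the generated definitions: these
// general-purpose attributes (peer service, end user, thread, code location)
// are typically added to an already started span. The values below are the
// example values above, and semconv is the assumed import alias for this
// package.
//
//	span := trace.SpanFromContext(ctx)
//	span.SetAttributes(
//		semconv.PeerServiceKey.String("AuthTokenCache"),
//		semconv.EnduserIDKey.String("username"),
//		semconv.ThreadIDKey.Int(42),
//		semconv.CodeFunctionKey.String("serveRequest"),
//	)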
// // Type: string // Required: No // Stability: stable // Examples: 'com.example.MyHTTPService' CodeNamespaceKey = attribute.Key("code.namespace") // The source code file name that identifies the code unit as uniquely as possible // (preferably an absolute file path). // // Type: string // Required: No // Stability: stable // Examples: '/usr/local/MyApplication/content_root/app/index.php' CodeFilepathKey = attribute.Key("code.filepath") // The line number in `code.filepath` best representing the operation. It SHOULD // point within the code unit named in `code.function`. // // Type: int // Required: No // Stability: stable // Examples: 42 CodeLineNumberKey = attribute.Key("code.lineno") ) // This document defines semantic conventions for HTTP client and server Spans. const ( // HTTP request method. // // Type: string // Required: Always // Stability: stable // Examples: 'GET', 'POST', 'HEAD' HTTPMethodKey = attribute.Key("http.method") // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. // Usually the fragment is not transmitted over HTTP, but if it is known, it // should be included nevertheless. // // Type: string // Required: No // Stability: stable // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' // Note: `http.url` MUST NOT contain credentials passed via URL in form of // `https://username:password@www.example.com/`. In such case the attribute's // value should be `https://www.example.com/`. HTTPURLKey = attribute.Key("http.url") // The full request target as passed in a HTTP request line or equivalent. // // Type: string // Required: No // Stability: stable // Examples: '/path/12314/?q=ddds#123' HTTPTargetKey = attribute.Key("http.target") // The value of the [HTTP host // header](https://tools.ietf.org/html/rfc7230#section-5.4). An empty Host header // should also be reported, see note. // // Type: string // Required: No // Stability: stable // Examples: 'www.example.org' // Note: When the header is present but empty the attribute SHOULD be set to the // empty string. Note that this is a valid situation that is expected in certain // cases, according the aforementioned [section of RFC // 7230](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is not // set the attribute MUST NOT be set. HTTPHostKey = attribute.Key("http.host") // The URI scheme identifying the used protocol. // // Type: string // Required: No // Stability: stable // Examples: 'http', 'https' HTTPSchemeKey = attribute.Key("http.scheme") // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). // // Type: int // Required: If and only if one was received/sent. // Stability: stable // Examples: 200 HTTPStatusCodeKey = attribute.Key("http.status_code") // Kind of HTTP protocol used. // // Type: Enum // Required: No // Stability: stable // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP` // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed. HTTPFlavorKey = attribute.Key("http.flavor") // Value of the [HTTP User- // Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by the // client. // // Type: string // Required: No // Stability: stable // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' HTTPUserAgentKey = attribute.Key("http.user_agent") // The size of the request payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. 
For // requests using transport encoding, this should be the compressed size. // // Type: int // Required: No // Stability: stable // Examples: 3495 HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") // The size of the uncompressed request payload body after transport decoding. Not // set if transport encoding not used. // // Type: int // Required: No // Stability: stable // Examples: 5493 HTTPRequestContentLengthUncompressedKey = attribute.Key("http.request_content_length_uncompressed") // The size of the response payload body in bytes. This is the number of bytes // transferred excluding headers and is often, but not always, present as the // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For // requests using transport encoding, this should be the compressed size. // // Type: int // Required: No // Stability: stable // Examples: 3495 HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") // The size of the uncompressed response payload body after transport decoding. // Not set if transport encoding not used. // // Type: int // Required: No // Stability: stable // Examples: 5493 HTTPResponseContentLengthUncompressedKey = attribute.Key("http.response_content_length_uncompressed") ) var ( // HTTP 1.0 HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") // HTTP 1.1 HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") // HTTP 2 HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") // SPDY protocol HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") // QUIC protocol HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") ) // Semantic Convention for HTTP Server const ( // The primary server name of the matched virtual host. This should be obtained // via configuration. If no such configuration can be obtained, this attribute // MUST NOT be set ( `net.host.name` should be used instead). // // Type: string // Required: No // Stability: stable // Examples: 'example.com' // Note: `http.url` is usually not readily available on the server side but would // have to be assembled in a cumbersome and sometimes lossy process from other // information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus // preferred to supply the raw data that is available. HTTPServerNameKey = attribute.Key("http.server_name") // The matched route (path template). // // Type: string // Required: No // Stability: stable // Examples: '/users/:userID?' HTTPRouteKey = attribute.Key("http.route") // The IP address of the original client behind all proxies, if known (e.g. from // [X-Forwarded-For](https://developer.mozilla.org/en- // US/docs/Web/HTTP/Headers/X-Forwarded-For)). // // Type: string // Required: No // Stability: stable // Examples: '83.164.160.102' // Note: This is not necessarily the same as `net.peer.ip`, which would // identify the network-level peer, which may be a proxy. // This attribute should be set when a source of information different // from the one used for `net.peer.ip`, is available even if that other // source just confirms the same value as `net.peer.ip`. // Rationale: For `net.peer.ip`, one typically does not know if it // comes from a proxy, reverse proxy, or the actual client. Setting // `http.client_ip` when it's the same as `net.peer.ip` means that // one is at least somewhat confident that the address is not that of // the closest proxy. HTTPClientIPKey = attribute.Key("http.client_ip") ) // Attributes that exist for multiple DynamoDB request types. const ( // The keys in the `RequestItems` object field. 
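// Illustrative usage, not part of the generated definitions: an HTTP server
// instrumentation could record the HTTP attributes above on a SERVER span,
// setting the status code once the response is known. Names and values are
// placeholders; semconv is the assumed import alias for this package.
//
//	ctx, span := otel.Tracer("http-server").Start(ctx, "GET /users/:userID",
//		trace.WithSpanKind(trace.SpanKindServer),
//		trace.WithAttributes(
//			semconv.HTTPMethodKey.String("GET"),
//			semconv.HTTPSchemeKey.String("https"),
//			semconv.HTTPRouteKey.String("/users/:userID"),
//		),
//	)
//	// ... handle the request ...
//	span.SetAttributes(semconv.HTTPStatusCodeKey.Int(200))
//	span.End()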
// // Type: string[] // Required: No // Stability: stable // Examples: 'Users', 'Cats' AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") // The JSON-serialized value of each item in the `ConsumedCapacity` response // field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": // "string", "WriteCapacityUnits": number }' AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") // The JSON-serialized value of the `ItemCollectionMetrics` response field. // // Type: string // Required: No // Stability: stable // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. // // Type: double // Required: No // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. // // Type: double // Required: No // Stability: stable // Examples: 1.0, 2.0 AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") // The value of the `ConsistentRead` request parameter. // // Type: boolean // Required: No // Stability: stable AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") // The value of the `ProjectionExpression` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, // ProductReviews' AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") // The value of the `Limit` request parameter. // // Type: int // Required: No // Stability: stable // Examples: 10 AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") // The value of the `AttributesToGet` request parameter. // // Type: string[] // Required: No // Stability: stable // Examples: 'lives', 'id' AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") // The value of the `IndexName` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'name_to_group' AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") // The value of the `Select` request parameter. 
// // Type: string // Required: No // Stability: stable // Examples: 'ALL_ATTRIBUTES', 'COUNT' AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") ) // DynamoDB.CreateTable const ( // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request // field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": // number, "WriteCapacityUnits": number } }' AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request // field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], // "ProjectionType": "string" } }' AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") ) // DynamoDB.ListTables const ( // The value of the `ExclusiveStartTableName` request parameter. // // Type: string // Required: No // Stability: stable // Examples: 'Users', 'CatsTable' AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") // The number of items in the `TableNames` response parameter. // // Type: int // Required: No // Stability: stable // Examples: 20 AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") ) // DynamoDB.Query const ( // The value of the `ScanIndexForward` request parameter. // // Type: boolean // Required: No // Stability: stable AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") ) // DynamoDB.Scan const ( // The value of the `Segment` request parameter. // // Type: int // Required: No // Stability: stable // Examples: 10 AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") // The value of the `TotalSegments` request parameter. // // Type: int // Required: No // Stability: stable // Examples: 100 AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") // The value of the `Count` response parameter. // // Type: int // Required: No // Stability: stable // Examples: 10 AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") // The value of the `ScannedCount` response parameter. // // Type: int // Required: No // Stability: stable // Examples: 50 AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") ) // DynamoDB.UpdateTable const ( // The JSON-serialized value of each item in the `AttributeDefinitions` request // field. // // Type: string[] // Required: No // Stability: stable // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") // The JSON-serialized value of each item in the `GlobalSecondaryIndexUpdates` // request field. 
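// Illustrative usage, not part of the generated definitions: a DynamoDB
// request span could combine the generic db.* attributes with the
// aws.dynamodb.* attributes above. Table, index, and limit values are
// placeholders drawn from the examples; semconv is the assumed import alias
// for this package.
//
//	ctx, span := otel.Tracer("aws-sdk").Start(ctx, "DynamoDB.Query",
//		trace.WithSpanKind(trace.SpanKindClient),
//		trace.WithAttributes(
//			semconv.DBSystemDynamoDB,
//			semconv.AWSDynamoDBTableNamesKey.StringSlice([]string{"Users"}),
//			semconv.AWSDynamoDBIndexNameKey.String("name_to_group"),
//			semconv.AWSDynamoDBLimitKey.Int(10),
//		),
//	)
//	defer span.End()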
// // Type: string[] // Required: No // Stability: stable // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { // "AttributeName": "string", "KeyType": "string" } ], "Projection": { // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": // number } }' AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") ) // This document defines the attributes used in messaging systems. const ( // A string identifying the messaging system. // // Type: string // Required: Always // Stability: stable // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' MessagingSystemKey = attribute.Key("messaging.system") // The message destination name. This might be equal to the span name but is // required nevertheless. // // Type: string // Required: Always // Stability: stable // Examples: 'MyQueue', 'MyTopic' MessagingDestinationKey = attribute.Key("messaging.destination") // The kind of message destination // // Type: Enum // Required: Required only if the message destination is either a `queue` or // `topic`. // Stability: stable MessagingDestinationKindKey = attribute.Key("messaging.destination_kind") // A boolean that is true if the message destination is temporary. // // Type: boolean // Required: If missing, it is assumed to be false. // Stability: stable MessagingTempDestinationKey = attribute.Key("messaging.temp_destination") // The name of the transport protocol. // // Type: string // Required: No // Stability: stable // Examples: 'AMQP', 'MQTT' MessagingProtocolKey = attribute.Key("messaging.protocol") // The version of the transport protocol. // // Type: string // Required: No // Stability: stable // Examples: '0.9.1' MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version") // Connection string. // // Type: string // Required: No // Stability: stable // Examples: 'tibjmsnaming://localhost:7222', // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' MessagingURLKey = attribute.Key("messaging.url") // A value used by the messaging system as an identifier for the message, // represented as a string. // // Type: string // Required: No // Stability: stable // Examples: '452a7c7c7c7048c2f887f61572b18fc2' MessagingMessageIDKey = attribute.Key("messaging.message_id") // The [conversation ID](#conversations) identifying the conversation to which the // message belongs, represented as a string. Sometimes called "Correlation ID". // // Type: string // Required: No // Stability: stable // Examples: 'MyConversationID' MessagingConversationIDKey = attribute.Key("messaging.conversation_id") // The (uncompressed) size of the message payload in bytes. Also use this // attribute if it is unknown whether the compressed or uncompressed payload size // is reported. // // Type: int // Required: No // Stability: stable // Examples: 2738 MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes") // The compressed size of the message payload in bytes. 
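// Illustrative usage, not part of the generated definitions: a producer
// instrumentation could set the messaging attributes above on a PRODUCER
// span. The system, destination, and message ID are placeholders from the
// examples; semconv is the assumed import alias for this package.
//
//	ctx, span := otel.Tracer("producer").Start(ctx, "MyTopic send",
//		trace.WithSpanKind(trace.SpanKindProducer),
//		trace.WithAttributes(
//			semconv.MessagingSystemKey.String("kafka"),
//			semconv.MessagingDestinationKey.String("MyTopic"),
//			semconv.MessagingDestinationKindTopic,
//			semconv.MessagingMessageIDKey.String("452a7c7c7c7048c2f887f61572b18fc2"),
//		),
//	)
//	defer span.End()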
// // Type: int // Required: No // Stability: stable // Examples: 2048 MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes") ) var ( // A message sent to a queue MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") // A message sent to a topic MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") ) // Semantic convention for a consumer of messages received from a messaging system const ( // A string identifying the kind of message consumption as defined in the // [Operation names](#operation-names) section above. If the operation is "send", // this attribute MUST NOT be set, since the operation can be inferred from the // span kind in that case. // // Type: Enum // Required: No // Stability: stable MessagingOperationKey = attribute.Key("messaging.operation") // The identifier for the consumer receiving a message. For Kafka, set it to // `{messaging.kafka.consumer_group} - {messaging.kafka.client_id}`, if both are // present, or only `messaging.kafka.consumer_group`. For brokers, such as // RabbitMQ and Artemis, set it to the `client_id` of the client consuming the // message. // // Type: string // Required: No // Stability: stable // Examples: 'mygroup - client-6' MessagingConsumerIDKey = attribute.Key("messaging.consumer_id") ) var ( // receive MessagingOperationReceive = MessagingOperationKey.String("receive") // process MessagingOperationProcess = MessagingOperationKey.String("process") ) // Attributes for RabbitMQ const ( // RabbitMQ message routing key. // // Type: string // Required: Unless it is empty. // Stability: stable // Examples: 'myKey' MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key") ) // Attributes for Apache Kafka const ( // Message keys in Kafka are used for grouping alike messages to ensure they're // processed on the same partition. They differ from `messaging.message_id` in // that they're not unique. If the key is `null`, the attribute MUST NOT be set. // // Type: string // Required: No // Stability: stable // Examples: 'myKey' // Note: If the key type is not string, its string representation has to be // supplied for the attribute. If the key has no unambiguous, canonical string // form, don't include its value. MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key") // Name of the Kafka Consumer Group that is handling the message. Only applies to // consumers, not producers. // // Type: string // Required: No // Stability: stable // Examples: 'my-group' MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group") // Client ID for the Consumer or Producer that is handling the message. // // Type: string // Required: No // Stability: stable // Examples: 'client-5' MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") // Partition the message is sent to. // // Type: int // Required: No // Stability: stable // Examples: 2 MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition") // A boolean that is true if the message is a tombstone. // // Type: boolean // Required: If missing, it is assumed to be false. // Stability: stable MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone") ) // Attributes for Apache RocketMQ const ( // Namespace of RocketMQ resources; resources in different namespaces are // individual. 
// // Type: string // Required: Always // Stability: stable // Examples: 'myNamespace' MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") // Name of the RocketMQ producer/consumer group that is handling the message. The // client type is identified by the SpanKind. // // Type: string // Required: Always // Stability: stable // Examples: 'myConsumerGroup' MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") // The unique identifier for each client. // // Type: string // Required: Always // Stability: stable // Examples: 'myhost@8742@s8083jm' MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") // Type of message. // // Type: Enum // Required: No // Stability: stable MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message_type") // The secondary classifier of message besides topic. // // Type: string // Required: No // Stability: stable // Examples: 'tagA' MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message_tag") // Key(s) of message, another way to mark message besides message id. // // Type: string[] // Required: No // Stability: stable // Examples: 'keyA', 'keyB' MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message_keys") // Model of message consumption. This only applies to consumer spans. // // Type: Enum // Required: No // Stability: stable MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") ) var ( // Normal message MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") // FIFO message MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") // Delay message MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") // Transaction message MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") ) var ( // Clustering consumption model MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") // Broadcasting consumption model MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") ) // This document defines semantic conventions for remote procedure calls. const ( // A string identifying the remoting system. // // Type: string // Required: Always // Stability: stable // Examples: 'grpc', 'java_rmi', 'wcf' RPCSystemKey = attribute.Key("rpc.system") // The full (logical) name of the service being called, including its package // name, if applicable. // // Type: string // Required: No, but recommended // Stability: stable // Examples: 'myservice.EchoService' // Note: This is the logical name of the service from the RPC interface // perspective, which can be different from the name of any implementing class. // The `code.namespace` attribute may be used to store the latter (despite the // attribute name, it may include a class name; e.g., class with method actually // executing the call on the server side, RPC client stub class on the client // side). RPCServiceKey = attribute.Key("rpc.service") // The name of the (logical) method being called, must be equal to the $method // part in the span name. // // Type: string // Required: No, but recommended // Stability: stable // Examples: 'exampleMethod' // Note: This is the logical name of the method from the RPC interface // perspective, which can be different from the name of any implementing // method/function. 
The `code.function` attribute may be used to store the latter // (e.g., method actually executing the call on the server side, RPC client stub // method on the client side). RPCMethodKey = attribute.Key("rpc.method") ) // Tech-specific attributes for gRPC. const ( // The [numeric status // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC // request. // // Type: Enum // Required: Always // Stability: stable RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ) var ( // OK RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) // CANCELLED RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) // UNKNOWN RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) // INVALID_ARGUMENT RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) // DEADLINE_EXCEEDED RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) // NOT_FOUND RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) // ALREADY_EXISTS RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) // PERMISSION_DENIED RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) // RESOURCE_EXHAUSTED RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) // FAILED_PRECONDITION RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) // ABORTED RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) // OUT_OF_RANGE RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) // UNIMPLEMENTED RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) // INTERNAL RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) // UNAVAILABLE RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) // DATA_LOSS RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) // UNAUTHENTICATED RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) ) // Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). const ( // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC // 1.0 does not specify this, the value can be omitted. // // Type: string // Required: If missing, it is assumed to be "1.0". // Stability: stable // Examples: '2.0', '1.0' RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") // `id` property of request or response. Since protocol allows id to be int, // string, `null` or missing (for notifications), value is expected to be cast to // string for simplicity. Use empty string in case of `null` value. Omit entirely // if this is a notification. // // Type: string // Required: No // Stability: stable // Examples: '10', 'request-7', '' RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") // `error.code` property of response if it is an error response. // // Type: int // Required: If missing, response is assumed to be successful. // Stability: stable // Examples: -32700, 100 RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") // `error.message` property of response if it is an error response. // // Type: string // Required: No // Stability: stable // Examples: 'Parse error', 'User already exists' RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") ) // RPC received/sent message. const ( // Whether this is a received or sent message. // // Type: Enum // Required: No // Stability: stable MessageTypeKey = attribute.Key("message.type") // MUST be calculated as two different counters starting from `1` one for sent // messages and one for received message. 
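// Illustrative usage, not part of the generated definitions: a gRPC client
// interceptor could record the rpc.* attributes above on a CLIENT span and
// set the status code after the call returns. Service and method names are
// the example values; semconv is the assumed import alias for this package.
//
//	ctx, span := otel.Tracer("grpc-client").Start(ctx, "myservice.EchoService/exampleMethod",
//		trace.WithSpanKind(trace.SpanKindClient),
//		trace.WithAttributes(
//			semconv.RPCSystemKey.String("grpc"),
//			semconv.RPCServiceKey.String("myservice.EchoService"),
//			semconv.RPCMethodKey.String("exampleMethod"),
//		),
//	)
//	// ... perform the call ...
//	span.SetAttributes(semconv.RPCGRPCStatusCodeOk)
//	span.End()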
// // Type: int // Required: No // Stability: stable // Note: This way we guarantee that the values will be consistent between // different implementations. MessageIDKey = attribute.Key("message.id") // Compressed size of the message in bytes. // // Type: int // Required: No // Stability: stable MessageCompressedSizeKey = attribute.Key("message.compressed_size") // Uncompressed size of the message in bytes. // // Type: int // Required: No // Stability: stable MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") ) var ( // sent MessageTypeSent = MessageTypeKey.String("SENT") // received MessageTypeReceived = MessageTypeKey.String("RECEIVED") ) opentelemetry-go-1.21.0/trace.go000066400000000000000000000031221452547353200165270ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otel // import "go.opentelemetry.io/otel" import ( "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/trace" ) // Tracer creates a named tracer that implements Tracer interface. // If the name is an empty string then provider uses default name. // // This is short for GetTracerProvider().Tracer(name, opts...) func Tracer(name string, opts ...trace.TracerOption) trace.Tracer { return GetTracerProvider().Tracer(name, opts...) } // GetTracerProvider returns the registered global trace provider. // If none is registered then an instance of NoopTracerProvider is returned. // // Use the trace provider to create a named tracer. E.g. // // tracer := otel.GetTracerProvider().Tracer("example.com/foo") // // or // // tracer := otel.Tracer("example.com/foo") func GetTracerProvider() trace.TracerProvider { return global.TracerProvider() } // SetTracerProvider registers `tp` as the global trace provider. func SetTracerProvider(tp trace.TracerProvider) { global.SetTracerProvider(tp) } opentelemetry-go-1.21.0/trace/000077500000000000000000000000001452547353200162025ustar00rootroot00000000000000opentelemetry-go-1.21.0/trace/config.go000066400000000000000000000234451452547353200200060ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace // import "go.opentelemetry.io/otel/trace" import ( "time" "go.opentelemetry.io/otel/attribute" ) // TracerConfig is a group of options for a Tracer. type TracerConfig struct { instrumentationVersion string // Schema URL of the telemetry emitted by the Tracer. 
schemaURL string attrs attribute.Set } // InstrumentationVersion returns the version of the library providing instrumentation. func (t *TracerConfig) InstrumentationVersion() string { return t.instrumentationVersion } // InstrumentationAttributes returns the attributes associated with the library // providing instrumentation. func (t *TracerConfig) InstrumentationAttributes() attribute.Set { return t.attrs } // SchemaURL returns the Schema URL of the telemetry emitted by the Tracer. func (t *TracerConfig) SchemaURL() string { return t.schemaURL } // NewTracerConfig applies all the options to a returned TracerConfig. func NewTracerConfig(options ...TracerOption) TracerConfig { var config TracerConfig for _, option := range options { config = option.apply(config) } return config } // TracerOption applies an option to a TracerConfig. type TracerOption interface { apply(TracerConfig) TracerConfig } type tracerOptionFunc func(TracerConfig) TracerConfig func (fn tracerOptionFunc) apply(cfg TracerConfig) TracerConfig { return fn(cfg) } // SpanConfig is a group of options for a Span. type SpanConfig struct { attributes []attribute.KeyValue timestamp time.Time links []Link newRoot bool spanKind SpanKind stackTrace bool } // Attributes describe the associated qualities of a Span. func (cfg *SpanConfig) Attributes() []attribute.KeyValue { return cfg.attributes } // Timestamp is a time in a Span life-cycle. func (cfg *SpanConfig) Timestamp() time.Time { return cfg.timestamp } // StackTrace checks whether stack trace capturing is enabled. func (cfg *SpanConfig) StackTrace() bool { return cfg.stackTrace } // Links are the associations a Span has with other Spans. func (cfg *SpanConfig) Links() []Link { return cfg.links } // NewRoot identifies a Span as the root Span for a new trace. This is // commonly used when an existing trace crosses trust boundaries and the // remote parent span context should be ignored for security. func (cfg *SpanConfig) NewRoot() bool { return cfg.newRoot } // SpanKind is the role a Span has in a trace. func (cfg *SpanConfig) SpanKind() SpanKind { return cfg.spanKind } // NewSpanStartConfig applies all the options to a returned SpanConfig. // No validation is performed on the returned SpanConfig (e.g. no uniqueness // checking or bounding of data), it is left to the SDK to perform this // action. func NewSpanStartConfig(options ...SpanStartOption) SpanConfig { var c SpanConfig for _, option := range options { c = option.applySpanStart(c) } return c } // NewSpanEndConfig applies all the options to a returned SpanConfig. // No validation is performed on the returned SpanConfig (e.g. no uniqueness // checking or bounding of data), it is left to the SDK to perform this // action. func NewSpanEndConfig(options ...SpanEndOption) SpanConfig { var c SpanConfig for _, option := range options { c = option.applySpanEnd(c) } return c } // SpanStartOption applies an option to a SpanConfig. These options are applicable // only when the span is created. type SpanStartOption interface { applySpanStart(SpanConfig) SpanConfig } type spanOptionFunc func(SpanConfig) SpanConfig func (fn spanOptionFunc) applySpanStart(cfg SpanConfig) SpanConfig { return fn(cfg) } // SpanEndOption applies an option to a SpanConfig. These options are // applicable only when the span is ended. type SpanEndOption interface { applySpanEnd(SpanConfig) SpanConfig } // EventConfig is a group of options for an Event. 
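// Illustrative usage, a sketch only: a Tracer implementation typically
// resolves the SpanStartOption values passed to Start by building a config
// and reading its accessors. The option values below are placeholders.
//
//	cfg := NewSpanStartConfig(
//		WithSpanKind(SpanKindClient),
//		WithAttributes(attribute.String("key1", "value1")),
//		WithTimestamp(time.Now()),
//	)
//	_ = cfg.SpanKind()   // SpanKindClient
//	_ = cfg.Attributes() // [key1: value1]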
type EventConfig struct { attributes []attribute.KeyValue timestamp time.Time stackTrace bool } // Attributes describe the associated qualities of an Event. func (cfg *EventConfig) Attributes() []attribute.KeyValue { return cfg.attributes } // Timestamp is a time in an Event life-cycle. func (cfg *EventConfig) Timestamp() time.Time { return cfg.timestamp } // StackTrace checks whether stack trace capturing is enabled. func (cfg *EventConfig) StackTrace() bool { return cfg.stackTrace } // NewEventConfig applies all the EventOptions to a returned EventConfig. If no // timestamp option is passed, the returned EventConfig will have a Timestamp // set to the call time, otherwise no validation is performed on the returned // EventConfig. func NewEventConfig(options ...EventOption) EventConfig { var c EventConfig for _, option := range options { c = option.applyEvent(c) } if c.timestamp.IsZero() { c.timestamp = time.Now() } return c } // EventOption applies span event options to an EventConfig. type EventOption interface { applyEvent(EventConfig) EventConfig } // SpanOption are options that can be used at both the beginning and end of a span. type SpanOption interface { SpanStartOption SpanEndOption } // SpanStartEventOption are options that can be used at the start of a span, or with an event. type SpanStartEventOption interface { SpanStartOption EventOption } // SpanEndEventOption are options that can be used at the end of a span, or with an event. type SpanEndEventOption interface { SpanEndOption EventOption } type attributeOption []attribute.KeyValue func (o attributeOption) applySpan(c SpanConfig) SpanConfig { c.attributes = append(c.attributes, []attribute.KeyValue(o)...) return c } func (o attributeOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } func (o attributeOption) applyEvent(c EventConfig) EventConfig { c.attributes = append(c.attributes, []attribute.KeyValue(o)...) return c } var _ SpanStartEventOption = attributeOption{} // WithAttributes adds the attributes related to a span life-cycle event. // These attributes are used to describe the work a Span represents when this // option is provided to a Span's start or end events. Otherwise, these // attributes provide additional information about the event being recorded // (e.g. error, state change, processing progress, system event). // // If multiple of these options are passed the attributes of each successive // option will extend the attributes instead of overwriting. There is no // guarantee of uniqueness in the resulting attributes. func WithAttributes(attributes ...attribute.KeyValue) SpanStartEventOption { return attributeOption(attributes) } // SpanEventOption are options that can be used with an event or a span. type SpanEventOption interface { SpanOption EventOption } type timestampOption time.Time func (o timestampOption) applySpan(c SpanConfig) SpanConfig { c.timestamp = time.Time(o) return c } func (o timestampOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } func (o timestampOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } func (o timestampOption) applyEvent(c EventConfig) EventConfig { c.timestamp = time.Time(o) return c } var _ SpanEventOption = timestampOption{} // WithTimestamp sets the time of a Span or Event life-cycle moment (e.g. // started, stopped, errored). 
func WithTimestamp(t time.Time) SpanEventOption { return timestampOption(t) } type stackTraceOption bool func (o stackTraceOption) applyEvent(c EventConfig) EventConfig { c.stackTrace = bool(o) return c } func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig { c.stackTrace = bool(o) return c } func (o stackTraceOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } // WithStackTrace sets the flag to capture the error with stack trace (e.g. true, false). func WithStackTrace(b bool) SpanEndEventOption { return stackTraceOption(b) } // WithLinks adds links to a Span. The links are added to the existing Span // links, i.e. this does not overwrite. Links with invalid span context are ignored. func WithLinks(links ...Link) SpanStartOption { return spanOptionFunc(func(cfg SpanConfig) SpanConfig { cfg.links = append(cfg.links, links...) return cfg }) } // WithNewRoot specifies that the Span should be treated as a root Span. Any // existing parent span context will be ignored when defining the Span's trace // identifiers. func WithNewRoot() SpanStartOption { return spanOptionFunc(func(cfg SpanConfig) SpanConfig { cfg.newRoot = true return cfg }) } // WithSpanKind sets the SpanKind of a Span. func WithSpanKind(kind SpanKind) SpanStartOption { return spanOptionFunc(func(cfg SpanConfig) SpanConfig { cfg.spanKind = kind return cfg }) } // WithInstrumentationVersion sets the instrumentation version. func WithInstrumentationVersion(version string) TracerOption { return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { cfg.instrumentationVersion = version return cfg }) } // WithInstrumentationAttributes sets the instrumentation attributes. // // The passed attributes will be de-duplicated. func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption { return tracerOptionFunc(func(config TracerConfig) TracerConfig { config.attrs = attribute.NewSet(attr...) return config }) } // WithSchemaURL sets the schema URL for the Tracer. func WithSchemaURL(schemaURL string) TracerOption { return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { cfg.schemaURL = schemaURL return cfg }) } opentelemetry-go-1.21.0/trace/config_test.go000066400000000000000000000142661452547353200210460ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
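// Illustrative usage, a sketch only: from a caller's perspective, the Tracer
// options exercised by the tests below are passed when obtaining a tracer,
// e.g. via the otel package; the version and schema URL are placeholders.
//
//	tracer := otel.Tracer("example.com/foo",
//		trace.WithInstrumentationVersion("semver:1.0.0"),
//		trace.WithSchemaURL("https://opentelemetry.io/schemas/1.2.0"),
//	)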
package trace import ( "testing" "time" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/attribute" ) func TestNewSpanConfig(t *testing.T) { k1v1 := attribute.String("key1", "value1") k1v2 := attribute.String("key1", "value2") k2v2 := attribute.String("key2", "value2") timestamp0 := time.Unix(0, 0) timestamp1 := time.Unix(0, 0) link1 := Link{ SpanContext: SpanContext{traceID: TraceID([16]byte{1, 1}), spanID: SpanID{3}}, Attributes: []attribute.KeyValue{k1v1}, } link2 := Link{ SpanContext: SpanContext{traceID: TraceID([16]byte{1, 1}), spanID: SpanID{3}}, Attributes: []attribute.KeyValue{k1v2, k2v2}, } tests := []struct { options []SpanStartOption expected SpanConfig }{ { // No non-zero-values should be set. []SpanStartOption{}, SpanConfig{}, }, { []SpanStartOption{ WithAttributes(k1v1), }, SpanConfig{ attributes: []attribute.KeyValue{k1v1}, }, }, { // Multiple calls should append not overwrite. []SpanStartOption{ WithAttributes(k1v1), WithAttributes(k1v2), WithAttributes(k2v2), }, SpanConfig{ // No uniqueness is guaranteed by the API. attributes: []attribute.KeyValue{k1v1, k1v2, k2v2}, }, }, { []SpanStartOption{ WithAttributes(k1v1, k1v2, k2v2), }, SpanConfig{ // No uniqueness is guaranteed by the API. attributes: []attribute.KeyValue{k1v1, k1v2, k2v2}, }, }, { []SpanStartOption{ WithTimestamp(timestamp0), }, SpanConfig{ timestamp: timestamp0, }, }, { []SpanStartOption{ // Multiple calls overwrites with last-one-wins. WithTimestamp(timestamp0), WithTimestamp(timestamp1), }, SpanConfig{ timestamp: timestamp1, }, }, { []SpanStartOption{ WithLinks(link1), }, SpanConfig{ links: []Link{link1}, }, }, { []SpanStartOption{ // Multiple calls should append not overwrite. WithLinks(link1), WithLinks(link1, link2), }, SpanConfig{ // No uniqueness is guaranteed by the API. links: []Link{link1, link1, link2}, }, }, { []SpanStartOption{ WithNewRoot(), }, SpanConfig{ newRoot: true, }, }, { []SpanStartOption{ // Multiple calls should not change NewRoot state. WithNewRoot(), WithNewRoot(), }, SpanConfig{ newRoot: true, }, }, { []SpanStartOption{ WithSpanKind(SpanKindConsumer), }, SpanConfig{ spanKind: SpanKindConsumer, }, }, { []SpanStartOption{ // Multiple calls overwrites with last-one-wins. WithSpanKind(SpanKindClient), WithSpanKind(SpanKindConsumer), }, SpanConfig{ spanKind: SpanKindConsumer, }, }, { // Everything should work together. []SpanStartOption{ WithAttributes(k1v1), WithTimestamp(timestamp0), WithLinks(link1, link2), WithNewRoot(), WithSpanKind(SpanKindConsumer), }, SpanConfig{ attributes: []attribute.KeyValue{k1v1}, timestamp: timestamp0, links: []Link{link1, link2}, newRoot: true, spanKind: SpanKindConsumer, }, }, } for _, test := range tests { assert.Equal(t, test.expected, NewSpanStartConfig(test.options...)) } } func TestEndSpanConfig(t *testing.T) { timestamp := time.Unix(0, 0) tests := []struct { options []SpanEndOption expected SpanConfig }{ { []SpanEndOption{}, SpanConfig{}, }, { []SpanEndOption{ WithStackTrace(true), }, SpanConfig{ stackTrace: true, }, }, { []SpanEndOption{ WithTimestamp(timestamp), }, SpanConfig{ timestamp: timestamp, }, }, } for _, test := range tests { assert.Equal(t, test.expected, NewSpanEndConfig(test.options...)) } } func TestTracerConfig(t *testing.T) { v1 := "semver:0.0.1" v2 := "semver:1.0.0" schemaURL := "https://opentelemetry.io/schemas/1.2.0" attrs := attribute.NewSet( attribute.String("user", "alice"), attribute.Bool("admin", true), ) c := NewTracerConfig( // Multiple calls should overwrite. 
WithInstrumentationVersion(v1), WithInstrumentationVersion(v2), WithSchemaURL(schemaURL), WithInstrumentationAttributes(attrs.ToSlice()...), ) assert.Equal(t, v2, c.InstrumentationVersion(), "instrumentation version") assert.Equal(t, schemaURL, c.SchemaURL(), "schema URL") assert.Equal(t, attrs, c.InstrumentationAttributes(), "instrumentation attributes") } // Save benchmark results to a file level var to avoid the compiler optimizing // away the actual work. var ( tracerConfig TracerConfig spanConfig SpanConfig eventConfig EventConfig ) func BenchmarkNewTracerConfig(b *testing.B) { opts := []TracerOption{ WithInstrumentationVersion("testing version"), WithSchemaURL("testing URL"), } b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { tracerConfig = NewTracerConfig(opts...) } } func BenchmarkNewSpanStartConfig(b *testing.B) { opts := []SpanStartOption{ WithAttributes(attribute.Bool("key", true)), WithTimestamp(time.Now()), WithLinks(Link{}), WithNewRoot(), WithSpanKind(SpanKindClient), } b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { spanConfig = NewSpanStartConfig(opts...) } } func BenchmarkNewSpanEndConfig(b *testing.B) { opts := []SpanEndOption{ WithTimestamp(time.Now()), WithStackTrace(true), } b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { spanConfig = NewSpanEndConfig(opts...) } } func BenchmarkNewEventConfig(b *testing.B) { opts := []EventOption{ WithAttributes(attribute.Bool("key", true)), WithTimestamp(time.Now()), WithStackTrace(true), } b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { eventConfig = NewEventConfig(opts...) } } opentelemetry-go-1.21.0/trace/context.go000066400000000000000000000044311452547353200202170ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace // import "go.opentelemetry.io/otel/trace" import "context" type traceContextKeyType int const currentSpanKey traceContextKeyType = iota // ContextWithSpan returns a copy of parent with span set as the current Span. func ContextWithSpan(parent context.Context, span Span) context.Context { return context.WithValue(parent, currentSpanKey, span) } // ContextWithSpanContext returns a copy of parent with sc as the current // Span. The Span implementation that wraps sc is non-recording and performs // no operations other than to return sc as the SpanContext from the // SpanContext method. func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Context { return ContextWithSpan(parent, nonRecordingSpan{sc: sc}) } // ContextWithRemoteSpanContext returns a copy of parent with rsc set explicly // as a remote SpanContext and as the current Span. The Span implementation // that wraps rsc is non-recording and performs no operations other than to // return rsc as the SpanContext from the SpanContext method. 
func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) context.Context { return ContextWithSpanContext(parent, rsc.WithRemote(true)) } // SpanFromContext returns the current Span from ctx. // // If no Span is currently set in ctx an implementation of a Span that // performs no operations is returned. func SpanFromContext(ctx context.Context) Span { if ctx == nil { return noopSpan{} } if span, ok := ctx.Value(currentSpanKey).(Span); ok { return span } return noopSpan{} } // SpanContextFromContext returns the current Span's SpanContext. func SpanContextFromContext(ctx context.Context) SpanContext { return SpanFromContext(ctx).SpanContext() } opentelemetry-go-1.21.0/trace/context_test.go000066400000000000000000000045771452547353200212710ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace // import "go.opentelemetry.io/otel/trace" import ( "context" "testing" "github.com/stretchr/testify/assert" ) type testSpan struct { noopSpan ID byte Remote bool } func (s testSpan) SpanContext() SpanContext { return SpanContext{ traceID: [16]byte{1}, spanID: [8]byte{s.ID}, remote: s.Remote, } } var ( emptySpan = noopSpan{} localSpan = testSpan{ID: 1, Remote: false} remoteSpan = testSpan{ID: 1, Remote: true} wrappedSpan = nonRecordingSpan{sc: remoteSpan.SpanContext()} ) func TestSpanFromContext(t *testing.T) { testCases := []struct { name string context context.Context expectedSpan Span }{ { name: "empty context", context: nil, expectedSpan: emptySpan, }, { name: "background context", context: context.Background(), expectedSpan: emptySpan, }, { name: "local span", context: ContextWithSpan(context.Background(), localSpan), expectedSpan: localSpan, }, { name: "remote span", context: ContextWithSpan(context.Background(), remoteSpan), expectedSpan: remoteSpan, }, { name: "wrapped remote span", context: ContextWithRemoteSpanContext(context.Background(), remoteSpan.SpanContext()), expectedSpan: wrappedSpan, }, { name: "wrapped local span becomes remote", context: ContextWithRemoteSpanContext(context.Background(), localSpan.SpanContext()), expectedSpan: wrappedSpan, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { assert.Equal(t, tc.expectedSpan, SpanFromContext(tc.context)) // Ensure SpanContextFromContext is just // SpanFromContext(…).SpanContext(). assert.Equal(t, tc.expectedSpan.SpanContext(), SpanContextFromContext(tc.context)) }) } } opentelemetry-go-1.21.0/trace/doc.go000066400000000000000000000103351452547353200173000ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* Package trace provides an implementation of the tracing part of the OpenTelemetry API. To participate in distributed traces a Span needs to be created for the operation being performed as part of a traced workflow. In its simplest form: var tracer trace.Tracer func init() { tracer = otel.Tracer("instrumentation/package/name") } func operation(ctx context.Context) { var span trace.Span ctx, span = tracer.Start(ctx, "operation") defer span.End() // ... } A Tracer is unique to the instrumentation and is used to create Spans. Instrumentation should be designed to accept a TracerProvider from which it can create its own unique Tracer. Alternatively, the registered global TracerProvider from the go.opentelemetry.io/otel package can be used as a default. const ( name = "instrumentation/package/name" version = "0.1.0" ) type Instrumentation struct { tracer trace.Tracer } func NewInstrumentation(tp trace.TracerProvider) *Instrumentation { if tp == nil { tp = otel.TracerProvider() } return &Instrumentation{ tracer: tp.Tracer(name, trace.WithInstrumentationVersion(version)), } } func operation(ctx context.Context, inst *Instrumentation) { var span trace.Span ctx, span = inst.tracer.Start(ctx, "operation") defer span.End() // ... } # API Implementations This package does not conform to the standard Go versioning policy; all of its interfaces may have methods added to them without a package major version bump. This non-standard API evolution could surprise an uninformed implementation author. They could unknowingly build their implementation in a way that would result in a runtime panic for their users that update to the new API. The API is designed to help inform an instrumentation author about this non-standard API evolution. It requires them to choose a default behavior for unimplemented interface methods. There are three behavior choices they can make: - Compilation failure - Panic - Default to another implementation All interfaces in this API embed a corresponding interface from [go.opentelemetry.io/otel/trace/embedded]. If an author wants the default behavior of their implementations to be a compilation failure, signaling to their users they need to update to the latest version of that implementation, they need to embed the corresponding interface from [go.opentelemetry.io/otel/trace/embedded] in their implementation. For example, import "go.opentelemetry.io/otel/trace/embedded" type TracerProvider struct { embedded.TracerProvider // ... } If an author wants the default behavior of their implementations to panic, they can embed the API interface directly. import "go.opentelemetry.io/otel/trace" type TracerProvider struct { trace.TracerProvider // ... } This option is not recommended. It will lead to publishing packages that contain runtime panics when users update to newer versions of [go.opentelemetry.io/otel/trace], which may be done with a trasitive dependency. Finally, an author can embed another implementation in theirs. The embedded implementation will be used for methods not defined by the author. 
For example, an author who wants to default to silently dropping the call can use [go.opentelemetry.io/otel/trace/noop]: import "go.opentelemetry.io/otel/trace/noop" type TracerProvider struct { noop.TracerProvider // ... } It is strongly recommended that authors only embed [go.opentelemetry.io/otel/trace/noop] if they choose this default behavior. That implementation is the only one OpenTelemetry authors can guarantee will fully implement all the API interfaces when a user updates their API. */ package trace // import "go.opentelemetry.io/otel/trace" opentelemetry-go-1.21.0/trace/embedded/000077500000000000000000000000001452547353200177335ustar00rootroot00000000000000opentelemetry-go-1.21.0/trace/embedded/embedded.go000066400000000000000000000051501452547353200220140ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package embedded provides interfaces embedded within the [OpenTelemetry // trace API]. // // Implementers of the [OpenTelemetry trace API] can embed the relevant type // from this package into their implementation directly. Doing so will result // in a compilation error for users when the [OpenTelemetry trace API] is // extended (which is something that can happen without a major version bump of // the API package). // // [OpenTelemetry trace API]: https://pkg.go.dev/go.opentelemetry.io/otel/trace package embedded // import "go.opentelemetry.io/otel/trace/embedded" // TracerProvider is embedded in // [go.opentelemetry.io/otel/trace.TracerProvider]. // // Embed this interface in your implementation of the // [go.opentelemetry.io/otel/trace.TracerProvider] if you want users to // experience a compilation error, signaling they need to update to your latest // implementation, when the [go.opentelemetry.io/otel/trace.TracerProvider] // interface is extended (which is something that can happen without a major // version bump of the API package). type TracerProvider interface{ tracerProvider() } // Tracer is embedded in [go.opentelemetry.io/otel/trace.Tracer]. // // Embed this interface in your implementation of the // [go.opentelemetry.io/otel/trace.Tracer] if you want users to experience a // compilation error, signaling they need to update to your latest // implementation, when the [go.opentelemetry.io/otel/trace.Tracer] interface // is extended (which is something that can happen without a major version bump // of the API package). type Tracer interface{ tracer() } // Span is embedded in [go.opentelemetry.io/otel/trace.Span]. // // Embed this interface in your implementation of the // [go.opentelemetry.io/otel/trace.Span] if you want users to experience a // compilation error, signaling they need to update to your latest // implementation, when the [go.opentelemetry.io/otel/trace.Span] interface is // extended (which is something that can happen without a major version bump of // the API package). 
type Span interface{ span() } opentelemetry-go-1.21.0/trace/go.mod000066400000000000000000000006251452547353200173130ustar00rootroot00000000000000module go.opentelemetry.io/otel/trace go 1.20 replace go.opentelemetry.io/otel => ../ require ( github.com/google/go-cmp v0.6.0 github.com/stretchr/testify v1.8.4 go.opentelemetry.io/otel v1.21.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) replace go.opentelemetry.io/otel/metric => ../metric opentelemetry-go-1.21.0/trace/go.sum000066400000000000000000000020301452547353200173300ustar00rootroot00000000000000github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= opentelemetry-go-1.21.0/trace/nonrecording.go000066400000000000000000000017431452547353200212250ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace // import "go.opentelemetry.io/otel/trace" // nonRecordingSpan is a minimal implementation of a Span that wraps a // SpanContext. It performs no operations other than to return the wrapped // SpanContext. type nonRecordingSpan struct { noopSpan sc SpanContext } // SpanContext returns the wrapped SpanContext. func (s nonRecordingSpan) SpanContext() SpanContext { return s.sc } opentelemetry-go-1.21.0/trace/noop.go000066400000000000000000000056141452547353200175120ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
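// The nonRecordingSpan defined in nonrecording.go is what
// ContextWithSpanContext and ContextWithRemoteSpanContext wrap around a
// SpanContext. A minimal sketch of propagating a remote parent, assuming the
// trace and span IDs were already decoded from incoming headers (for example
// with TraceIDFromHex and SpanIDFromHex), and that tracer is any Tracer:
//
//	sc := trace.NewSpanContext(trace.SpanContextConfig{
//		TraceID:    traceID,
//		SpanID:     spanID,
//		TraceFlags: trace.FlagsSampled,
//	})
//	ctx := trace.ContextWithRemoteSpanContext(context.Background(), sc)
//	ctx, span := tracer.Start(ctx, "handle-request")
//	defer span.End()
//
// With an SDK-provided Tracer the started span becomes a child of the remote
// span; with only the API installed the span is non-recording but the remote
// SpanContext is still carried through ctx.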
package trace // import "go.opentelemetry.io/otel/trace" import ( "context" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace/embedded" ) // NewNoopTracerProvider returns an implementation of TracerProvider that // performs no operations. The Tracer and Spans created from the returned // TracerProvider also perform no operations. // // Deprecated: Use [go.opentelemetry.io/otel/trace/noop.NewTracerProvider] // instead. func NewNoopTracerProvider() TracerProvider { return noopTracerProvider{} } type noopTracerProvider struct{ embedded.TracerProvider } var _ TracerProvider = noopTracerProvider{} // Tracer returns noop implementation of Tracer. func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { return noopTracer{} } // noopTracer is an implementation of Tracer that performs no operations. type noopTracer struct{ embedded.Tracer } var _ Tracer = noopTracer{} // Start carries forward a non-recording Span, if one is present in the context, otherwise it // creates a no-op Span. func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) { span := SpanFromContext(ctx) if _, ok := span.(nonRecordingSpan); !ok { // span is likely already a noopSpan, but let's be sure span = noopSpan{} } return ContextWithSpan(ctx, span), span } // noopSpan is an implementation of Span that performs no operations. type noopSpan struct{ embedded.Span } var _ Span = noopSpan{} // SpanContext returns an empty span context. func (noopSpan) SpanContext() SpanContext { return SpanContext{} } // IsRecording always returns false. func (noopSpan) IsRecording() bool { return false } // SetStatus does nothing. func (noopSpan) SetStatus(codes.Code, string) {} // SetError does nothing. func (noopSpan) SetError(bool) {} // SetAttributes does nothing. func (noopSpan) SetAttributes(...attribute.KeyValue) {} // End does nothing. func (noopSpan) End(...SpanEndOption) {} // RecordError does nothing. func (noopSpan) RecordError(error, ...EventOption) {} // AddEvent does nothing. func (noopSpan) AddEvent(string, ...EventOption) {} // SetName does nothing. func (noopSpan) SetName(string) {} // TracerProvider returns a no-op TracerProvider. func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} } opentelemetry-go-1.21.0/trace/noop/000077500000000000000000000000001452547353200171555ustar00rootroot00000000000000opentelemetry-go-1.21.0/trace/noop/noop.go000066400000000000000000000077521452547353200204720ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package noop provides an implementation of the OpenTelemetry trace API that // produces no telemetry and minimizes used computation resources. // // Using this package to implement the OpenTelemetry trace API will effectively // disable OpenTelemetry. // // This implementation can be embedded in other implementations of the // OpenTelemetry trace API. 
Doing so will mean the implementation defaults to // no operation for methods it does not implement. package noop // import "go.opentelemetry.io/otel/trace/noop" import ( "context" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" ) var ( // Compile-time check this implements the OpenTelemetry API. _ trace.TracerProvider = TracerProvider{} _ trace.Tracer = Tracer{} _ trace.Span = Span{} ) // TracerProvider is an OpenTelemetry No-Op TracerProvider. type TracerProvider struct{ embedded.TracerProvider } // NewTracerProvider returns a TracerProvider that does not record any telemetry. func NewTracerProvider() TracerProvider { return TracerProvider{} } // Tracer returns an OpenTelemetry Tracer that does not record any telemetry. func (TracerProvider) Tracer(string, ...trace.TracerOption) trace.Tracer { return Tracer{} } // Tracer is an OpenTelemetry No-Op Tracer. type Tracer struct{ embedded.Tracer } // Start creates a span. The created span will be set in a child context of ctx // and returned with the span. // // If ctx contains a span context, the returned span will also contain that // span context. If the span context in ctx is for a non-recording span, that // span instance will be returned directly. func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) { span := trace.SpanFromContext(ctx) // If the parent context contains a non-zero span context, that span // context needs to be returned as a non-recording span // (https://github.com/open-telemetry/opentelemetry-specification/blob/3a1dde966a4ce87cce5adf464359fe369741bbea/specification/trace/api.md#behavior-of-the-api-in-the-absence-of-an-installed-sdk). var zeroSC trace.SpanContext if sc := span.SpanContext(); !sc.Equal(zeroSC) { if !span.IsRecording() { // If the span is not recording return it directly. return ctx, span } // Otherwise, return the span context needs in a non-recording span. span = Span{sc: sc} } else { // No parent, return a No-Op span with an empty span context. span = Span{} } return trace.ContextWithSpan(ctx, span), span } // Span is an OpenTelemetry No-Op Span. type Span struct { embedded.Span sc trace.SpanContext } // SpanContext returns an empty span context. func (s Span) SpanContext() trace.SpanContext { return s.sc } // IsRecording always returns false. func (Span) IsRecording() bool { return false } // SetStatus does nothing. func (Span) SetStatus(codes.Code, string) {} // SetAttributes does nothing. func (Span) SetAttributes(...attribute.KeyValue) {} // End does nothing. func (Span) End(...trace.SpanEndOption) {} // RecordError does nothing. func (Span) RecordError(error, ...trace.EventOption) {} // AddEvent does nothing. func (Span) AddEvent(string, ...trace.EventOption) {} // SetName does nothing. func (Span) SetName(string) {} // TracerProvider returns a No-Op TracerProvider. func (Span) TracerProvider() trace.TracerProvider { return TracerProvider{} } opentelemetry-go-1.21.0/trace/noop/noop_test.go000066400000000000000000000100651452547353200215200ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package noop // import "go.opentelemetry.io/otel/trace/noop" import ( "context" "reflect" "testing" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/trace" ) func TestImplementationNoPanics(t *testing.T) { // Check that if type has an embedded interface and that interface has // methods added to it than the No-Op implementation implements them. t.Run("TracerProvider", assertAllExportedMethodNoPanic( reflect.ValueOf(TracerProvider{}), reflect.TypeOf((*trace.TracerProvider)(nil)).Elem(), )) t.Run("Meter", assertAllExportedMethodNoPanic( reflect.ValueOf(Tracer{}), reflect.TypeOf((*trace.Tracer)(nil)).Elem(), )) t.Run("Span", assertAllExportedMethodNoPanic( reflect.ValueOf(Span{}), reflect.TypeOf((*trace.Span)(nil)).Elem(), )) } func assertAllExportedMethodNoPanic(rVal reflect.Value, rType reflect.Type) func(*testing.T) { return func(t *testing.T) { for n := 0; n < rType.NumMethod(); n++ { mType := rType.Method(n) if !mType.IsExported() { t.Logf("ignoring unexported %s", mType.Name) continue } m := rVal.MethodByName(mType.Name) if !m.IsValid() { t.Errorf("unknown method for %s: %s", rVal.Type().Name(), mType.Name) } numIn := mType.Type.NumIn() if mType.Type.IsVariadic() { numIn-- } args := make([]reflect.Value, numIn) ctx := context.Background() for i := range args { aType := mType.Type.In(i) if aType.Name() == "Context" { // Do not panic on a nil context. 
args[i] = reflect.ValueOf(ctx) } else { args[i] = reflect.New(aType).Elem() } } assert.NotPanicsf(t, func() { _ = m.Call(args) }, "%s.%s", rVal.Type().Name(), mType.Name) } } } func TestNewTracerProvider(t *testing.T) { tp := NewTracerProvider() assert.Equal(t, tp, TracerProvider{}) tracer := tp.Tracer("") assert.Equal(t, tracer, Tracer{}) } func TestTracerStartPropagatesSpanContext(t *testing.T) { tracer := NewTracerProvider().Tracer("") spanCtx := trace.SpanContext{} ctx := trace.ContextWithSpanContext(context.Background(), spanCtx) ctx, span := tracer.Start(ctx, "test_span") assert.Equal(t, spanCtx, trace.SpanContextFromContext(ctx), "empty span context not set in context") assert.IsType(t, Span{}, span, "non-noop span returned") assert.Equal(t, spanCtx, span.SpanContext(), "empty span context not returned from span") assert.False(t, span.IsRecording(), "empty span context returned recording span") spanCtx = spanCtx.WithTraceID(trace.TraceID([16]byte{1})) spanCtx = spanCtx.WithSpanID(trace.SpanID([8]byte{1})) ctx = trace.ContextWithSpanContext(context.Background(), spanCtx) ctx, span = tracer.Start(ctx, "test_span") assert.Equal(t, spanCtx, trace.SpanContextFromContext(ctx), "non-empty span context not set in context") assert.Equal(t, spanCtx, span.SpanContext(), "non-empty span context not returned from span") assert.False(t, span.IsRecording(), "non-empty span context returned recording span") rSpan := recordingSpan{Span: Span{sc: spanCtx}} ctx = trace.ContextWithSpan(context.Background(), rSpan) ctx, span = tracer.Start(ctx, "test_span") assert.Equal(t, spanCtx, trace.SpanContextFromContext(ctx), "recording span's span context not set in context") assert.IsType(t, Span{}, span, "non-noop span returned") assert.Equal(t, spanCtx, span.SpanContext(), "recording span's span context not returned from span") assert.False(t, span.IsRecording(), "recording span returned") } type recordingSpan struct{ Span } func (recordingSpan) IsRecording() bool { return true } opentelemetry-go-1.21.0/trace/noop_test.go000066400000000000000000000055451452547353200205540ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
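// The noop package exercised above replaces the deprecated
// NewNoopTracerProvider tested below. A minimal sketch of using it to disable
// tracing, assuming a valid SpanContext sc was decoded elsewhere (the tracer
// and span names are illustrative):
//
//	tp := noop.NewTracerProvider()
//	tracer := tp.Tracer("example")
//	ctx := trace.ContextWithSpanContext(context.Background(), sc)
//	ctx, span := tracer.Start(ctx, "operation")
//	defer span.End()
//
// span.SpanContext() reports sc, preserving propagation, but nothing is
// recorded or exported.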
package trace import ( "context" "testing" ) func TestNewNoopTracerProvider(t *testing.T) { got, want := NewNoopTracerProvider(), noopTracerProvider{} if got != want { t.Errorf("NewNoopTracerProvider() returned %#v, want %#v", got, want) } } func TestNoopTracerProviderTracer(t *testing.T) { tp := NewNoopTracerProvider() got, want := tp.Tracer(""), noopTracer{} if got != want { t.Errorf("noopTracerProvider.Tracer() returned %#v, want %#v", got, want) } } func TestNoopTracerStart(t *testing.T) { ctx := context.Background() tracer := NewNoopTracerProvider().Tracer("test instrumentation") var span Span ctx, span = tracer.Start(ctx, "span name") got, ok := span.(noopSpan) if !ok { t.Fatalf("noopTracer.Start() returned a non-noopSpan: %#v", span) } want := noopSpan{} if got != want { t.Errorf("noopTracer.Start() returned %#v, want %#v", got, want) } got, ok = SpanFromContext(ctx).(noopSpan) if !ok { t.Fatal("noopTracer.Start() did not set span as current in returned context") } if got != want { t.Errorf("noopTracer.Start() current span in returned context set to %#v, want %#v", got, want) } } func TestNoopSpan(t *testing.T) { tracer := NewNoopTracerProvider().Tracer("test instrumentation") _, s := tracer.Start(context.Background(), "test span") span := s.(noopSpan) if got, want := span.SpanContext(), (SpanContext{}); !assertSpanContextEqual(got, want) { t.Errorf("span.SpanContext() returned %#v, want %#v", got, want) } if got, want := span.IsRecording(), false; got != want { t.Errorf("span.IsRecording() returned %#v, want %#v", got, want) } } func TestNonRecordingSpanTracerStart(t *testing.T) { tid, err := TraceIDFromHex("01000000000000000000000000000000") if err != nil { t.Fatalf("failure creating TraceID: %s", err.Error()) } sid, err := SpanIDFromHex("0200000000000000") if err != nil { t.Fatalf("failure creating SpanID: %s", err.Error()) } sc := NewSpanContext(SpanContextConfig{TraceID: tid, SpanID: sid}) ctx := ContextWithSpanContext(context.Background(), sc) _, span := NewNoopTracerProvider().Tracer("test instrumentation").Start(ctx, "span1") if got, want := span.SpanContext(), sc; !assertSpanContextEqual(got, want) { t.Errorf("SpanContext not carried by nonRecordingSpan. got %#v, want %#v", got, want) } } opentelemetry-go-1.21.0/trace/trace.go000066400000000000000000000465341452547353200176430ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace // import "go.opentelemetry.io/otel/trace" import ( "bytes" "context" "encoding/hex" "encoding/json" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/trace/embedded" ) const ( // FlagsSampled is a bitmask with the sampled bit set. A SpanContext // with the sampling bit set means the span is sampled. 
FlagsSampled = TraceFlags(0x01) errInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase" errInvalidTraceIDLength errorConst = "hex encoded trace-id must have length equals to 32" errNilTraceID errorConst = "trace-id can't be all zero" errInvalidSpanIDLength errorConst = "hex encoded span-id must have length equals to 16" errNilSpanID errorConst = "span-id can't be all zero" ) type errorConst string func (e errorConst) Error() string { return string(e) } // TraceID is a unique identity of a trace. // nolint:revive // revive complains about stutter of `trace.TraceID`. type TraceID [16]byte var ( nilTraceID TraceID _ json.Marshaler = nilTraceID ) // IsValid checks whether the trace TraceID is valid. A valid trace ID does // not consist of zeros only. func (t TraceID) IsValid() bool { return !bytes.Equal(t[:], nilTraceID[:]) } // MarshalJSON implements a custom marshal function to encode TraceID // as a hex string. func (t TraceID) MarshalJSON() ([]byte, error) { return json.Marshal(t.String()) } // String returns the hex string representation form of a TraceID. func (t TraceID) String() string { return hex.EncodeToString(t[:]) } // SpanID is a unique identity of a span in a trace. type SpanID [8]byte var ( nilSpanID SpanID _ json.Marshaler = nilSpanID ) // IsValid checks whether the SpanID is valid. A valid SpanID does not consist // of zeros only. func (s SpanID) IsValid() bool { return !bytes.Equal(s[:], nilSpanID[:]) } // MarshalJSON implements a custom marshal function to encode SpanID // as a hex string. func (s SpanID) MarshalJSON() ([]byte, error) { return json.Marshal(s.String()) } // String returns the hex string representation form of a SpanID. func (s SpanID) String() string { return hex.EncodeToString(s[:]) } // TraceIDFromHex returns a TraceID from a hex string if it is compliant with // the W3C trace-context specification. See more at // https://www.w3.org/TR/trace-context/#trace-id // nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`. func TraceIDFromHex(h string) (TraceID, error) { t := TraceID{} if len(h) != 32 { return t, errInvalidTraceIDLength } if err := decodeHex(h, t[:]); err != nil { return t, err } if !t.IsValid() { return t, errNilTraceID } return t, nil } // SpanIDFromHex returns a SpanID from a hex string if it is compliant // with the w3c trace-context specification. // See more at https://www.w3.org/TR/trace-context/#parent-id func SpanIDFromHex(h string) (SpanID, error) { s := SpanID{} if len(h) != 16 { return s, errInvalidSpanIDLength } if err := decodeHex(h, s[:]); err != nil { return s, err } if !s.IsValid() { return s, errNilSpanID } return s, nil } func decodeHex(h string, b []byte) error { for _, r := range h { switch { case 'a' <= r && r <= 'f': continue case '0' <= r && r <= '9': continue default: return errInvalidHexID } } decoded, err := hex.DecodeString(h) if err != nil { return err } copy(b, decoded) return nil } // TraceFlags contains flags that can be set on a SpanContext. type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`. // IsSampled returns if the sampling bit is set in the TraceFlags. func (tf TraceFlags) IsSampled() bool { return tf&FlagsSampled == FlagsSampled } // WithSampled sets the sampling bit in a new copy of the TraceFlags. func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // sampled is not a control flag. 
if sampled { return tf | FlagsSampled } return tf &^ FlagsSampled } // MarshalJSON implements a custom marshal function to encode TraceFlags // as a hex string. func (tf TraceFlags) MarshalJSON() ([]byte, error) { return json.Marshal(tf.String()) } // String returns the hex string representation form of TraceFlags. func (tf TraceFlags) String() string { return hex.EncodeToString([]byte{byte(tf)}[:]) } // SpanContextConfig contains mutable fields usable for constructing // an immutable SpanContext. type SpanContextConfig struct { TraceID TraceID SpanID SpanID TraceFlags TraceFlags TraceState TraceState Remote bool } // NewSpanContext constructs a SpanContext using values from the provided // SpanContextConfig. func NewSpanContext(config SpanContextConfig) SpanContext { return SpanContext{ traceID: config.TraceID, spanID: config.SpanID, traceFlags: config.TraceFlags, traceState: config.TraceState, remote: config.Remote, } } // SpanContext contains identifying trace information about a Span. type SpanContext struct { traceID TraceID spanID SpanID traceFlags TraceFlags traceState TraceState remote bool } var _ json.Marshaler = SpanContext{} // IsValid returns if the SpanContext is valid. A valid span context has a // valid TraceID and SpanID. func (sc SpanContext) IsValid() bool { return sc.HasTraceID() && sc.HasSpanID() } // IsRemote indicates whether the SpanContext represents a remotely-created Span. func (sc SpanContext) IsRemote() bool { return sc.remote } // WithRemote returns a copy of sc with the Remote property set to remote. func (sc SpanContext) WithRemote(remote bool) SpanContext { return SpanContext{ traceID: sc.traceID, spanID: sc.spanID, traceFlags: sc.traceFlags, traceState: sc.traceState, remote: remote, } } // TraceID returns the TraceID from the SpanContext. func (sc SpanContext) TraceID() TraceID { return sc.traceID } // HasTraceID checks if the SpanContext has a valid TraceID. func (sc SpanContext) HasTraceID() bool { return sc.traceID.IsValid() } // WithTraceID returns a new SpanContext with the TraceID replaced. func (sc SpanContext) WithTraceID(traceID TraceID) SpanContext { return SpanContext{ traceID: traceID, spanID: sc.spanID, traceFlags: sc.traceFlags, traceState: sc.traceState, remote: sc.remote, } } // SpanID returns the SpanID from the SpanContext. func (sc SpanContext) SpanID() SpanID { return sc.spanID } // HasSpanID checks if the SpanContext has a valid SpanID. func (sc SpanContext) HasSpanID() bool { return sc.spanID.IsValid() } // WithSpanID returns a new SpanContext with the SpanID replaced. func (sc SpanContext) WithSpanID(spanID SpanID) SpanContext { return SpanContext{ traceID: sc.traceID, spanID: spanID, traceFlags: sc.traceFlags, traceState: sc.traceState, remote: sc.remote, } } // TraceFlags returns the flags from the SpanContext. func (sc SpanContext) TraceFlags() TraceFlags { return sc.traceFlags } // IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags. func (sc SpanContext) IsSampled() bool { return sc.traceFlags.IsSampled() } // WithTraceFlags returns a new SpanContext with the TraceFlags replaced. func (sc SpanContext) WithTraceFlags(flags TraceFlags) SpanContext { return SpanContext{ traceID: sc.traceID, spanID: sc.spanID, traceFlags: flags, traceState: sc.traceState, remote: sc.remote, } } // TraceState returns the TraceState from the SpanContext. func (sc SpanContext) TraceState() TraceState { return sc.traceState } // WithTraceState returns a new SpanContext with the TraceState replaced. 
func (sc SpanContext) WithTraceState(state TraceState) SpanContext { return SpanContext{ traceID: sc.traceID, spanID: sc.spanID, traceFlags: sc.traceFlags, traceState: state, remote: sc.remote, } } // Equal is a predicate that determines whether two SpanContext values are equal. func (sc SpanContext) Equal(other SpanContext) bool { return sc.traceID == other.traceID && sc.spanID == other.spanID && sc.traceFlags == other.traceFlags && sc.traceState.String() == other.traceState.String() && sc.remote == other.remote } // MarshalJSON implements a custom marshal function to encode a SpanContext. func (sc SpanContext) MarshalJSON() ([]byte, error) { return json.Marshal(SpanContextConfig{ TraceID: sc.traceID, SpanID: sc.spanID, TraceFlags: sc.traceFlags, TraceState: sc.traceState, Remote: sc.remote, }) } // Span is the individual component of a trace. It represents a single named // and timed operation of a workflow that is traced. A Tracer is used to // create a Span and it is then up to the operation the Span represents to // properly end the Span when the operation itself ends. // // Warning: Methods may be added to this interface in minor releases. See // package documentation on API implementation for information on how to set // default behavior for unimplemented methods. type Span interface { // Users of the interface can ignore this. This embedded type is only used // by implementations of this interface. See the "API Implementations" // section of the package documentation for more information. embedded.Span // End completes the Span. The Span is considered complete and ready to be // delivered through the rest of the telemetry pipeline after this method // is called. Therefore, updates to the Span are not allowed after this // method has been called. End(options ...SpanEndOption) // AddEvent adds an event with the provided name and options. AddEvent(name string, options ...EventOption) // IsRecording returns the recording state of the Span. It will return // true if the Span is active and events can be recorded. IsRecording() bool // RecordError will record err as an exception span event for this span. An // additional call to SetStatus is required if the Status of the Span should // be set to Error, as this method does not change the Span status. If this // span is not being recorded or err is nil then this method does nothing. RecordError(err error, options ...EventOption) // SpanContext returns the SpanContext of the Span. The returned SpanContext // is usable even after the End method has been called for the Span. SpanContext() SpanContext // SetStatus sets the status of the Span in the form of a code and a // description, provided the status hasn't already been set to a higher // value before (OK > Error > Unset). The description is only included in a // status when the code is for an error. SetStatus(code codes.Code, description string) // SetName sets the Span name. SetName(name string) // SetAttributes sets kv as attributes of the Span. If a key from kv // already exists for an attribute of the Span it will be overwritten with // the value contained in kv. SetAttributes(kv ...attribute.KeyValue) // TracerProvider returns a TracerProvider that can be used to generate // additional Spans on the same telemetry pipeline as the current Span. TracerProvider() TracerProvider } // Link is the relationship between two Spans. The relationship can be within // the same Trace or across different Traces. // // For example, a Link is used in the following situations: // // 1. 
Batch Processing: A batch of operations may contain operations // associated with one or more traces/spans. Since there can only be one // parent SpanContext, a Link is used to keep reference to the // SpanContext of all operations in the batch. // 2. Public Endpoint: A SpanContext for an in incoming client request on a // public endpoint should be considered untrusted. In such a case, a new // trace with its own identity and sampling decision needs to be created, // but this new trace needs to be related to the original trace in some // form. A Link is used to keep reference to the original SpanContext and // track the relationship. type Link struct { // SpanContext of the linked Span. SpanContext SpanContext // Attributes describe the aspects of the link. Attributes []attribute.KeyValue } // LinkFromContext returns a link encapsulating the SpanContext in the provided ctx. func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { return Link{ SpanContext: SpanContextFromContext(ctx), Attributes: attrs, } } // SpanKind is the role a Span plays in a Trace. type SpanKind int // As a convenience, these match the proto definition, see // https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 // // The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` // to coerce a span kind to a valid value. const ( // SpanKindUnspecified is an unspecified SpanKind and is not a valid // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal // if it is received. SpanKindUnspecified SpanKind = 0 // SpanKindInternal is a SpanKind for a Span that represents an internal // operation within an application. SpanKindInternal SpanKind = 1 // SpanKindServer is a SpanKind for a Span that represents the operation // of handling a request from a client. SpanKindServer SpanKind = 2 // SpanKindClient is a SpanKind for a Span that represents the operation // of client making a request to a server. SpanKindClient SpanKind = 3 // SpanKindProducer is a SpanKind for a Span that represents the operation // of a producer sending a message to a message broker. Unlike // SpanKindClient and SpanKindServer, there is often no direct // relationship between this kind of Span and a SpanKindConsumer kind. A // SpanKindProducer Span will end once the message is accepted by the // message broker which might not overlap with the processing of that // message. SpanKindProducer SpanKind = 4 // SpanKindConsumer is a SpanKind for a Span that represents the operation // of a consumer receiving a message from a message broker. Like // SpanKindProducer Spans, there is often no direct relationship between // this Span and the Span that produced the message. SpanKindConsumer SpanKind = 5 ) // ValidateSpanKind returns a valid span kind value. This will coerce // invalid values into the default value, SpanKindInternal. func ValidateSpanKind(spanKind SpanKind) SpanKind { switch spanKind { case SpanKindInternal, SpanKindServer, SpanKindClient, SpanKindProducer, SpanKindConsumer: // valid return spanKind default: return SpanKindInternal } } // String returns the specified name of the SpanKind in lower-case. 
func (sk SpanKind) String() string { switch sk { case SpanKindInternal: return "internal" case SpanKindServer: return "server" case SpanKindClient: return "client" case SpanKindProducer: return "producer" case SpanKindConsumer: return "consumer" default: return "unspecified" } } // Tracer is the creator of Spans. // // Warning: Methods may be added to this interface in minor releases. See // package documentation on API implementation for information on how to set // default behavior for unimplemented methods. type Tracer interface { // Users of the interface can ignore this. This embedded type is only used // by implementations of this interface. See the "API Implementations" // section of the package documentation for more information. embedded.Tracer // Start creates a span and a context.Context containing the newly-created span. // // If the context.Context provided in `ctx` contains a Span then the newly-created // Span will be a child of that span, otherwise it will be a root span. This behavior // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the // newly-created Span to be a root span even if `ctx` contains a Span. // // When creating a Span it is recommended to provide all known span attributes using // the `WithAttributes()` SpanOption as samplers will only have access to the // attributes provided when a Span is created. // // Any Span that is created MUST also be ended. This is the responsibility of the user. // Implementations of this API may leak memory or other resources if Spans are not ended. Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) } // TracerProvider provides Tracers that are used by instrumentation code to // trace computational workflows. // // A TracerProvider is the collection destination of all Spans from Tracers it // provides, it represents a unique telemetry collection pipeline. How that // pipeline is defined, meaning how those Spans are collected, processed, and // where they are exported, depends on its implementation. Instrumentation // authors do not need to define this implementation, rather just use the // provided Tracers to instrument code. // // Commonly, instrumentation code will accept a TracerProvider implementation // at runtime from its users or it can simply use the globally registered one // (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). // // Warning: Methods may be added to this interface in minor releases. See // package documentation on API implementation for information on how to set // default behavior for unimplemented methods. type TracerProvider interface { // Users of the interface can ignore this. This embedded type is only used // by implementations of this interface. See the "API Implementations" // section of the package documentation for more information. embedded.TracerProvider // Tracer returns a unique Tracer scoped to be used by instrumentation code // to trace computational workflows. The scope and identity of that // instrumentation code is uniquely defined by the name and options passed. // // The passed name needs to uniquely identify instrumentation code. // Therefore, it is recommended that name is the Go package name of the // library providing instrumentation (note: not the code being // instrumented). Instrumentation libraries can have multiple versions, // therefore, the WithInstrumentationVersion option should be used to // distinguish these different codebases. 
Additionally, instrumentation // libraries may sometimes use traces to communicate different domains of // workflow data (i.e. using spans to communicate workflow events only). If // this is the case, the WithScopeAttributes option should be used to // uniquely identify Tracers that handle the different domains of workflow // data. // // If the same name and options are passed multiple times, the same Tracer // will be returned (it is up to the implementation if this will be the // same underlying instance of that Tracer or not). It is not necessary to // call this multiple times with the same name and options to get an // up-to-date Tracer. All implementations will ensure any TracerProvider // configuration changes are propagated to all provided Tracers. // // If name is empty, then an implementation defined default name will be // used instead. // // This method is safe to call concurrently. Tracer(name string, options ...TracerOption) Tracer } opentelemetry-go-1.21.0/trace/trace_test.go000066400000000000000000000361741452547353200207010ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace import ( "bytes" "context" "testing" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/attribute" ) func TestSpanContextIsValid(t *testing.T) { for _, testcase := range []struct { name string tid TraceID sid SpanID want bool }{ { name: "SpanContext.IsValid() returns true if sc has both an Trace ID and Span ID", tid: [16]byte{1}, sid: [8]byte{42}, want: true, }, { name: "SpanContext.IsValid() returns false if sc has neither an Trace ID nor Span ID", tid: TraceID([16]byte{}), sid: [8]byte{}, want: false, }, { name: "SpanContext.IsValid() returns false if sc has a Span ID but not a Trace ID", tid: TraceID([16]byte{}), sid: [8]byte{42}, want: false, }, { name: "SpanContext.IsValid() returns false if sc has a Trace ID but not a Span ID", tid: TraceID([16]byte{1}), sid: [8]byte{}, want: false, }, } { t.Run(testcase.name, func(t *testing.T) { sc := SpanContext{ traceID: testcase.tid, spanID: testcase.sid, } have := sc.IsValid() if have != testcase.want { t.Errorf("Want: %v, but have: %v", testcase.want, have) } }) } } func TestSpanContextEqual(t *testing.T) { a := SpanContext{ traceID: [16]byte{1}, spanID: [8]byte{42}, } b := SpanContext{ traceID: [16]byte{1}, spanID: [8]byte{42}, } c := SpanContext{ traceID: [16]byte{2}, spanID: [8]byte{42}, } if !a.Equal(b) { t.Error("Want: true, but have: false") } if a.Equal(c) { t.Error("Want: false, but have: true") } } func TestSpanContextIsSampled(t *testing.T) { for _, testcase := range []struct { name string tf TraceFlags want bool }{ { name: "SpanContext.IsSampled() returns false if sc is not sampled", want: false, }, { name: "SpanContext.IsSampled() returns true if sc is sampled", tf: FlagsSampled, want: true, }, } { t.Run(testcase.name, func(t *testing.T) { sc := SpanContext{ traceFlags: testcase.tf, } have := sc.IsSampled() if have != testcase.want 
{ t.Errorf("Want: %v, but have: %v", testcase.want, have) } }) } } func TestSpanContextIsRemote(t *testing.T) { for _, testcase := range []struct { name string remote bool want bool }{ { name: "SpanContext.IsRemote() returns false if sc is not remote", want: false, }, { name: "SpanContext.IsRemote() returns true if sc is remote", remote: true, want: true, }, } { t.Run(testcase.name, func(t *testing.T) { sc := SpanContext{ remote: testcase.remote, } have := sc.IsRemote() if have != testcase.want { t.Errorf("Want: %v, but have: %v", testcase.want, have) } }) } } func TestSpanContextMarshalJSON(t *testing.T) { for _, testcase := range []struct { name string tid TraceID sid SpanID tstate TraceState tflags TraceFlags isRemote bool want []byte }{ { name: "SpanContext.MarshalJSON() returns json with partial data", tid: [16]byte{1}, sid: [8]byte{42}, want: []byte(`{"TraceID":"01000000000000000000000000000000","SpanID":"2a00000000000000","TraceFlags":"00","TraceState":"","Remote":false}`), }, { name: "SpanContext.MarshalJSON() returns json with full data", tid: [16]byte{1}, sid: [8]byte{42}, tflags: FlagsSampled, isRemote: true, tstate: TraceState{list: []member{ {Key: "foo", Value: "1"}, }}, want: []byte(`{"TraceID":"01000000000000000000000000000000","SpanID":"2a00000000000000","TraceFlags":"01","TraceState":"foo=1","Remote":true}`), }, } { t.Run(testcase.name, func(t *testing.T) { sc := SpanContext{ traceID: testcase.tid, spanID: testcase.sid, traceFlags: testcase.tflags, traceState: testcase.tstate, remote: testcase.isRemote, } have, err := sc.MarshalJSON() if err != nil { t.Errorf("Marshaling failed: %v", err) } if !bytes.Equal(have, testcase.want) { t.Errorf("Want: %v, but have: %v", string(testcase.want), string(have)) } }) } } func TestSpanIDFromHex(t *testing.T) { for _, testcase := range []struct { name string hex string sid SpanID valid bool }{ { name: "Valid SpanID", sid: SpanID([8]byte{42}), hex: "2a00000000000000", valid: true, }, { name: "Invalid SpanID with invalid length", hex: "80f198ee56343ba", valid: false, }, { name: "Invalid SpanID with invalid char", hex: "80f198ee563433g7", valid: false, }, { name: "Invalid SpanID with uppercase", hex: "80f198ee53ba86F7", valid: false, }, { name: "Invalid SpanID with zero value", hex: "0000000000000000", valid: false, }, } { t.Run(testcase.name, func(t *testing.T) { sid, err := SpanIDFromHex(testcase.hex) if testcase.valid && err != nil { t.Errorf("Expected SpanID %s to be valid but end with error %s", testcase.hex, err.Error()) } else if !testcase.valid && err == nil { t.Errorf("Expected SpanID %s to be invalid but end no error", testcase.hex) } if sid != testcase.sid { t.Errorf("Want: %v, but have: %v", testcase.sid, sid) } }) } } func TestIsValidFromHex(t *testing.T) { for _, testcase := range []struct { name string hex string tid TraceID valid bool }{ { name: "Valid TraceID", tid: TraceID([16]byte{128, 241, 152, 238, 86, 52, 59, 168, 100, 254, 139, 42, 87, 211, 239, 247}), hex: "80f198ee56343ba864fe8b2a57d3eff7", valid: true, }, { name: "Invalid TraceID with invalid length", hex: "80f198ee56343ba864fe8b2a57d3eff", valid: false, }, { name: "Invalid TraceID with invalid char", hex: "80f198ee56343ba864fe8b2a57d3efg7", valid: false, }, { name: "Invalid TraceID with uppercase", hex: "80f198ee56343ba864fe8b2a57d3efF7", valid: false, }, { name: "Invalid TraceID with zero value", hex: "00000000000000000000000000000000", valid: false, }, } { t.Run(testcase.name, func(t *testing.T) { tid, err := TraceIDFromHex(testcase.hex) if testcase.valid && err 
!= nil { t.Errorf("Expected TraceID %s to be valid but end with error %s", testcase.hex, err.Error()) } if !testcase.valid && err == nil { t.Errorf("Expected TraceID %s to be invalid but end no error", testcase.hex) } if tid != testcase.tid { t.Errorf("Want: %v, but have: %v", testcase.tid, tid) } }) } } func TestSpanContextHasTraceID(t *testing.T) { for _, testcase := range []struct { name string tid TraceID want bool }{ { name: "SpanContext.HasTraceID() returns true if both Low and High are nonzero", tid: TraceID([16]byte{1}), want: true, }, { name: "SpanContext.HasTraceID() returns false if neither Low nor High are nonzero", tid: TraceID{}, want: false, }, } { t.Run(testcase.name, func(t *testing.T) { // proto: func (sc SpanContext) HasTraceID() bool{} sc := SpanContext{traceID: testcase.tid} have := sc.HasTraceID() if have != testcase.want { t.Errorf("Want: %v, but have: %v", testcase.want, have) } }) } } func TestSpanContextHasSpanID(t *testing.T) { for _, testcase := range []struct { name string sc SpanContext want bool }{ { name: "SpanContext.HasSpanID() returns true if self.SpanID != 0", sc: SpanContext{spanID: [8]byte{42}}, want: true, }, { name: "SpanContext.HasSpanID() returns false if self.SpanID == 0", sc: SpanContext{}, want: false, }, } { t.Run(testcase.name, func(t *testing.T) { // proto: func (sc SpanContext) HasSpanID() bool {} have := testcase.sc.HasSpanID() if have != testcase.want { t.Errorf("Want: %v, but have: %v", testcase.want, have) } }) } } func TestTraceFlagsIsSampled(t *testing.T) { for _, testcase := range []struct { name string tf TraceFlags want bool }{ { name: "sampled", tf: FlagsSampled, want: true, }, { name: "unused bits are ignored, still not sampled", tf: ^FlagsSampled, want: false, }, { name: "unused bits are ignored, still sampled", tf: FlagsSampled | ^FlagsSampled, want: true, }, { name: "not sampled/default", want: false, }, } { t.Run(testcase.name, func(t *testing.T) { have := testcase.tf.IsSampled() if have != testcase.want { t.Errorf("Want: %v, but have: %v", testcase.want, have) } }) } } func TestTraceFlagsWithSampled(t *testing.T) { for _, testcase := range []struct { name string start TraceFlags sample bool want TraceFlags }{ { name: "sampled unchanged", start: FlagsSampled, want: FlagsSampled, sample: true, }, { name: "become sampled", want: FlagsSampled, sample: true, }, { name: "unused bits are ignored, still not sampled", start: ^FlagsSampled, want: ^FlagsSampled, sample: false, }, { name: "unused bits are ignored, becomes sampled", start: ^FlagsSampled, want: FlagsSampled | ^FlagsSampled, sample: true, }, { name: "not sampled/default", sample: false, }, } { t.Run(testcase.name, func(t *testing.T) { have := testcase.start.WithSampled(testcase.sample) if have != testcase.want { t.Errorf("Want: %v, but have: %v", testcase.want, have) } }) } } func TestStringTraceID(t *testing.T) { for _, testcase := range []struct { name string tid TraceID want string }{ { name: "TraceID.String returns string representation of self.TraceID values > 0", tid: TraceID([16]byte{255}), want: "ff000000000000000000000000000000", }, { name: "TraceID.String returns string representation of self.TraceID values == 0", tid: TraceID([16]byte{}), want: "00000000000000000000000000000000", }, } { t.Run(testcase.name, func(t *testing.T) { // proto: func (t TraceID) String() string {} have := testcase.tid.String() if have != testcase.want { t.Errorf("Want: %s, but have: %s", testcase.want, have) } }) } } func TestStringSpanID(t *testing.T) { for _, testcase := range []struct 
{ name string sid SpanID want string }{ { name: "SpanID.String returns string representation of self.SpanID values > 0", sid: SpanID([8]byte{255}), want: "ff00000000000000", }, { name: "SpanID.String returns string representation of self.SpanID values == 0", sid: SpanID([8]byte{}), want: "0000000000000000", }, } { t.Run(testcase.name, func(t *testing.T) { // proto: func (t TraceID) String() string {} have := testcase.sid.String() if have != testcase.want { t.Errorf("Want: %s, but have: %s", testcase.want, have) } }) } } func TestValidateSpanKind(t *testing.T) { tests := []struct { in SpanKind want SpanKind }{ { SpanKindUnspecified, SpanKindInternal, }, { SpanKindInternal, SpanKindInternal, }, { SpanKindServer, SpanKindServer, }, { SpanKindClient, SpanKindClient, }, { SpanKindProducer, SpanKindProducer, }, { SpanKindConsumer, SpanKindConsumer, }, } for _, test := range tests { if got := ValidateSpanKind(test.in); got != test.want { t.Errorf("ValidateSpanKind(%#v) = %#v, want %#v", test.in, got, test.want) } } } func TestSpanKindString(t *testing.T) { tests := []struct { in SpanKind want string }{ { SpanKindUnspecified, "unspecified", }, { SpanKindInternal, "internal", }, { SpanKindServer, "server", }, { SpanKindClient, "client", }, { SpanKindProducer, "producer", }, { SpanKindConsumer, "consumer", }, } for _, test := range tests { if got := test.in.String(); got != test.want { t.Errorf("%#v.String() = %#v, want %#v", test.in, got, test.want) } } } func assertSpanContextEqual(got SpanContext, want SpanContext) bool { return got.spanID == want.spanID && got.traceID == want.traceID && got.traceFlags == want.traceFlags && got.remote == want.remote && got.traceState.String() == want.traceState.String() } func TestNewSpanContext(t *testing.T) { testCases := []struct { name string config SpanContextConfig expectedSpanContext SpanContext }{ { name: "Complete SpanContext", config: SpanContextConfig{ TraceID: TraceID([16]byte{1}), SpanID: SpanID([8]byte{42}), TraceFlags: 0x1, TraceState: TraceState{list: []member{ {"foo", "bar"}, }}, }, expectedSpanContext: SpanContext{ traceID: TraceID([16]byte{1}), spanID: SpanID([8]byte{42}), traceFlags: 0x1, traceState: TraceState{list: []member{ {"foo", "bar"}, }}, }, }, { name: "Empty SpanContext", config: SpanContextConfig{}, expectedSpanContext: SpanContext{}, }, { name: "Partial SpanContext", config: SpanContextConfig{ TraceID: TraceID([16]byte{1}), SpanID: SpanID([8]byte{42}), }, expectedSpanContext: SpanContext{ traceID: TraceID([16]byte{1}), spanID: SpanID([8]byte{42}), traceFlags: 0x0, traceState: TraceState{}, }, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { sctx := NewSpanContext(tc.config) if !assertSpanContextEqual(sctx, tc.expectedSpanContext) { t.Fatalf("%s: Unexpected context created: %s", tc.name, cmp.Diff(sctx, tc.expectedSpanContext)) } }) } } func TestSpanContextDerivation(t *testing.T) { from := SpanContext{} to := SpanContext{traceID: TraceID([16]byte{1})} modified := from.WithTraceID(to.TraceID()) if !assertSpanContextEqual(modified, to) { t.Fatalf("WithTraceID: Unexpected context created: %s", cmp.Diff(modified, to)) } from = to to.spanID = SpanID([8]byte{42}) modified = from.WithSpanID(to.SpanID()) if !assertSpanContextEqual(modified, to) { t.Fatalf("WithSpanID: Unexpected context created: %s", cmp.Diff(modified, to)) } from = to to.traceFlags = 0x13 modified = from.WithTraceFlags(to.TraceFlags()) if !assertSpanContextEqual(modified, to) { t.Fatalf("WithTraceFlags: Unexpected context created: %s", 
cmp.Diff(modified, to)) } from = to to.traceState = TraceState{list: []member{{"foo", "bar"}}} modified = from.WithTraceState(to.TraceState()) if !assertSpanContextEqual(modified, to) { t.Fatalf("WithTraceState: Unexpected context created: %s", cmp.Diff(modified, to)) } } func TestLinkFromContext(t *testing.T) { k1v1 := attribute.String("key1", "value1") spanCtx := SpanContext{traceID: TraceID([16]byte{1}), remote: true} receiverCtx := ContextWithRemoteSpanContext(context.Background(), spanCtx) link := LinkFromContext(receiverCtx, k1v1) if !assertSpanContextEqual(link.SpanContext, spanCtx) { t.Fatalf("LinkFromContext: Unexpected context created: %s", cmp.Diff(link.SpanContext, spanCtx)) } assert.Equal(t, link.Attributes[0], k1v1) } opentelemetry-go-1.21.0/trace/tracestate.go000066400000000000000000000157201452547353200206750ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace // import "go.opentelemetry.io/otel/trace" import ( "encoding/json" "fmt" "regexp" "strings" ) const ( maxListMembers = 32 listDelimiter = "," // based on the W3C Trace Context specification, see // https://www.w3.org/TR/trace-context-1/#tracestate-header noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]*` withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]*@[a-z][_0-9a-z\-\*\/]*` valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]*[\x21-\x2b\x2d-\x3c\x3e-\x7e]` errInvalidKey errorConst = "invalid tracestate key" errInvalidValue errorConst = "invalid tracestate value" errInvalidMember errorConst = "invalid tracestate list-member" errMemberNumber errorConst = "too many list-members in tracestate" errDuplicate errorConst = "duplicate list-member in tracestate" ) var ( noTenantKeyRe = regexp.MustCompile(`^` + noTenantKeyFormat + `$`) withTenantKeyRe = regexp.MustCompile(`^` + withTenantKeyFormat + `$`) valueRe = regexp.MustCompile(`^` + valueFormat + `$`) memberRe = regexp.MustCompile(`^\s*((?:` + noTenantKeyFormat + `)|(?:` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) ) type member struct { Key string Value string } func newMember(key, value string) (member, error) { if len(key) > 256 { return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) } if !noTenantKeyRe.MatchString(key) { if !withTenantKeyRe.MatchString(key) { return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) } atIndex := strings.LastIndex(key, "@") if atIndex > 241 || len(key)-1-atIndex > 14 { return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) } } if len(value) > 256 || !valueRe.MatchString(value) { return member{}, fmt.Errorf("%w: %s", errInvalidValue, value) } return member{Key: key, Value: value}, nil } func parseMember(m string) (member, error) { matches := memberRe.FindStringSubmatch(m) if len(matches) != 3 { return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) } result, e := newMember(matches[1], matches[2]) if e != nil { return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) } return result, nil } // String encodes member into a string compliant with the W3C Trace 
Context // specification. func (m member) String() string { return fmt.Sprintf("%s=%s", m.Key, m.Value) } // TraceState provides additional vendor-specific trace identification // information across different distributed tracing systems. It represents an // immutable list consisting of key/value pairs; each pair is referred to as a // list-member. // // TraceState conforms to the W3C Trace Context specification // (https://www.w3.org/TR/trace-context-1). All operations that create or copy // a TraceState do so by validating all input and will only produce TraceState // that conform to the specification. Specifically, this means that all // list-member's key/value pairs are valid, no duplicate list-members exist, // and the maximum number of list-members (32) is not exceeded. type TraceState struct { //nolint:revive // revive complains about stutter of `trace.TraceState` // list is the members in order. list []member } var _ json.Marshaler = TraceState{} // ParseTraceState attempts to decode a TraceState from the passed // string. It returns an error if the input is invalid according to the W3C // Trace Context specification. func ParseTraceState(tracestate string) (TraceState, error) { if tracestate == "" { return TraceState{}, nil } wrapErr := func(err error) error { return fmt.Errorf("failed to parse tracestate: %w", err) } var members []member found := make(map[string]struct{}) for _, memberStr := range strings.Split(tracestate, listDelimiter) { if len(memberStr) == 0 { continue } m, err := parseMember(memberStr) if err != nil { return TraceState{}, wrapErr(err) } if _, ok := found[m.Key]; ok { return TraceState{}, wrapErr(errDuplicate) } found[m.Key] = struct{}{} members = append(members, m) if n := len(members); n > maxListMembers { return TraceState{}, wrapErr(errMemberNumber) } } return TraceState{list: members}, nil } // MarshalJSON marshals the TraceState into JSON. func (ts TraceState) MarshalJSON() ([]byte, error) { return json.Marshal(ts.String()) } // String encodes the TraceState into a string compliant with the W3C // Trace Context specification. The returned string will be invalid if the // TraceState contains any invalid members. func (ts TraceState) String() string { members := make([]string, len(ts.list)) for i, m := range ts.list { members[i] = m.String() } return strings.Join(members, listDelimiter) } // Get returns the value paired with key from the corresponding TraceState // list-member if it exists; otherwise an empty string is returned. func (ts TraceState) Get(key string) string { for _, member := range ts.list { if member.Key == key { return member.Value } } return "" } // Insert adds a new list-member defined by the key/value pair to the // TraceState. If a list-member already exists for the given key, that // list-member's value is updated. The new or updated list-member is always // moved to the beginning of the TraceState as specified by the W3C Trace // Context specification. // // If key or value are invalid according to the W3C Trace Context // specification, an error is returned with the original TraceState. // // If adding a new list-member means the TraceState would have more members // than is allowed, the new list-member will be inserted and the right-most // list-member will be dropped in the returned TraceState.
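//
// A brief usage sketch (the keys and values here are illustrative only; the
// resulting ordering matches the behavior exercised in tracestate_test.go):
//
//	ts, _ := ParseTraceState("foo=1,bar=2")
//	ts, _ = ts.Insert("baz", "3")  // ts.String() == "baz=3,foo=1,bar=2"
//	ts, _ = ts.Insert("bar", "42") // ts.String() == "bar=42,baz=3,foo=1"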
func (ts TraceState) Insert(key, value string) (TraceState, error) { m, err := newMember(key, value) if err != nil { return ts, err } cTS := ts.Delete(key) if cTS.Len()+1 <= maxListMembers { cTS.list = append(cTS.list, member{}) } // When the number of members exceeds capacity, drop the "right-most". copy(cTS.list[1:], cTS.list) cTS.list[0] = m return cTS, nil } // Delete returns a copy of the TraceState with the list-member identified by // key removed. func (ts TraceState) Delete(key string) TraceState { members := make([]member, ts.Len()) copy(members, ts.list) for i, member := range ts.list { if member.Key == key { members = append(members[:i], members[i+1:]...) // TraceState should contain no duplicate members. break } } return TraceState{list: members} } // Len returns the number of list-members in the TraceState. func (ts TraceState) Len() int { return len(ts.list) } opentelemetry-go-1.21.0/trace/tracestate_test.go000066400000000000000000000407001452547353200217300ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace import ( "encoding/json" "fmt" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // Taken from the W3C tests: // https://github.com/w3c/trace-context/blob/dcd3ad9b7d6ac36f70ff3739874b73c11b0302a1/test/test_data.json var testcases = []struct { name string in string tracestate TraceState out string err error }{ { name: "duplicate with the same value", in: "foo=1,foo=1", err: errDuplicate, }, { name: "duplicate with different values", in: "foo=1,foo=2", err: errDuplicate, }, { name: "improperly formatted key/value pair", in: "foo =1", err: errInvalidMember, }, { name: "upper case key", in: "FOO=1", err: errInvalidMember, }, { name: "key with invalid character", in: "foo.bar=1", err: errInvalidMember, }, { name: "multiple keys, one with empty tenant key", in: "foo@=1,bar=2", err: errInvalidMember, }, { name: "multiple keys, one with only tenant", in: "@foo=1,bar=2", err: errInvalidMember, }, { name: "multiple keys, one with double tenant separator", in: "foo@@bar=1,bar=2", err: errInvalidMember, }, { name: "multiple keys, one with multiple tenants", in: "foo@bar@baz=1,bar=2", err: errInvalidMember, }, { name: "key too long", in: "foo=1,zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=1", err: errInvalidMember, }, { name: "key too long, with tenant", in: "foo=1,tttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt@v=1", err: errInvalidMember, }, { name: "tenant too long", in: "foo=1,t@vvvvvvvvvvvvvvv=1", err: errInvalidMember, }, { name: "multiple values for a single key", in: 
"foo=bar=baz", err: errInvalidMember, }, { name: "no value", in: "foo=,bar=3", err: errInvalidMember, }, { name: "too many members", in: "bar01=01,bar02=02,bar03=03,bar04=04,bar05=05,bar06=06,bar07=07,bar08=08,bar09=09,bar10=10,bar11=11,bar12=12,bar13=13,bar14=14,bar15=15,bar16=16,bar17=17,bar18=18,bar19=19,bar20=20,bar21=21,bar22=22,bar23=23,bar24=24,bar25=25,bar26=26,bar27=27,bar28=28,bar29=29,bar30=30,bar31=31,bar32=32,bar33=33", err: errMemberNumber, }, { name: "valid key/value list", in: "abcdefghijklmnopqrstuvwxyz0123456789_-*/= !\"#$%&'()*+-./0123456789:;<>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~", out: "abcdefghijklmnopqrstuvwxyz0123456789_-*/= !\"#$%&'()*+-./0123456789:;<>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~", tracestate: TraceState{list: []member{ { Key: "abcdefghijklmnopqrstuvwxyz0123456789_-*/", Value: " !\"#$%&'()*+-./0123456789:;<>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~", }, }}, }, { name: "valid key/value list with tenant", in: "abcdefghijklmnopqrstuvwxyz0123456789_-*/@a-z0-9_-*/= !\"#$%&'()*+-./0123456789:;<>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~", out: "abcdefghijklmnopqrstuvwxyz0123456789_-*/@a-z0-9_-*/= !\"#$%&'()*+-./0123456789:;<>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~", tracestate: TraceState{list: []member{ { Key: "abcdefghijklmnopqrstuvwxyz0123456789_-*/@a-z0-9_-*/", Value: " !\"#$%&'()*+-./0123456789:;<>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~", }, }}, }, { name: "empty input", // Empty input should result in no error and a zero value // TraceState being returned, that TraceState should be encoded as an // empty string. }, { name: "single key and value", in: "foo=1", out: "foo=1", tracestate: TraceState{list: []member{ {Key: "foo", Value: "1"}, }}, }, { name: "single key and value with empty separator", in: "foo=1,", out: "foo=1", tracestate: TraceState{list: []member{ {Key: "foo", Value: "1"}, }}, }, { name: "multiple keys and values", in: "foo=1,bar=2", out: "foo=1,bar=2", tracestate: TraceState{list: []member{ {Key: "foo", Value: "1"}, {Key: "bar", Value: "2"}, }}, }, { name: "with a key at maximum length", in: "foo=1,zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=1", out: "foo=1,zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz=1", tracestate: TraceState{list: []member{ { Key: "foo", Value: "1", }, { Key: "zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz", Value: "1", }, }}, }, { name: "with a key and tenant at maximum length", in: "foo=1,ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt@vvvvvvvvvvvvvv=1", out: 
"foo=1,ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt@vvvvvvvvvvvvvv=1", tracestate: TraceState{list: []member{ { Key: "foo", Value: "1", }, { Key: "ttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttttt@vvvvvvvvvvvvvv", Value: "1", }, }}, }, { name: "with maximum members", in: "bar01=01,bar02=02,bar03=03,bar04=04,bar05=05,bar06=06,bar07=07,bar08=08,bar09=09,bar10=10,bar11=11,bar12=12,bar13=13,bar14=14,bar15=15,bar16=16,bar17=17,bar18=18,bar19=19,bar20=20,bar21=21,bar22=22,bar23=23,bar24=24,bar25=25,bar26=26,bar27=27,bar28=28,bar29=29,bar30=30,bar31=31,bar32=32", out: "bar01=01,bar02=02,bar03=03,bar04=04,bar05=05,bar06=06,bar07=07,bar08=08,bar09=09,bar10=10,bar11=11,bar12=12,bar13=13,bar14=14,bar15=15,bar16=16,bar17=17,bar18=18,bar19=19,bar20=20,bar21=21,bar22=22,bar23=23,bar24=24,bar25=25,bar26=26,bar27=27,bar28=28,bar29=29,bar30=30,bar31=31,bar32=32", tracestate: TraceState{list: []member{ {Key: "bar01", Value: "01"}, {Key: "bar02", Value: "02"}, {Key: "bar03", Value: "03"}, {Key: "bar04", Value: "04"}, {Key: "bar05", Value: "05"}, {Key: "bar06", Value: "06"}, {Key: "bar07", Value: "07"}, {Key: "bar08", Value: "08"}, {Key: "bar09", Value: "09"}, {Key: "bar10", Value: "10"}, {Key: "bar11", Value: "11"}, {Key: "bar12", Value: "12"}, {Key: "bar13", Value: "13"}, {Key: "bar14", Value: "14"}, {Key: "bar15", Value: "15"}, {Key: "bar16", Value: "16"}, {Key: "bar17", Value: "17"}, {Key: "bar18", Value: "18"}, {Key: "bar19", Value: "19"}, {Key: "bar20", Value: "20"}, {Key: "bar21", Value: "21"}, {Key: "bar22", Value: "22"}, {Key: "bar23", Value: "23"}, {Key: "bar24", Value: "24"}, {Key: "bar25", Value: "25"}, {Key: "bar26", Value: "26"}, {Key: "bar27", Value: "27"}, {Key: "bar28", Value: "28"}, {Key: "bar29", Value: "29"}, {Key: "bar30", Value: "30"}, {Key: "bar31", Value: "31"}, {Key: "bar32", Value: "32"}, }}, }, { name: "with several members", in: "foo=1,bar=2,rojo=1,congo=2,baz=3", out: "foo=1,bar=2,rojo=1,congo=2,baz=3", tracestate: TraceState{list: []member{ {Key: "foo", Value: "1"}, {Key: "bar", Value: "2"}, {Key: "rojo", Value: "1"}, {Key: "congo", Value: "2"}, {Key: "baz", Value: "3"}, }}, }, { name: "with tabs between members", in: "foo=1 \t , \t bar=2, \t baz=3", out: "foo=1,bar=2,baz=3", tracestate: TraceState{list: []member{ {Key: "foo", Value: "1"}, {Key: "bar", Value: "2"}, {Key: "baz", Value: "3"}, }}, }, { name: "with multiple tabs between members", in: "foo=1\t \t,\t \tbar=2,\t \tbaz=3", out: "foo=1,bar=2,baz=3", tracestate: TraceState{list: []member{ {Key: "foo", Value: "1"}, {Key: "bar", Value: "2"}, {Key: "baz", Value: "3"}, }}, }, { name: "with space at the end of the member", in: "foo=1 ", out: "foo=1", tracestate: TraceState{list: []member{ {Key: "foo", Value: "1"}, }}, }, { name: "with tab at the end of the member", in: "foo=1\t", out: "foo=1", tracestate: TraceState{list: []member{ {Key: "foo", Value: "1"}, }}, }, { name: "with tab and space at the end of the member", in: "foo=1 \t", out: "foo=1", tracestate: TraceState{list: []member{ {Key: "foo", Value: "1"}, }}, }, } var maxMembers = func() TraceState { members := make([]member, maxListMembers) for i := 0; i < maxListMembers; i++ { members[i] 
= member{ Key: fmt.Sprintf("key%d", i+1), Value: fmt.Sprintf("value%d", i+1), } } return TraceState{list: members} }() func TestParseTraceState(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { got, err := ParseTraceState(tc.in) assert.Equal(t, tc.tracestate, got) if tc.err != nil { assert.ErrorIs(t, err, tc.err, tc.in) } else { assert.NoError(t, err, tc.in) } }) } } func TestTraceStateString(t *testing.T) { for _, tc := range testcases { if tc.err != nil { // Only test non-zero value TraceState. continue } t.Run(tc.name, func(t *testing.T) { assert.Equal(t, tc.out, tc.tracestate.String()) }) } } func TestTraceStateMarshalJSON(t *testing.T) { for _, tc := range testcases { if tc.err != nil { // Only test non-zero value TraceState. continue } t.Run(tc.name, func(t *testing.T) { // Encode UTF-8. expected, err := json.Marshal(tc.out) require.NoError(t, err) actual, err := json.Marshal(tc.tracestate) require.NoError(t, err) assert.Equal(t, expected, actual) }) } } func TestTraceStateGet(t *testing.T) { testCases := []struct { name string key string expected string }{ { name: "OK case", key: "key16", expected: "value16", }, { name: "not found", key: "keyxx", expected: "", }, { name: "invalid W3C key", key: "key!", expected: "", }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { assert.Equal(t, tc.expected, maxMembers.Get(tc.key)) }) } } func TestTraceStateDelete(t *testing.T) { ts := TraceState{list: []member{ {Key: "key1", Value: "val1"}, {Key: "key2", Value: "val2"}, {Key: "key3", Value: "val3"}, }} testCases := []struct { name string key string expected TraceState }{ { name: "OK case", key: "key2", expected: TraceState{list: []member{ {Key: "key1", Value: "val1"}, {Key: "key3", Value: "val3"}, }}, }, { name: "Non-existing key", key: "keyx", expected: TraceState{list: []member{ {Key: "key1", Value: "val1"}, {Key: "key2", Value: "val2"}, {Key: "key3", Value: "val3"}, }}, }, { name: "Invalid key", key: "in va lid", expected: TraceState{list: []member{ {Key: "key1", Value: "val1"}, {Key: "key2", Value: "val2"}, {Key: "key3", Value: "val3"}, }}, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { assert.Equal(t, tc.expected, ts.Delete(tc.key)) }) } } func TestTraceStateInsert(t *testing.T) { ts := TraceState{list: []member{ {Key: "key1", Value: "val1"}, {Key: "key2", Value: "val2"}, {Key: "key3", Value: "val3"}, }} testCases := []struct { name string tracestate TraceState key, value string expected TraceState err error }{ { name: "add new", tracestate: ts, key: "key4@vendor", value: "val4", expected: TraceState{list: []member{ {Key: "key4@vendor", Value: "val4"}, {Key: "key1", Value: "val1"}, {Key: "key2", Value: "val2"}, {Key: "key3", Value: "val3"}, }}, }, { name: "replace", tracestate: ts, key: "key2", value: "valX", expected: TraceState{list: []member{ {Key: "key2", Value: "valX"}, {Key: "key1", Value: "val1"}, {Key: "key3", Value: "val3"}, }}, }, { name: "invalid key", tracestate: ts, key: "key!", value: "val", expected: ts, err: errInvalidKey, }, { name: "invalid value", tracestate: ts, key: "key", value: "v=l", expected: ts, err: errInvalidValue, }, { name: "invalid key/value", tracestate: ts, key: "key!", value: "v=l", expected: ts, err: errInvalidKey, }, { name: "drop the right-most member(oldest) in queue", tracestate: maxMembers, key: "keyx", value: "valx", expected: func() TraceState { // Prepend the new element and remove the oldest one, which is over capacity. 
return TraceState{ list: append( []member{{Key: "keyx", Value: "valx"}}, maxMembers.list[:len(maxMembers.list)-1]..., ), } }(), }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { actual, err := tc.tracestate.Insert(tc.key, tc.value) assert.ErrorIs(t, err, tc.err, tc.name) if tc.err != nil { assert.Equal(t, tc.tracestate, actual) } else { assert.Equal(t, tc.expected, actual) } }) } } func TestTraceStateLen(t *testing.T) { ts := TraceState{} assert.Equal(t, 0, ts.Len(), "zero value TraceState is empty") key := "key" ts = TraceState{list: []member{{key, "value"}}} assert.Equal(t, 1, ts.Len(), "TraceState with one value") } func TestTraceStateImmutable(t *testing.T) { k0, v0 := "k0", "v0" ts0 := TraceState{list: []member{{k0, v0}}} assert.Equal(t, v0, ts0.Get(k0)) // Insert should not modify the original. k1, v1 := "k1", "v1" ts1, err := ts0.Insert(k1, v1) require.NoError(t, err) assert.Equal(t, v0, ts0.Get(k0)) assert.Equal(t, "", ts0.Get(k1)) assert.Equal(t, v0, ts1.Get(k0)) assert.Equal(t, v1, ts1.Get(k1)) // Update should not modify the original. v2 := "v2" ts2, err := ts1.Insert(k1, v2) require.NoError(t, err) assert.Equal(t, v0, ts0.Get(k0)) assert.Equal(t, "", ts0.Get(k1)) assert.Equal(t, v0, ts1.Get(k0)) assert.Equal(t, v1, ts1.Get(k1)) assert.Equal(t, v0, ts2.Get(k0)) assert.Equal(t, v2, ts2.Get(k1)) // Delete should not modify the original. ts3 := ts2.Delete(k0) assert.Equal(t, v0, ts0.Get(k0)) assert.Equal(t, v0, ts1.Get(k0)) assert.Equal(t, v0, ts2.Get(k0)) assert.Equal(t, "", ts3.Get(k0)) } func BenchmarkParseTraceState(b *testing.B) { benches := []struct { name string in string }{ { name: "single key", in: "somewhatRealisticKeyLength=someValueAbcdefgh1234567890", }, { name: "tenant single key", in: "somewhatRealisticKeyLength@someTenant=someValueAbcdefgh1234567890", }, { name: "three keys", in: "someKeyName.One=someValue1,someKeyName.Two=someValue2,someKeyName.Three=someValue3", }, { name: "tenant three keys", in: "someKeyName.One@tenant=someValue1,someKeyName.Two@tenant=someValue2,someKeyName.Three@tenant=someValue3", }, } for _, bench := range benches { b.Run(bench.name, func(b *testing.B) { b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { _, _ = ParseTraceState(bench.in) } }) } } opentelemetry-go-1.21.0/trace_test.go000066400000000000000000000023431452547353200175720ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package otel import ( "testing" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" "go.opentelemetry.io/otel/trace/noop" ) type testTracerProvider struct{ embedded.TracerProvider } var _ trace.TracerProvider = &testTracerProvider{} func (*testTracerProvider) Tracer(_ string, _ ...trace.TracerOption) trace.Tracer { return noop.NewTracerProvider().Tracer("") } func TestMultipleGlobalTracerProvider(t *testing.T) { p1 := testTracerProvider{} p2 := noop.NewTracerProvider() SetTracerProvider(&p1) SetTracerProvider(p2) got := GetTracerProvider() assert.Equal(t, p2, got) } opentelemetry-go-1.21.0/verify_examples.sh000077500000000000000000000045151452547353200206520ustar00rootroot00000000000000#!/bin/bash # Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -euo pipefail cd $(dirname $0) TOOLS_DIR=$(pwd)/.tools if [ -z "${GOPATH}" ] ; then printf "GOPATH is not defined.\n" exit -1 fi if [ ! -d "${GOPATH}" ] ; then printf "GOPATH ${GOPATH} is invalid \n" exit -1 fi # Pre-requisites if ! git diff --quiet; then \ git status printf "\n\nError: working tree is not clean\n" exit -1 fi if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then printf "$(git log -1)" printf "\n\nError: HEAD is not pointing to a tagged version" fi make ${TOOLS_DIR}/gojq DIR_TMP="${GOPATH}/src/oteltmp/" rm -rf $DIR_TMP mkdir -p $DIR_TMP printf "Copy examples to ${DIR_TMP}\n" cp -a ./example ${DIR_TMP} # Update go.mod files printf "Update go.mod: rename module and remove replace\n" PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort) for dir in $PACKAGE_DIRS; do printf " Update go.mod for $dir\n" (cd "${DIR_TMP}/${dir}" && \ # replaces is ("mod1" "mod2" …) replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \ # strip double quotes replaces=("${replaces[@]%\"}") && \ replaces=("${replaces[@]#\"}") && \ # make an array (-dropreplace=mod1 -dropreplace=mod2 …) dropreplaces=("${replaces[@]/#/-dropreplace=}") && \ go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \ go mod tidy) done printf "Update done:\n\n" # Build directories that contain main package. These directories are different than # directories that contain go.mod files. printf "Build examples:\n" EXAMPLES=$(./get_main_pkgs.sh ./example) for ex in $EXAMPLES; do printf " Build $ex in ${DIR_TMP}/${ex}\n" (cd "${DIR_TMP}/${ex}" && \ go build .) done # Cleanup printf "Remove copied files.\n" rm -rf $DIR_TMP opentelemetry-go-1.21.0/version.go000066400000000000000000000013671452547353200171270ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { return "1.21.0" } opentelemetry-go-1.21.0/version_test.go000066400000000000000000000023011452547353200201530ustar00rootroot00000000000000// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package otel_test import ( "regexp" "testing" "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel" ) // regex taken from https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string var versionRegex = regexp.MustCompile(`^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)` + `(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)` + `(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?` + `(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$`) func TestVersionSemver(t *testing.T) { v := otel.Version() assert.NotNil(t, versionRegex.FindStringSubmatch(v), "version is not semver: %s", v) } opentelemetry-go-1.21.0/versions.yaml000066400000000000000000000042411452547353200176410ustar00rootroot00000000000000# Copyright The OpenTelemetry Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
module-sets: stable-v1: version: v1.21.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opentracing - go.opentelemetry.io/otel/bridge/opentracing/test - go.opentelemetry.io/otel/example/dice - go.opentelemetry.io/otel/example/namedtracer - go.opentelemetry.io/otel/example/otel-collector - go.opentelemetry.io/otel/example/passthrough - go.opentelemetry.io/otel/example/zipkin - go.opentelemetry.io/otel/exporters/otlp/otlptrace - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp - go.opentelemetry.io/otel/exporters/stdout/stdouttrace - go.opentelemetry.io/otel/exporters/zipkin - go.opentelemetry.io/otel/metric - go.opentelemetry.io/otel/sdk - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: version: v0.44.0 modules: - go.opentelemetry.io/otel/bridge/opencensus - go.opentelemetry.io/otel/bridge/opencensus/test - go.opentelemetry.io/otel/example/opencensus - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/prometheus - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric experimental-schema: version: v0.0.7 modules: - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools