opencensus-go-0.24.0/.github/CODEOWNERS: # Code owners file. # This file controls who is tagged for review for any given pull request. # For anything not explicitly taken by someone else: * @census-instrumentation/global-owners @rghetia
opencensus-go-0.24.0/.github/ISSUE_TEMPLATE/bug_report.md: --- name: Bug report about: Create a report to help us improve labels: bug --- Please answer these questions before submitting a bug report. ### What version of OpenCensus are you using? ### What version of Go are you using? ### What did you do? If possible, provide a recipe for reproducing the error. ### What did you expect to see? ### What did you see instead? ### Additional context Add any other context about the problem here.
opencensus-go-0.24.0/.github/ISSUE_TEMPLATE/feature_request.md: --- name: Feature request about: Suggest an idea for this project labels: feature-request --- **NB:** Before opening a feature request against this repo, consider whether the feature should/could be implemented in OpenCensus libraries in other languages. If so, please [open an issue on opencensus-specs](https://github.com/census-instrumentation/opencensus-specs/issues/new) first. **Is your feature request related to a problem? Please describe.** A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] **Describe the solution you'd like** A clear and concise description of what you want to happen. **Describe alternatives you've considered** A clear and concise description of any alternative solutions or features you've considered. **Additional context** Add any other context or screenshots about the feature request here.
opencensus-go-0.24.0/.github/workflows/build.yml: name: Build on: pull_request: branches: - master jobs: build: runs-on: ubuntu-20.04 env: GO11MODULE: 'on' steps: - uses: actions/checkout@v2 with: submodules: true fetch-depth: 0 # we want all tags for version check. lfs: true - uses: actions/setup-go@v2 with: go-version: '^1.11.0' - name: Build and test run: make install-tools && make travis-ci && go run internal/check/version.go
opencensus-go-0.24.0/.gitignore: /.idea/ # go.opencensus.io/exporter/aws /exporter/aws/ # Exclude vendor, use dep ensure after checkout: /vendor/github.com/ /vendor/golang.org/ /vendor/google.golang.org/
opencensus-go-0.24.0/AUTHORS: Google Inc.
opencensus-go-0.24.0/CONTRIBUTING.md000066400000000000000000000033371433102037600166200ustar00rootroot00000000000000# How to contribute We'd love to accept your patches and contributions to this project. There are just a few small guidelines you need to follow. ## Contributor License Agreement Contributions to this project must be accompanied by a Contributor License Agreement. You (or your employer) retain the copyright to your contribution, this simply gives us permission to use and redistribute your contributions as part of the project. Head over to to see your current agreements on file or to sign a new one. You generally only need to submit a CLA once, so if you've already submitted one (even if it was for a different project), you probably don't need to do it again. ## Code reviews All submissions, including submissions by project members, require review. We use GitHub pull requests for this purpose. Consult [GitHub Help] for more information on using pull requests. [GitHub Help]: https://help.github.com/articles/about-pull-requests/ ## Instructions Fork the repo, checkout the upstream repo to your GOPATH by: ``` $ go get -d go.opencensus.io ``` Add your fork as an origin: ``` cd $(go env GOPATH)/src/go.opencensus.io git remote add fork git@github.com:YOUR_GITHUB_USERNAME/opencensus-go.git ``` Run tests: ``` $ make install-tools # Only first time. $ make ``` Checkout a new branch, make modifications and push the branch to your fork: ``` $ git checkout -b feature # edit files $ git commit $ git push fork feature ``` Open a pull request against the main opencensus-go repo. ## General Notes This project uses Appveyor and Travis for CI. The dependencies are managed with `go mod` if you work with the sources under your `$GOPATH` you need to set the environment variable `GO111MODULE=on`.opencensus-go-0.24.0/LICENSE000066400000000000000000000261351433102037600153750ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.opencensus-go-0.24.0/Makefile000066400000000000000000000052571433102037600160320ustar00rootroot00000000000000# TODO: Fix this on windows. ALL_SRC := $(shell find . -name '*.go' \ -not -path './vendor/*' \ -not -path '*/gen-go/*' \ -type f | sort) ALL_PKGS := $(shell go list $(sort $(dir $(ALL_SRC)))) GOTEST_OPT?=-v -race -timeout 30s GOTEST_OPT_WITH_COVERAGE = $(GOTEST_OPT) -coverprofile=coverage.txt -covermode=atomic GOTEST=go test GOIMPORTS=goimports GOLINT=golint GOVET=go vet EMBEDMD=embedmd # TODO decide if we need to change these names. TRACE_ID_LINT_EXCEPTION="type name will be used as trace.TraceID by other packages" TRACE_OPTION_LINT_EXCEPTION="type name will be used as trace.TraceOptions by other packages" README_FILES := $(shell find . 
-name '*README.md' | sort | tr '\n' ' ') .DEFAULT_GOAL := imports-lint-vet-embedmd-test .PHONY: imports-lint-vet-embedmd-test imports-lint-vet-embedmd-test: imports lint vet embedmd test # TODO enable test-with-coverage in tavis .PHONY: travis-ci travis-ci: imports lint vet embedmd test test-386 all-pkgs: @echo $(ALL_PKGS) | tr ' ' '\n' | sort all-srcs: @echo $(ALL_SRC) | tr ' ' '\n' | sort .PHONY: test test: $(GOTEST) $(GOTEST_OPT) $(ALL_PKGS) .PHONY: test-386 test-386: GOARCH=386 $(GOTEST) -v -timeout 30s $(ALL_PKGS) .PHONY: test-with-coverage test-with-coverage: $(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(ALL_PKGS) .PHONY: imports imports: @IMPORTSOUT=`$(GOIMPORTS) -l $(ALL_SRC) 2>&1`; \ if [ "$$IMPORTSOUT" ]; then \ echo "$(GOIMPORTS) FAILED => goimports the following files:\n"; \ echo "$$IMPORTSOUT\n"; \ exit 1; \ else \ echo "Imports finished successfully"; \ fi .PHONY: lint lint: @LINTOUT=`$(GOLINT) $(ALL_PKGS) | grep -v $(TRACE_ID_LINT_EXCEPTION) | grep -v $(TRACE_OPTION_LINT_EXCEPTION) 2>&1`; \ if [ "$$LINTOUT" ]; then \ echo "$(GOLINT) FAILED => clean the following lint errors:\n"; \ echo "$$LINTOUT\n"; \ exit 1; \ else \ echo "Lint finished successfully"; \ fi .PHONY: vet vet: # TODO: Understand why go vet downloads "github.com/google/go-cmp v0.2.0" @VETOUT=`$(GOVET) ./... | grep -v "go: downloading" 2>&1`; \ if [ "$$VETOUT" ]; then \ echo "$(GOVET) FAILED => go vet the following files:\n"; \ echo "$$VETOUT\n"; \ exit 1; \ else \ echo "Vet finished successfully"; \ fi .PHONY: embedmd embedmd: @EMBEDMDOUT=`$(EMBEDMD) -d $(README_FILES) 2>&1`; \ if [ "$$EMBEDMDOUT" ]; then \ echo "$(EMBEDMD) FAILED => embedmd the following files:\n"; \ echo "$$EMBEDMDOUT\n"; \ exit 1; \ else \ echo "Embedmd finished successfully"; \ fi .PHONY: install-tools install-tools: go install golang.org/x/lint/golint@latest go install golang.org/x/tools/cmd/cover@latest go install golang.org/x/tools/cmd/goimports@latest go install github.com/rakyll/embedmd@latest opencensus-go-0.24.0/README.md000066400000000000000000000240661433102037600156500ustar00rootroot00000000000000# OpenCensus Libraries for Go [![Build Status][travis-image]][travis-url] [![Windows Build Status][appveyor-image]][appveyor-url] [![GoDoc][godoc-image]][godoc-url] [![Gitter chat][gitter-image]][gitter-url] OpenCensus Go is a Go implementation of OpenCensus, a toolkit for collecting application performance and behavior monitoring data. Currently it consists of three major components: tags, stats and tracing. #### OpenCensus and OpenTracing have merged to form OpenTelemetry, which serves as the next major version of OpenCensus and OpenTracing. OpenTelemetry will offer backwards compatibility with existing OpenCensus integrations, and we will continue to make security patches to existing OpenCensus libraries for two years. Read more about the merger [here](https://medium.com/opentracing/a-roadmap-to-convergence-b074e5815289). ## Installation ``` $ go get -u go.opencensus.io ``` The API of this project is still evolving, see: [Deprecation Policy](#deprecation-policy). The use of vendoring or a dependency management tool is recommended. ## Prerequisites OpenCensus Go libraries require Go 1.8 or later. 
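Once installed, the essential wiring is to register at least one exporter before recording any data. The sketch below is illustrative only; it assumes the `PrintExporter` that ships in this repository under `examples/exporter`, and any exporter implementing the same interfaces is wired up the same way:

```go
package main

import (
	"time"

	"go.opencensus.io/examples/exporter"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/trace"
)

func main() {
	// PrintExporter implements both view.Exporter and trace.Exporter,
	// so a single value can be registered for stats and traces alike.
	pe := &exporter.PrintExporter{}
	view.RegisterExporter(pe)
	trace.RegisterExporter(pe)

	// Demo-friendly settings: sample every span and report views every second.
	trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
	view.SetReportingPeriod(time.Second)

	// ... register views, start spans and record measurements here ...
}
```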
## Getting Started The easiest way to get started using OpenCensus in your application is to use an existing integration with your RPC framework: * [net/http](https://godoc.org/go.opencensus.io/plugin/ochttp) * [gRPC](https://godoc.org/go.opencensus.io/plugin/ocgrpc) * [database/sql](https://godoc.org/github.com/opencensus-integrations/ocsql) * [Go kit](https://godoc.org/github.com/go-kit/kit/tracing/opencensus) * [Groupcache](https://godoc.org/github.com/orijtech/groupcache) * [Caddy webserver](https://godoc.org/github.com/orijtech/caddy) * [MongoDB](https://godoc.org/github.com/orijtech/mongo-go-driver) * [Redis gomodule/redigo](https://godoc.org/github.com/orijtech/redigo) * [Redis goredis/redis](https://godoc.org/github.com/orijtech/redis) * [Memcache](https://godoc.org/github.com/orijtech/gomemcache) If you're using a framework not listed here, you could either implement your own middleware for your framework or use [custom stats](#stats) and [spans](#spans) directly in your application. ## Exporters OpenCensus can export instrumentation data to various backends. OpenCensus has exporter implementations for the following, users can implement their own exporters by implementing the exporter interfaces ([stats](https://godoc.org/go.opencensus.io/stats/view#Exporter), [trace](https://godoc.org/go.opencensus.io/trace#Exporter)): * [Prometheus][exporter-prom] for stats * [OpenZipkin][exporter-zipkin] for traces * [Stackdriver][exporter-stackdriver] Monitoring for stats and Trace for traces * [Jaeger][exporter-jaeger] for traces * [AWS X-Ray][exporter-xray] for traces * [Datadog][exporter-datadog] for stats and traces * [Graphite][exporter-graphite] for stats * [Honeycomb][exporter-honeycomb] for traces * [New Relic][exporter-newrelic] for stats and traces ## Overview ![OpenCensus Overview](https://i.imgur.com/cf4ElHE.jpg) In a microservices environment, a user request may go through multiple services until there is a response. OpenCensus allows you to instrument your services and collect diagnostics data all through your services end-to-end. ## Tags Tags represent propagated key-value pairs. They are propagated using `context.Context` in the same process or can be encoded to be transmitted on the wire. Usually, this will be handled by an integration plugin, e.g. `ocgrpc.ServerHandler` and `ocgrpc.ClientHandler` for gRPC. Package `tag` allows adding or modifying tags in the current context. [embedmd]:# (internal/readme/tags.go new) ```go ctx, err := tag.New(ctx, tag.Insert(osKey, "macOS-10.12.5"), tag.Upsert(userIDKey, "cde36753ed"), ) if err != nil { log.Fatal(err) } ``` ## Stats OpenCensus is a low-overhead framework even if instrumentation is always enabled. In order to be so, it is optimized to make recording of data points fast and separate from the data aggregation. OpenCensus stats collection happens in two stages: * Definition of measures and recording of data points * Definition of views and aggregation of the recorded data ### Recording Measurements are data points associated with a measure. Recording implicitly tags the set of Measurements with the tags from the provided context: [embedmd]:# (internal/readme/stats.go record) ```go stats.Record(ctx, videoSize.M(102478)) ``` ### Views Views are how Measures are aggregated. You can think of them as queries over the set of recorded data points (measurements). Views have two parts: the tags to group by and the aggregation type used. 
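For example, a view that uses both parts might group the recorded video sizes by the OS tag from the snippet above. This is only an illustrative sketch: the `example.com/video_size_by_os` name is made up, and `osKey`/`videoSize` are assumed to be the tag key and measure from the earlier snippets.

```go
viewByOS := &view.View{
	Name:        "example.com/video_size_by_os", // hypothetical view name
	Description: "processed video size grouped by operating system",
	TagKeys:     []tag.Key{osKey},                        // the "group by" part
	Measure:     videoSize,                               // what is being recorded
	Aggregation: view.Distribution(1<<32, 2<<32, 3<<32),  // how it is aggregated
}
if err := view.Register(viewByOS); err != nil {
	log.Fatalf("Failed to register view: %v", err)
}
```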
Currently three types of aggregations are supported: * CountAggregation is used to count the number of times a sample was recorded. * DistributionAggregation is used to provide a histogram of the values of the samples. * SumAggregation is used to sum up all sample values. [embedmd]:# (internal/readme/stats.go aggs) ```go distAgg := view.Distribution(1<<32, 2<<32, 3<<32) countAgg := view.Count() sumAgg := view.Sum() ``` Here we create a view with the DistributionAggregation over our measure. [embedmd]:# (internal/readme/stats.go view) ```go if err := view.Register(&view.View{ Name: "example.com/video_size_distribution", Description: "distribution of processed video size over time", Measure: videoSize, Aggregation: view.Distribution(1<<32, 2<<32, 3<<32), }); err != nil { log.Fatalf("Failed to register view: %v", err) } ``` Register begins collecting data for the view. Registered views' data will be exported via the registered exporters. ## Traces A distributed trace tracks the progression of a single user request as it is handled by the services and processes that make up an application. Each step is called a span in the trace. Spans include metadata about the step, including especially the time spent in the step, called the span’s latency. Below you see a trace and several spans underneath it. ![Traces and spans](https://i.imgur.com/7hZwRVj.png) ### Spans Span is the unit step in a trace. Each span has a name, latency, status and additional metadata. Below we are starting a span for a cache read and ending it when we are done: [embedmd]:# (internal/readme/trace.go startend) ```go ctx, span := trace.StartSpan(ctx, "cache.Get") defer span.End() // Do work to get from cache. ``` ### Propagation Spans can have parents or can be root spans if they don't have any parents. The current span is propagated in-process and across the network to allow associating new child spans with the parent. In the same process, `context.Context` is used to propagate spans. `trace.StartSpan` creates a new span as a root if the current context doesn't contain a span. Or, it creates a child of the span that is already in current context. The returned context can be used to keep propagating the newly created span in the current context. [embedmd]:# (internal/readme/trace.go startend) ```go ctx, span := trace.StartSpan(ctx, "cache.Get") defer span.End() // Do work to get from cache. ``` Across the network, OpenCensus provides different propagation methods for different protocols. * gRPC integrations use the OpenCensus' [binary propagation format](https://godoc.org/go.opencensus.io/trace/propagation). * HTTP integrations use Zipkin's [B3](https://github.com/openzipkin/b3-propagation) by default but can be configured to use a custom propagation method by setting another [propagation.HTTPFormat](https://godoc.org/go.opencensus.io/trace/propagation#HTTPFormat). ## Execution Tracer With Go 1.11, OpenCensus Go will support integration with the Go execution tracer. See [Debugging Latency in Go](https://medium.com/observability/debugging-latency-in-go-1-11-9f97a7910d68) for an example of their mutual use. ## Profiles OpenCensus tags can be applied as profiler labels for users who are on Go 1.9 and above. [embedmd]:# (internal/readme/tags.go profiler) ```go ctx, err = tag.New(ctx, tag.Insert(osKey, "macOS-10.12.5"), tag.Insert(userIDKey, "fff0989878"), ) if err != nil { log.Fatal(err) } tag.Do(ctx, func(ctx context.Context) { // Do work. // When profiling is on, samples will be // recorded with the key/values from the tag map. 
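	// Under the hood, tag.Do installs the tags from the context as
	// runtime/pprof labels, which is why profiling samples taken inside
	// this function carry these key/values.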
}) ``` A screenshot of the CPU profile from the program above: ![CPU profile](https://i.imgur.com/jBKjlkw.png) ## Deprecation Policy Before version 1.0.0, the following deprecation policy will be observed: No backwards-incompatible changes will be made except for the removal of symbols that have been marked as *Deprecated* for at least one minor release (e.g. 0.9.0 to 0.10.0). A release removing the *Deprecated* functionality will be made no sooner than 28 days after the first release in which the functionality was marked *Deprecated*. [travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master [travis-url]: https://travis-ci.org/census-instrumentation/opencensus-go [appveyor-image]: https://ci.appveyor.com/api/projects/status/vgtt29ps1783ig38?svg=true [appveyor-url]: https://ci.appveyor.com/project/opencensusgoteam/opencensus-go/branch/master [godoc-image]: https://godoc.org/go.opencensus.io?status.svg [godoc-url]: https://godoc.org/go.opencensus.io [gitter-image]: https://badges.gitter.im/census-instrumentation/lobby.svg [gitter-url]: https://gitter.im/census-instrumentation/lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge [new-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap [new-replace-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap--Replace [exporter-prom]: https://godoc.org/contrib.go.opencensus.io/exporter/prometheus [exporter-stackdriver]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver [exporter-zipkin]: https://godoc.org/contrib.go.opencensus.io/exporter/zipkin [exporter-jaeger]: https://godoc.org/contrib.go.opencensus.io/exporter/jaeger [exporter-xray]: https://github.com/census-ecosystem/opencensus-go-exporter-aws [exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog [exporter-graphite]: https://github.com/census-ecosystem/opencensus-go-exporter-graphite [exporter-honeycomb]: https://github.com/honeycombio/opencensus-exporter [exporter-newrelic]: https://github.com/newrelic/newrelic-opencensus-exporter-go opencensus-go-0.24.0/appveyor.yml000066400000000000000000000006341433102037600167540ustar00rootroot00000000000000version: "{build}" platform: x64 clone_folder: c:\gopath\src\go.opencensus.io environment: GOPATH: 'c:\gopath' GO111MODULE: 'on' CGO_ENABLED: '0' # See: https://github.com/appveyor/ci/issues/2613 stack: go 1.11 before_test: - go version - go env build: false deploy: false test_script: - cd %APPVEYOR_BUILD_FOLDER% - go build -v .\... - go test -v .\... # No -race because cgo is disabled opencensus-go-0.24.0/examples/000077500000000000000000000000001433102037600161775ustar00rootroot00000000000000opencensus-go-0.24.0/examples/derived_gauges/000077500000000000000000000000001433102037600211545ustar00rootroot00000000000000opencensus-go-0.24.0/examples/derived_gauges/README.md000066400000000000000000000225301433102037600224350ustar00rootroot00000000000000# Derived Gauge Example Table of Contents ================= - [Summary](#summary) - [Run the example](#run-the-example) - [How to use derived gauges?](#how-to-use-derived-gauges-) * [Initialize Metric Registry](#initialize-metric-registry) * [Create derived gauge metric](#create-derived-gauge-metric) * [Create derived gauge entry](#create-derived-gauge-entry) * [Implement derived gauge interface](#implement-derived-gauge-interface) * [Complete Example](#complete-example) ## Summary [top](#Table-of-Contents) This example demonstrates the use of derived gauges. 
It is a simple interactive program of consumer and producer. User can input number of items to produce. Producer produces the specified number of items. Consumer randomly consumes 1-5 items in each attempt. It then sleeps randomly between 1-10 seconds before the next attempt. There are two metrics collected to monitor the queue. 1. **queue_size**: It is an instantaneous queue size represented using derived gauge int64. 1. **queue_seconds_since_processed_last**: It is the time elapsed in seconds since the last time when the queue was consumed. It is represented using derived gauge float64. This example shows how to use derived gauge metrics. The program records two gauges. These metrics are read when the exporter scrapes them. In this example the log exporter is used to log the data into a file. Metrics can be viewed at [file:///tmp/metrics.log](file:///tmp/metrics.log) once the program is running. Alternatively you could do `tail -f /tmp/metrics.log` on Linux/OSx. Enter different values for the number of items to queue and fetch the metrics using the above URL to see the variation in the metrics. ## Run the example ``` $ go get go.opencensus.io/examples/derived_gauges/... ``` then: ``` $ go run $(go env GOPATH)/src/go.opencensus.io/examples/derived_gauges/derived_gauge.go ``` ## How to use derived gauges? ### Initialize Metric Registry Create a new metric registry for all your metrics. This step is a general step for any kind of metrics and not specific to gauges. Register the newly created registry with the global producer manager. [embedmd]:# (derived_gauge.go reg) ```go r := metric.NewRegistry() metricproducer.GlobalManager().AddProducer(r) ``` ### Create derived gauge metric Create a gauge metric. In this example we have two metrics. **queue_size** [embedmd]:# (derived_gauge.go size) ```go queueSizeGauge, err := r.AddInt64DerivedGauge( "queue_size", metric.WithDescription("Instantaneous queue size"), metric.WithUnit(metricdata.UnitDimensionless)) if err != nil { log.Fatalf("error creating queue size derived gauge, error %v\n", err) } ``` **queue_seconds_since_processed_last** [embedmd]:# (derived_gauge.go elapsed) ```go elapsedSeconds, err := r.AddFloat64DerivedGauge( "queue_seconds_since_processed_last", metric.WithDescription("time elapsed since last time the queue was processed"), metric.WithUnit(metricdata.UnitDimensionless)) if err != nil { log.Fatalf("error creating queue_seconds_since_processed_last derived gauge, error %v\n", err) } ``` ### Create derived gauge entry Now, create or insert a unique entry with an interface `ToInt64` for a given set of tags. Since we are not using any tags in this example we only insert one entry for each derived gauge metric.
**insert interface for queue_size** [embedmd]:# (derived_gauge.go entrySize) ```go err = queueSizeGauge.UpsertEntry(q.Size) if err != nil { log.Fatalf("error getting queue size derived gauge entry, error %v\n", err) } ``` **insert interface for queue_seconds_since_processed_lasto** [embedmd]:# (derived_gauge.go entryElapsed) ```go err = elapsedSeconds.UpsertEntry(q.Elapsed) if err != nil { log.Fatalf("error getting queue_seconds_since_processed_last derived gauge entry, error %v\n", err) } ``` ### Implement derived gauge interface In order for metrics reader to read the value of your dervied gauge it must implement ToFloat64 or ToInt64 [embedmd]:# (derived_gauge.go toint64) ```go func (q *queue) Size() int64 { q.mu.Lock() defer q.mu.Unlock() return int64(q.size) } ``` [embedmd]:# (derived_gauge.go tofloat64) ```go func (q *queue) Elapsed() float64 { q.mu.Lock() defer q.mu.Unlock() return time.Since(q.lastConsumed).Seconds() } ``` ### Complete Example [embedmd]:# (derived_gauge.go entire) ```go // This example demonstrates the use of derived gauges. It is a simple interactive program of consumer // and producer. User can input number of items to produce. Producer produces specified number of // items. Consumer randomly consumes 1-5 items in each attempt. It then sleeps randomly // between 1-10 seconds before the next attempt. Two metrics collected to monitor the queue. // // # Metrics // // * queue_size: It is an instantaneous queue size represented using derived gauge int64. // // * queue_seconds_since_processed_last: It is the time elaspsed in seconds since the last time // when the queue was consumed. It is represented using derived gauge float64. package main import ( "bufio" "fmt" "log" "math/rand" "os" "strconv" "strings" "sync" "time" "go.opencensus.io/examples/exporter" "go.opencensus.io/metric" "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricproducer" ) const ( metricsLogFile = "/tmp/metrics.log" ) type queue struct { size int lastConsumed time.Time mu sync.Mutex q []int } var q = &queue{} const ( maxItemsToConsumePerAttempt = 25 ) func init() { q.q = make([]int, 100) } // consume randomly dequeues upto 5 items from the queue func (q *queue) consume() { q.mu.Lock() defer q.mu.Unlock() consumeCount := rand.Int() % maxItemsToConsumePerAttempt i := 0 for i = 0; i < consumeCount; i++ { if q.size > 0 { q.q = q.q[1:] q.size-- } else { break } } if i > 0 { q.lastConsumed = time.Now() } } // produce randomly enqueues upto 5 items from the queue func (q *queue) produce(count int) { q.mu.Lock() defer q.mu.Unlock() for i := 0; i < count; i++ { v := rand.Int() % 100 q.q = append(q.q, v) q.size++ } fmt.Printf("queued %d items, queue size is %d\n", count, q.size) } func (q *queue) runConsumer(interval time.Duration, cQuit chan bool) { t := time.NewTicker(interval) for { select { case <-t.C: q.consume() case <-cQuit: t.Stop() return } } } // Size reports instantaneous queue size. // This is the interface supplied while creating an entry for derived gauge int64. func (q *queue) Size() int64 { q.mu.Lock() defer q.mu.Unlock() return int64(q.size) } // Elapsed reports time elapsed since the last time an item was consumed from the queue. // This is the interface supplied while creating an entry for derived gauge float64. func (q *queue) Elapsed() float64 { q.mu.Lock() defer q.mu.Unlock() return time.Since(q.lastConsumed).Seconds() } func getInput() int { reader := bufio.NewReader(os.Stdin) limit := 100 for { fmt.Printf("Enter number of items to put in consumer queue? 
[1-%d]: ", limit) text, _ := reader.ReadString('\n') count, err := strconv.Atoi(strings.TrimSuffix(text, "\n")) if err == nil { if count < 1 || count > limit { fmt.Printf("invalid value %s\n", text) continue } return count } fmt.Printf("error %v\n", err) } } func doWork() { fmt.Printf("Program monitors queue using two derived gauge metrics.\n") fmt.Printf(" 1. queue_size = the instantaneous size of the queue.\n") fmt.Printf(" 2. queue_seconds_since_processed_last = the number of seconds elapsed since last time the queue was processed.\n") fmt.Printf("\nGo to file://%s to see the metrics. OR do `tail -f %s` in another terminal\n\n\n", metricsLogFile, metricsLogFile) // Take a number of items to queue as an input from the user // and enqueue the same number of items on to the consumer queue. for { count := getInput() q.produce(count) fmt.Printf("press CTRL+C to terminate the program\n") } } func main() { // Using logexporter but you can choose any supported exporter. exporter, err := exporter.NewLogExporter(exporter.Options{ ReportingInterval: 10 * time.Second, MetricsLogFile: metricsLogFile, }) if err != nil { log.Fatalf("Error creating log exporter: %v", err) } exporter.Start() defer exporter.Stop() defer exporter.Close() // Create metric registry and register it with global producer manager. r := metric.NewRegistry() metricproducer.GlobalManager().AddProducer(r) // Create Int64DerviedGauge queueSizeGauge, err := r.AddInt64DerivedGauge( "queue_size", metric.WithDescription("Instantaneous queue size"), metric.WithUnit(metricdata.UnitDimensionless)) if err != nil { log.Fatalf("error creating queue size derived gauge, error %v\n", err) } err = queueSizeGauge.UpsertEntry(q.Size) if err != nil { log.Fatalf("error getting queue size derived gauge entry, error %v\n", err) } // Create Float64DerviedGauge elapsedSeconds, err := r.AddFloat64DerivedGauge( "queue_seconds_since_processed_last", metric.WithDescription("time elapsed since last time the queue was processed"), metric.WithUnit(metricdata.UnitDimensionless)) if err != nil { log.Fatalf("error creating queue_seconds_since_processed_last derived gauge, error %v\n", err) } err = elapsedSeconds.UpsertEntry(q.Elapsed) if err != nil { log.Fatalf("error getting queue_seconds_since_processed_last derived gauge entry, error %v\n", err) } quit := make(chan bool) defer func() { close(quit) }() // Run consumer and producer go q.runConsumer(5*time.Second, quit) for { doWork() } } ``` opencensus-go-0.24.0/examples/derived_gauges/derived_gauge.go000066400000000000000000000137211433102037600243010ustar00rootroot00000000000000// Copyright 2019, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // START entire // This example demonstrates the use of derived gauges. It is a simple interactive program of consumer // and producer. User can input number of items to produce. Producer produces specified number of // items. Consumer randomly consumes 1-5 items in each attempt. It then sleeps randomly // between 1-10 seconds before the next attempt. 
Two metrics collected to monitor the queue. // // # Metrics // // * queue_size: It is an instantaneous queue size represented using derived gauge int64. // // * queue_seconds_since_processed_last: It is the time elaspsed in seconds since the last time // when the queue was consumed. It is represented using derived gauge float64. package main import ( "bufio" "fmt" "log" "math/rand" "os" "strconv" "strings" "sync" "time" "go.opencensus.io/examples/exporter" "go.opencensus.io/metric" "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricproducer" ) const ( metricsLogFile = "/tmp/metrics.log" ) type queue struct { size int lastConsumed time.Time mu sync.Mutex q []int } var q = &queue{} const ( maxItemsToConsumePerAttempt = 25 ) func init() { q.q = make([]int, 100) } // consume randomly dequeues upto 5 items from the queue func (q *queue) consume() { q.mu.Lock() defer q.mu.Unlock() consumeCount := rand.Int() % maxItemsToConsumePerAttempt i := 0 for i = 0; i < consumeCount; i++ { if q.size > 0 { q.q = q.q[1:] q.size-- } else { break } } if i > 0 { q.lastConsumed = time.Now() } } // produce randomly enqueues upto 5 items from the queue func (q *queue) produce(count int) { q.mu.Lock() defer q.mu.Unlock() for i := 0; i < count; i++ { v := rand.Int() % 100 q.q = append(q.q, v) q.size++ } fmt.Printf("queued %d items, queue size is %d\n", count, q.size) } func (q *queue) runConsumer(interval time.Duration, cQuit chan bool) { t := time.NewTicker(interval) for { select { case <-t.C: q.consume() case <-cQuit: t.Stop() return } } } // Size reports instantaneous queue size. // This is the interface supplied while creating an entry for derived gauge int64. // START toint64 func (q *queue) Size() int64 { q.mu.Lock() defer q.mu.Unlock() return int64(q.size) } // END toint64 // Elapsed reports time elapsed since the last time an item was consumed from the queue. // This is the interface supplied while creating an entry for derived gauge float64. // START tofloat64 func (q *queue) Elapsed() float64 { q.mu.Lock() defer q.mu.Unlock() return time.Since(q.lastConsumed).Seconds() } // END tofloat64 func getInput() int { reader := bufio.NewReader(os.Stdin) limit := 100 for { fmt.Printf("Enter number of items to put in consumer queue? [1-%d]: ", limit) text, _ := reader.ReadString('\n') count, err := strconv.Atoi(strings.TrimSuffix(text, "\n")) if err == nil { if count < 1 || count > limit { fmt.Printf("invalid value %s\n", text) continue } return count } fmt.Printf("error %v\n", err) } } func doWork() { fmt.Printf("Program monitors queue using two derived gauge metrics.\n") fmt.Printf(" 1. queue_size = the instantaneous size of the queue.\n") fmt.Printf(" 2. queue_seconds_since_processed_last = the number of seconds elapsed since last time the queue was processed.\n") fmt.Printf("\nGo to file://%s to see the metrics. OR do `tail -f %s` in another terminal\n\n\n", metricsLogFile, metricsLogFile) // Take a number of items to queue as an input from the user // and enqueue the same number of items on to the consumer queue. for { count := getInput() q.produce(count) fmt.Printf("press CTRL+C to terminate the program\n") } } func main() { // Using logexporter but you can choose any supported exporter. 
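	// The log exporter below scrapes every registered metric producer once per
	// ReportingInterval and writes metrics (and span data, if any) to the files
	// configured in Options; see examples/exporter/logexporter.go.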
exporter, err := exporter.NewLogExporter(exporter.Options{ ReportingInterval: 10 * time.Second, MetricsLogFile: metricsLogFile, }) if err != nil { log.Fatalf("Error creating log exporter: %v", err) } exporter.Start() defer exporter.Stop() defer exporter.Close() // Create metric registry and register it with global producer manager. // START reg r := metric.NewRegistry() metricproducer.GlobalManager().AddProducer(r) // END reg // Create Int64DerviedGauge // START size queueSizeGauge, err := r.AddInt64DerivedGauge( "queue_size", metric.WithDescription("Instantaneous queue size"), metric.WithUnit(metricdata.UnitDimensionless)) if err != nil { log.Fatalf("error creating queue size derived gauge, error %v\n", err) } // END size // START entrySize err = queueSizeGauge.UpsertEntry(q.Size) if err != nil { log.Fatalf("error getting queue size derived gauge entry, error %v\n", err) } // END entrySize // Create Float64DerviedGauge // START elapsed elapsedSeconds, err := r.AddFloat64DerivedGauge( "queue_seconds_since_processed_last", metric.WithDescription("time elapsed since last time the queue was processed"), metric.WithUnit(metricdata.UnitDimensionless)) if err != nil { log.Fatalf("error creating queue_seconds_since_processed_last derived gauge, error %v\n", err) } // END elapsed // START entryElapsed err = elapsedSeconds.UpsertEntry(q.Elapsed) if err != nil { log.Fatalf("error getting queue_seconds_since_processed_last derived gauge entry, error %v\n", err) } // END entryElapsed quit := make(chan bool) defer func() { close(quit) }() // Run consumer and producer go q.runConsumer(5*time.Second, quit) for { doWork() } } // END entire opencensus-go-0.24.0/examples/exporter/000077500000000000000000000000001433102037600200475ustar00rootroot00000000000000opencensus-go-0.24.0/examples/exporter/exporter.go000066400000000000000000000057311433102037600222540ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package exporter // import "go.opencensus.io/examples/exporter" import ( "encoding/hex" "fmt" "regexp" "time" "go.opencensus.io/stats/view" "go.opencensus.io/trace" ) // indent these many spaces const indent = " " // reZero provides a simple way to detect an empty ID var reZero = regexp.MustCompile(`^0+$`) // PrintExporter is a stats and trace exporter that logs // the exported data to the console. // // The intent is help new users familiarize themselves with the // capabilities of opencensus. // // This should NOT be used for production workloads. type PrintExporter struct{} // ExportView logs the view data. 
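// It prints one line per aggregation row (count, sum, last value or
// distribution stats, depending on the view), followed by that row's
// tag key/value pairs on separate indented lines.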
func (e *PrintExporter) ExportView(vd *view.Data) { for _, row := range vd.Rows { fmt.Printf("%v %-45s", vd.End.Format("15:04:05"), vd.View.Name) switch v := row.Data.(type) { case *view.DistributionData: fmt.Printf("distribution: min=%.1f max=%.1f mean=%.1f", v.Min, v.Max, v.Mean) case *view.CountData: fmt.Printf("count: value=%v", v.Value) case *view.SumData: fmt.Printf("sum: value=%v", v.Value) case *view.LastValueData: fmt.Printf("last: value=%v", v.Value) } fmt.Println() for _, tag := range row.Tags { fmt.Printf("%v- %v=%v\n", indent, tag.Key.Name(), tag.Value) } } } // ExportSpan logs the trace span. func (e *PrintExporter) ExportSpan(vd *trace.SpanData) { var ( traceID = hex.EncodeToString(vd.SpanContext.TraceID[:]) spanID = hex.EncodeToString(vd.SpanContext.SpanID[:]) parentSpanID = hex.EncodeToString(vd.ParentSpanID[:]) ) fmt.Println() fmt.Println("#----------------------------------------------") fmt.Println() fmt.Println("TraceID: ", traceID) fmt.Println("SpanID: ", spanID) if !reZero.MatchString(parentSpanID) { fmt.Println("ParentSpanID:", parentSpanID) } fmt.Println() fmt.Printf("Span: %v\n", vd.Name) fmt.Printf("Status: %v [%v]\n", vd.Status.Message, vd.Status.Code) fmt.Printf("Elapsed: %v\n", vd.EndTime.Sub(vd.StartTime).Round(time.Millisecond)) if len(vd.Annotations) > 0 { fmt.Println() fmt.Println("Annotations:") for _, item := range vd.Annotations { fmt.Print(indent, item.Message) for k, v := range item.Attributes { fmt.Printf(" %v=%v", k, v) } fmt.Println() } } if len(vd.Attributes) > 0 { fmt.Println() fmt.Println("Attributes:") for k, v := range vd.Attributes { fmt.Printf("%v- %v=%v\n", indent, k, v) } } } opencensus-go-0.24.0/examples/exporter/logexporter.go000066400000000000000000000142761433102037600227620ustar00rootroot00000000000000// Copyright 2019, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package exporter contains a log exporter that supports exporting // OpenCensus metrics and spans to a logging framework. package exporter // import "go.opencensus.io/examples/exporter" import ( "context" "encoding/hex" "fmt" "log" "os" "sync" "time" "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricexport" "go.opencensus.io/trace" ) // LogExporter exports metrics and span to log file type LogExporter struct { reader *metricexport.Reader ir *metricexport.IntervalReader initReaderOnce sync.Once o Options tFile *os.File mFile *os.File tLogger *log.Logger mLogger *log.Logger } // Options provides options for LogExporter type Options struct { // ReportingInterval is a time interval between two successive metrics // export. ReportingInterval time.Duration // MetricsLogFile is path where exported metrics are logged. // If it is nil then the metrics are logged on console MetricsLogFile string // TracesLogFile is path where exported span data are logged. 
// If it is nil then the span data are logged on console TracesLogFile string } func getLogger(filepath string) (*log.Logger, *os.File, error) { if filepath == "" { return log.New(os.Stdout, "", 0), nil, nil } f, err := os.OpenFile(filepath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) if err != nil { return nil, nil, err } return log.New(f, "", 0), f, nil } // NewLogExporter creates new log exporter. func NewLogExporter(options Options) (*LogExporter, error) { e := &LogExporter{reader: metricexport.NewReader(), o: options} var err error e.tLogger, e.tFile, err = getLogger(options.TracesLogFile) if err != nil { return nil, err } e.mLogger, e.mFile, err = getLogger(options.MetricsLogFile) if err != nil { return nil, err } return e, nil } func printMetricDescriptor(metric *metricdata.Metric) string { d := metric.Descriptor return fmt.Sprintf("name: %s, type: %s, unit: %s ", d.Name, d.Type, d.Unit) } func printLabels(metric *metricdata.Metric, values []metricdata.LabelValue) string { d := metric.Descriptor kv := []string{} for i, k := range d.LabelKeys { kv = append(kv, fmt.Sprintf("%s=%v", k, values[i])) } return fmt.Sprintf("%v", kv) } func printPoint(point metricdata.Point) string { switch v := point.Value.(type) { case *metricdata.Distribution: dv := v return fmt.Sprintf("count=%v sum=%v sum_sq_dev=%v, buckets=%v", dv.Count, dv.Sum, dv.SumOfSquaredDeviation, dv.Buckets) default: return fmt.Sprintf("value=%v", point.Value) } } // Start starts the metric and span data exporter. func (e *LogExporter) Start() error { trace.RegisterExporter(e) e.initReaderOnce.Do(func() { e.ir, _ = metricexport.NewIntervalReader(&metricexport.Reader{}, e) }) e.ir.ReportingInterval = e.o.ReportingInterval return e.ir.Start() } // Stop stops the metric and span data exporter. func (e *LogExporter) Stop() { trace.UnregisterExporter(e) e.ir.Stop() } // Close closes any files that were opened for logging. func (e *LogExporter) Close() { if e.tFile != nil { e.tFile.Close() e.tFile = nil } if e.mFile != nil { e.mFile.Close() e.mFile = nil } } // ExportMetrics exports to log. 
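// It walks every time series and every point of each exported metric and
// logs one block per point containing the metric descriptor, the label
// values and the point's value.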
func (e *LogExporter) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error { for _, metric := range metrics { for _, ts := range metric.TimeSeries { for _, point := range ts.Points { e.mLogger.Println("#----------------------------------------------") e.mLogger.Println() e.mLogger.Printf("Metric: %s\n Labels: %s\n Value : %s\n", printMetricDescriptor(metric), printLabels(metric, ts.LabelValues), printPoint(point)) e.mLogger.Println() } } } return nil } // ExportSpan exports a SpanData to log func (e *LogExporter) ExportSpan(sd *trace.SpanData) { var ( traceID = hex.EncodeToString(sd.SpanContext.TraceID[:]) spanID = hex.EncodeToString(sd.SpanContext.SpanID[:]) parentSpanID = hex.EncodeToString(sd.ParentSpanID[:]) ) e.tLogger.Println() e.tLogger.Println("#----------------------------------------------") e.tLogger.Println() e.tLogger.Println("TraceID: ", traceID) e.tLogger.Println("SpanID: ", spanID) if !reZero.MatchString(parentSpanID) { e.tLogger.Println("ParentSpanID:", parentSpanID) } e.tLogger.Println() e.tLogger.Printf("Span: %v\n", sd.Name) e.tLogger.Printf("Status: %v [%v]\n", sd.Status.Message, sd.Status.Code) e.tLogger.Printf("Elapsed: %v\n", sd.EndTime.Sub(sd.StartTime).Round(time.Millisecond)) spanKinds := map[int]string{ 1: "Server", 2: "Client", } if spanKind, ok := spanKinds[sd.SpanKind]; ok { e.tLogger.Printf("SpanKind: %s\n", spanKind) } if len(sd.Annotations) > 0 { e.tLogger.Println() e.tLogger.Println("Annotations:") for _, item := range sd.Annotations { e.tLogger.Print(indent, item.Message) for k, v := range item.Attributes { e.tLogger.Printf(" %v=%v", k, v) } e.tLogger.Println() } } if len(sd.Attributes) > 0 { e.tLogger.Println() e.tLogger.Println("Attributes:") for k, v := range sd.Attributes { e.tLogger.Printf("%v- %v=%v\n", indent, k, v) } } if len(sd.MessageEvents) > 0 { eventTypes := map[trace.MessageEventType]string{ trace.MessageEventTypeSent: "Sent", trace.MessageEventTypeRecv: "Received", } e.tLogger.Println() e.tLogger.Println("MessageEvents:") for _, item := range sd.MessageEvents { if eventType, ok := eventTypes[item.EventType]; ok { e.tLogger.Print(eventType) } e.tLogger.Printf("UncompressedByteSize: %v", item.UncompressedByteSize) e.tLogger.Printf("CompressedByteSize: %v", item.CompressedByteSize) e.tLogger.Println() } } } opencensus-go-0.24.0/examples/gauges/000077500000000000000000000000001433102037600174525ustar00rootroot00000000000000opencensus-go-0.24.0/examples/gauges/README.md000066400000000000000000000210651433102037600207350ustar00rootroot00000000000000# Gauges Example Table of Contents ================= - [Summary](#summary) - [Run the example](#run-the-example) - [How to use gauges?](#how-to-use-gauges-) * [Initialize Metric Registry](#initialize-metric-registry) * [Create gauge metric](#create-gauge-metric) * [Create gauge entry](#create-gauge-entry) * [Set gauge values](#set-gauge-values) * [Complete Example](#complete-example) ## Summary [top](#Table-of-Contents) This example shows how to use gauge metrics. The program records two gauges. 1. **process_heap_alloc (int64)**: Total bytes used by objects allocated in the heap. It includes objects currently used and objects that are freed but not garbage collected. 1. **process_heap_idle_to_alloc_ratio (float64)**: It is the ratio of Idle bytes to allocated bytes in the heap. It periodically runs a function that retrieves the memory stats and updates the above two metrics. These metrics are then exported using log exporter. 
Metrics can be viewed at [file:///tmp/metrics.log](file:///tmp/metrics.log) once the program is running. Alternatively you could do `tail -f /tmp/metrics.log` on Linux/OSx. The program lets you choose the amount of memory (in MB) to consume. Choose different values and query the metrics to see the change in metrics. ## Run the example ``` $ go get go.opencensus.io/examples/gauges/... ``` then: ``` $ go run $(go env GOPATH)/src/go.opencensus.io/examples/gauges/gauge.go ``` ## How to use gauges? ### Initialize Metric Registry Create a new metric registry for all your metrics. This step is a general step for any kind of metrics and not specific to gauges. Register newly created registry with global producer manager. [embedmd]:# (gauge.go reg) ```go r := metric.NewRegistry() metricproducer.GlobalManager().AddProducer(r) ``` ### Create gauge metric Create a gauge metric. In this example we have two metrics. **process_heap_alloc** [embedmd]:# (gauge.go alloc) ```go allocGauge, err := r.AddInt64Gauge( "process_heap_alloc", metric.WithDescription("Process heap allocation"), metric.WithUnit(metricdata.UnitBytes)) if err != nil { log.Fatalf("error creating heap allocation gauge, error %v\n", err) } ``` **process_heap_idle_to_alloc_ratio** [embedmd]:# (gauge.go idle) ```go ratioGauge, err := r.AddFloat64Gauge( "process_heap_idle_to_alloc_ratio", metric.WithDescription("process heap idle to allocate ratio"), metric.WithUnit(metricdata.UnitDimensionless)) if err != nil { log.Fatalf("error creating process heap idle to allocate ratio gauge, error %v\n", err) } ``` ### Create gauge entry Now, create or get a unique entry (equivalent of a row in a table) for a given set of tags. Since we are not using any tags in this example we only have one entry for each gauge metric. **entry for process_heap_alloc** [embedmd]:# (gauge.go entryAlloc) ```go allocEntry, err = allocGauge.GetEntry() if err != nil { log.Fatalf("error getting heap allocation gauge entry, error %v\n", err) } ``` **entry for process_heap_idle_to_alloc_ratio** [embedmd]:# (gauge.go entryIdle) ```go ratioEntry, err = ratioGauge.GetEntry() if err != nil { log.Fatalf("error getting process heap idle to allocate ratio gauge entry, error %v\n", err) } ``` ### Set gauge values Use `Set` or `Add` function to update the value of gauge entries. You can call these methods anytime based on your metric and your application. In this example, `Set` is called periodically. [embedmd]:# (gauge.go record) ```go allocEntry.Set(int64(getAlloc())) // int64 gauge ratioEntry.Set(getIdleToAllocRatio()) // float64 gauge ``` ### Complete Example [embedmd]:# (gauge.go entire) ```go // This example shows how to use gauge metrics. The program records two gauges, one to demonstrate // a gauge with int64 value and the other to demonstrate a gauge with float64 value. // // # Metrics // // 1. process_heap_alloc (int64): Total bytes used by objects allocated in the heap. // It includes objects currently used and objects that are freed but not garbage collected. // // 2. process_heap_idle_to_alloc_ratio (float64): It is the ratio of Idle bytes to allocated // bytes in the heap. // // It periodically runs a function that retrieves the memory stats and updates the above two // metrics. These metrics are then exported using log exporter. // The program lets you choose the amount of memory (in MB) to consume. Choose different values // and query the metrics to see the change in metrics. 
package main import ( "bufio" "fmt" "log" "os" "runtime" "strconv" "strings" "time" "go.opencensus.io/examples/exporter" "go.opencensus.io/metric" "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricproducer" ) const ( metricsLogFile = "/tmp/metrics.log" ) var ( mem = &runtime.MemStats{} ) type memObj struct { size int b []byte } func newMemObj(size int) *memObj { n := &memObj{size: size, b: make([]byte, size)} for i := 0; i < n.size; i++ { n.b[i] = byte(i) } return n } var allocEntry *metric.Int64GaugeEntry var ratioEntry *metric.Float64Entry var arr []*memObj func getAlloc() uint64 { runtime.ReadMemStats(mem) return mem.HeapAlloc } func getIdleToAllocRatio() float64 { runtime.ReadMemStats(mem) return float64(mem.HeapIdle) / float64(mem.HeapAlloc) } func consumeMem(sizeMB int) { arr = make([]*memObj, sizeMB) for i := 0; i < sizeMB; i++ { arr = append(arr, newMemObj(1000000)) } } func doSomeWork(sizeMB int) { // do some work consumeMem(sizeMB) } func recordMetrics(delay int, done chan int) { tick := time.NewTicker(time.Duration(delay) * time.Second) for { select { case <-done: return case <-tick.C: // record heap allocation and idle to allocation ratio. allocEntry.Set(int64(getAlloc())) // int64 gauge ratioEntry.Set(getIdleToAllocRatio()) // float64 gauge } } } func getInput() int { reader := bufio.NewReader(os.Stdin) limit := 50 for { fmt.Printf("Enter memory (in MB between 1-%d): ", limit) text, _ := reader.ReadString('\n') sizeMB, err := strconv.Atoi(strings.TrimSuffix(text, "\n")) if err == nil { if sizeMB < 1 || sizeMB > limit { fmt.Printf("invalid value %s\n", text) continue } fmt.Printf("consuming %dMB\n", sizeMB) return sizeMB } fmt.Printf("error %v\n", err) } } func work() { fmt.Printf("Program periodically records following gauge metrics.\n") fmt.Printf(" 1. process_heap_alloc = the heap allocation (used + freed but not garbage collected)\n") fmt.Printf(" 2. process_idle_to_alloc_ratio = heap idle (unused) /allocation ratio\n") fmt.Printf("\nGo to file://%s to see the metrics. OR do `tail -f %s` in another terminal\n\n\n", metricsLogFile, metricsLogFile) fmt.Printf("Enter memory you would like to allocate in MB to change the value of above metrics.\n") // Do some work and record gauge metrics. for { sizeMB := getInput() doSomeWork(sizeMB) fmt.Printf("press CTRL+C to terminate the program\n") } } func main() { // Using log exporter to export metrics but you can choose any supported exporter. exporter, err := exporter.NewLogExporter(exporter.Options{ ReportingInterval: 10 * time.Second, MetricsLogFile: metricsLogFile, }) if err != nil { log.Fatalf("Error creating log exporter: %v", err) } exporter.Start() defer exporter.Stop() defer exporter.Close() // Create metric registry and register it with global producer manager. r := metric.NewRegistry() metricproducer.GlobalManager().AddProducer(r) // Create Int64Gauge to report memory usage of a process. allocGauge, err := r.AddInt64Gauge( "process_heap_alloc", metric.WithDescription("Process heap allocation"), metric.WithUnit(metricdata.UnitBytes)) if err != nil { log.Fatalf("error creating heap allocation gauge, error %v\n", err) } allocEntry, err = allocGauge.GetEntry() if err != nil { log.Fatalf("error getting heap allocation gauge entry, error %v\n", err) } // Create Float64Gauge to report fractional cpu consumed by Garbage Collection. 
ratioGauge, err := r.AddFloat64Gauge( "process_heap_idle_to_alloc_ratio", metric.WithDescription("process heap idle to allocate ratio"), metric.WithUnit(metricdata.UnitDimensionless)) if err != nil { log.Fatalf("error creating process heap idle to allocate ratio gauge, error %v\n", err) } ratioEntry, err = ratioGauge.GetEntry() if err != nil { log.Fatalf("error getting process heap idle to allocate ratio gauge entry, error %v\n", err) } // record gauge metrics every 5 seconds. This example records the gauges periodically. However, // depending on the application it can be non-periodic and can be recorded at any time. done := make(chan int) defer close(done) go recordMetrics(1, done) // do your work. work() } ``` opencensus-go-0.24.0/examples/gauges/gauge.go000066400000000000000000000134471433102037600211020ustar00rootroot00000000000000// Copyright 2019, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // START entire // This example shows how to use gauge metrics. The program records two gauges, one to demonstrate // a gauge with int64 value and the other to demonstrate a gauge with float64 value. // // # Metrics // // 1. process_heap_alloc (int64): Total bytes used by objects allocated in the heap. // It includes objects currently used and objects that are freed but not garbage collected. // // 2. process_heap_idle_to_alloc_ratio (float64): It is the ratio of Idle bytes to allocated // bytes in the heap. // // It periodically runs a function that retrieves the memory stats and updates the above two // metrics. These metrics are then exported using log exporter. // The program lets you choose the amount of memory (in MB) to consume. Choose different values // and query the metrics to see the change in metrics. package main import ( "bufio" "fmt" "log" "os" "runtime" "strconv" "strings" "time" "go.opencensus.io/examples/exporter" "go.opencensus.io/metric" "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricproducer" ) const ( metricsLogFile = "/tmp/metrics.log" ) var ( mem = &runtime.MemStats{} ) type memObj struct { size int b []byte } func newMemObj(size int) *memObj { n := &memObj{size: size, b: make([]byte, size)} for i := 0; i < n.size; i++ { n.b[i] = byte(i) } return n } var allocEntry *metric.Int64GaugeEntry var ratioEntry *metric.Float64Entry var arr []*memObj func getAlloc() uint64 { runtime.ReadMemStats(mem) return mem.HeapAlloc } func getIdleToAllocRatio() float64 { runtime.ReadMemStats(mem) return float64(mem.HeapIdle) / float64(mem.HeapAlloc) } func consumeMem(sizeMB int) { arr = make([]*memObj, sizeMB) for i := 0; i < sizeMB; i++ { arr = append(arr, newMemObj(1000000)) } } func doSomeWork(sizeMB int) { // do some work consumeMem(sizeMB) } func recordMetrics(delay int, done chan int) { tick := time.NewTicker(time.Duration(delay) * time.Second) for { select { case <-done: return case <-tick.C: // record heap allocation and idle to allocation ratio. 
// START record allocEntry.Set(int64(getAlloc())) // int64 gauge ratioEntry.Set(getIdleToAllocRatio()) // float64 gauge // END record } } } func getInput() int { reader := bufio.NewReader(os.Stdin) limit := 50 for { fmt.Printf("Enter memory (in MB between 1-%d): ", limit) text, _ := reader.ReadString('\n') sizeMB, err := strconv.Atoi(strings.TrimSuffix(text, "\n")) if err == nil { if sizeMB < 1 || sizeMB > limit { fmt.Printf("invalid value %s\n", text) continue } fmt.Printf("consuming %dMB\n", sizeMB) return sizeMB } fmt.Printf("error %v\n", err) } } func work() { fmt.Printf("Program periodically records following gauge metrics.\n") fmt.Printf(" 1. process_heap_alloc = the heap allocation (used + freed but not garbage collected)\n") fmt.Printf(" 2. process_idle_to_alloc_ratio = heap idle (unused) /allocation ratio\n") fmt.Printf("\nGo to file://%s to see the metrics. OR do `tail -f %s` in another terminal\n\n\n", metricsLogFile, metricsLogFile) fmt.Printf("Enter memory you would like to allocate in MB to change the value of above metrics.\n") // Do some work and record gauge metrics. for { sizeMB := getInput() doSomeWork(sizeMB) fmt.Printf("press CTRL+C to terminate the program\n") } } func main() { // Using log exporter to export metrics but you can choose any supported exporter. exporter, err := exporter.NewLogExporter(exporter.Options{ ReportingInterval: 10 * time.Second, MetricsLogFile: metricsLogFile, }) if err != nil { log.Fatalf("Error creating log exporter: %v", err) } exporter.Start() defer exporter.Stop() defer exporter.Close() // Create metric registry and register it with global producer manager. // START reg r := metric.NewRegistry() metricproducer.GlobalManager().AddProducer(r) // END reg // Create Int64Gauge to report memory usage of a process. // START alloc allocGauge, err := r.AddInt64Gauge( "process_heap_alloc", metric.WithDescription("Process heap allocation"), metric.WithUnit(metricdata.UnitBytes)) if err != nil { log.Fatalf("error creating heap allocation gauge, error %v\n", err) } // END alloc // START entryAlloc allocEntry, err = allocGauge.GetEntry() if err != nil { log.Fatalf("error getting heap allocation gauge entry, error %v\n", err) } // END entryAlloc // Create Float64Gauge to report fractional cpu consumed by Garbage Collection. // START idle ratioGauge, err := r.AddFloat64Gauge( "process_heap_idle_to_alloc_ratio", metric.WithDescription("process heap idle to allocate ratio"), metric.WithUnit(metricdata.UnitDimensionless)) if err != nil { log.Fatalf("error creating process heap idle to allocate ratio gauge, error %v\n", err) } // END idle // START entryIdle ratioEntry, err = ratioGauge.GetEntry() if err != nil { log.Fatalf("error getting process heap idle to allocate ratio gauge entry, error %v\n", err) } // END entryIdle // record gauge metrics every 5 seconds. This example records the gauges periodically. However, // depending on the application it can be non-periodic and can be recorded at any time. done := make(chan int) defer close(done) go recordMetrics(1, done) // do your work. work() } // END entire opencensus-go-0.24.0/examples/grpc/000077500000000000000000000000001433102037600171325ustar00rootroot00000000000000opencensus-go-0.24.0/examples/grpc/README.md000066400000000000000000000015431433102037600204140ustar00rootroot00000000000000# Example gRPC server and client with OpenCensus This example uses: * gRPC to create an RPC server and client. * The OpenCensus gRPC plugin to instrument the RPC server and client. 
* Debugging exporters to print stats and traces to stdout. ``` $ go get go.opencensus.io/examples/grpc/... ``` First, run the server: ``` $ go run $(go env GOPATH)/src/go.opencensus.io/examples/grpc/helloworld_server/main.go ``` Then, run the client: ``` $ go run $(go env GOPATH)/src/go.opencensus.io/examples/grpc/helloworld_client/main.go ``` You will see traces and stats exported on the stdout. You can use one of the [exporters](https://godoc.org/go.opencensus.io/exporter) to upload collected data to the backend of your choice. You can also see the z-pages provided from the server: * Traces: http://localhost:8081/debug/tracez * RPCs: http://localhost:8081/debug/rpcz opencensus-go-0.24.0/examples/grpc/helloworld_client/000077500000000000000000000000001433102037600226435ustar00rootroot00000000000000opencensus-go-0.24.0/examples/grpc/helloworld_client/main.go000066400000000000000000000035341433102037600241230ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "context" "log" "os" "time" "go.opencensus.io/examples/exporter" pb "go.opencensus.io/examples/grpc/proto" "go.opencensus.io/plugin/ocgrpc" "go.opencensus.io/stats/view" "google.golang.org/grpc" ) const ( address = "localhost:50051" defaultName = "world" ) func main() { // Register stats and trace exporters to export // the collected data. view.RegisterExporter(&exporter.PrintExporter{}) // Register the view to collect gRPC client stats. if err := view.Register(ocgrpc.DefaultClientViews...); err != nil { log.Fatal(err) } // Set up a connection to the server with the OpenCensus // stats handler to enable stats and tracing. conn, err := grpc.Dial(address, grpc.WithStatsHandler(&ocgrpc.ClientHandler{}), grpc.WithInsecure()) if err != nil { log.Fatalf("Cannot connect: %v", err) } defer conn.Close() c := pb.NewGreeterClient(conn) // Contact the server and print out its response. name := defaultName if len(os.Args) > 1 { name = os.Args[1] } view.SetReportingPeriod(time.Second) for { r, err := c.SayHello(context.Background(), &pb.HelloRequest{Name: name}) if err != nil { log.Printf("Could not greet: %v", err) } else { log.Printf("Greeting: %s", r.Message) } time.Sleep(2 * time.Second) } } opencensus-go-0.24.0/examples/grpc/helloworld_server/000077500000000000000000000000001433102037600226735ustar00rootroot00000000000000opencensus-go-0.24.0/examples/grpc/helloworld_server/main.go000066400000000000000000000043111433102037600241450ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. //go:generate protoc -I ../proto --go_out=plugins=grpc:../proto ../proto/helloworld.proto package main import ( "context" "log" "math/rand" "net" "net/http" "time" "go.opencensus.io/examples/exporter" pb "go.opencensus.io/examples/grpc/proto" "go.opencensus.io/plugin/ocgrpc" "go.opencensus.io/stats/view" "go.opencensus.io/trace" "go.opencensus.io/zpages" "google.golang.org/grpc" ) const port = ":50051" // server is used to implement helloworld.GreeterServer. type server struct{} // SayHello implements helloworld.GreeterServer func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { ctx, span := trace.StartSpan(ctx, "sleep") time.Sleep(time.Duration(rand.Float64() * float64(time.Second))) span.End() return &pb.HelloReply{Message: "Hello " + in.Name}, nil } func main() { // Start z-Pages server. go func() { mux := http.NewServeMux() zpages.Handle(mux, "/debug") log.Fatal(http.ListenAndServe("127.0.0.1:8081", mux)) }() // Register stats and trace exporters to export // the collected data. view.RegisterExporter(&exporter.PrintExporter{}) // Register the views to collect server request count. if err := view.Register(ocgrpc.DefaultServerViews...); err != nil { log.Fatal(err) } lis, err := net.Listen("tcp", port) if err != nil { log.Fatalf("Failed to listen: %v", err) } // Set up a new server with the OpenCensus // stats handler to enable stats and tracing. s := grpc.NewServer(grpc.StatsHandler(&ocgrpc.ServerHandler{})) pb.RegisterGreeterServer(s, &server{}) if err := s.Serve(lis); err != nil { log.Fatalf("Failed to serve: %v", err) } } opencensus-go-0.24.0/examples/grpc/proto/000077500000000000000000000000001433102037600202755ustar00rootroot00000000000000opencensus-go-0.24.0/examples/grpc/proto/helloworld.pb.go000066400000000000000000000125151433102037600234030ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: helloworld.proto /* Package helloworld is a generated protocol buffer package. It is generated from these files: helloworld.proto It has these top-level messages: HelloRequest HelloReply */ package helloworld // import "go.opencensus.io/examples/grpc/proto" import ( fmt "fmt" math "math" proto "github.com/golang/protobuf/proto" context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // The request message containing the user's name. 
type HelloRequest struct { Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` } func (m *HelloRequest) Reset() { *m = HelloRequest{} } func (m *HelloRequest) String() string { return proto.CompactTextString(m) } func (*HelloRequest) ProtoMessage() {} func (*HelloRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (m *HelloRequest) GetName() string { if m != nil { return m.Name } return "" } // The response message containing the greetings type HelloReply struct { Message string `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"` } func (m *HelloReply) Reset() { *m = HelloReply{} } func (m *HelloReply) String() string { return proto.CompactTextString(m) } func (*HelloReply) ProtoMessage() {} func (*HelloReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } func (m *HelloReply) GetMessage() string { if m != nil { return m.Message } return "" } func init() { proto.RegisterType((*HelloRequest)(nil), "helloworld.HelloRequest") proto.RegisterType((*HelloReply)(nil), "helloworld.HelloReply") } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // Client API for Greeter service type GreeterClient interface { // Sends a greeting SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) } type greeterClient struct { cc *grpc.ClientConn } func NewGreeterClient(cc *grpc.ClientConn) GreeterClient { return &greeterClient{cc} } func (c *greeterClient) SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) { out := new(HelloReply) err := grpc.Invoke(ctx, "/helloworld.Greeter/SayHello", in, out, c.cc, opts...) 
if err != nil { return nil, err } return out, nil } // Server API for Greeter service type GreeterServer interface { // Sends a greeting SayHello(context.Context, *HelloRequest) (*HelloReply, error) } func RegisterGreeterServer(s *grpc.Server, srv GreeterServer) { s.RegisterService(&_Greeter_serviceDesc, srv) } func _Greeter_SayHello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(HelloRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(GreeterServer).SayHello(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/helloworld.Greeter/SayHello", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(GreeterServer).SayHello(ctx, req.(*HelloRequest)) } return interceptor(ctx, in, info, handler) } var _Greeter_serviceDesc = grpc.ServiceDesc{ ServiceName: "helloworld.Greeter", HandlerType: (*GreeterServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "SayHello", Handler: _Greeter_SayHello_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: "helloworld.proto", } func init() { proto.RegisterFile("helloworld.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 175 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xc8, 0x48, 0xcd, 0xc9, 0xc9, 0x2f, 0xcf, 0x2f, 0xca, 0x49, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x42, 0x88, 0x28, 0x29, 0x71, 0xf1, 0x78, 0x80, 0x78, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x42, 0x5c, 0x2c, 0x79, 0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x60, 0xb6, 0x92, 0x1a, 0x17, 0x17, 0x54, 0x4d, 0x41, 0x4e, 0xa5, 0x90, 0x04, 0x17, 0x7b, 0x6e, 0x6a, 0x71, 0x71, 0x62, 0x3a, 0x4c, 0x11, 0x8c, 0x6b, 0xe4, 0xc9, 0xc5, 0xee, 0x5e, 0x94, 0x9a, 0x5a, 0x92, 0x5a, 0x24, 0x64, 0xc7, 0xc5, 0x11, 0x9c, 0x58, 0x09, 0xd6, 0x25, 0x24, 0xa1, 0x87, 0xe4, 0x02, 0x64, 0xcb, 0xa4, 0xc4, 0xb0, 0xc8, 0x14, 0xe4, 0x54, 0x2a, 0x31, 0x38, 0x19, 0x70, 0x49, 0x67, 0xe6, 0xeb, 0xa5, 0x17, 0x15, 0x24, 0xeb, 0xa5, 0x56, 0x24, 0xe6, 0x16, 0xe4, 0xa4, 0x16, 0x23, 0xa9, 0x75, 0xe2, 0x07, 0x2b, 0x0e, 0x07, 0xb1, 0x03, 0x40, 0x5e, 0x0a, 0x60, 0x4c, 0x62, 0x03, 0xfb, 0xcd, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x0f, 0xb7, 0xcd, 0xf2, 0xef, 0x00, 0x00, 0x00, } opencensus-go-0.24.0/examples/grpc/proto/helloworld.proto000066400000000000000000000021131433102037600235320ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; option java_multiple_files = true; option java_package = "io.grpc.examples.helloworld"; option java_outer_classname = "HelloWorldProto"; package helloworld; // The greeting service definition. service Greeter { // Sends a greeting rpc SayHello (HelloRequest) returns (HelloReply) {} } // The request message containing the user's name. 
message HelloRequest { string name = 1; } // The response message containing the greetings message HelloReply { string message = 1; } opencensus-go-0.24.0/examples/helloworld/000077500000000000000000000000001433102037600203525ustar00rootroot00000000000000opencensus-go-0.24.0/examples/helloworld/main.go000066400000000000000000000056261433102037600216360ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Command helloworld is an example program that collects data for // video size. package main import ( "context" "fmt" "log" "math/rand" "time" "go.opencensus.io/examples/exporter" "go.opencensus.io/stats" "go.opencensus.io/stats/view" "go.opencensus.io/tag" "go.opencensus.io/trace" ) var ( // frontendKey allows us to breakdown the recorded data // by the frontend used when uploading the video. frontendKey tag.Key // videoSize will measure the size of processed videos. videoSize *stats.Int64Measure ) func main() { ctx := context.Background() // Register an exporter to be able to retrieve // the data from the subscribed views. e, err := exporter.NewLogExporter(exporter.Options{ReportingInterval: time.Second}) if err != nil { log.Fatal(err) } e.Start() defer e.Stop() defer e.Close() trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) frontendKey = tag.MustNewKey("example.com/keys/frontend") videoSize = stats.Int64("example.com/measure/video_size", "size of processed videos", stats.UnitBytes) view.SetReportingPeriod(2 * time.Second) // Create view to see the processed video size // distribution broken down by frontend. // Register will allow view data to be exported. if err := view.Register(&view.View{ Name: "example.com/views/video_size", Description: "processed video size over time", TagKeys: []tag.Key{frontendKey}, Measure: videoSize, Aggregation: view.Distribution(1<<16, 1<<32), }); err != nil { log.Fatalf("Cannot register view: %v", err) } // Process the video. process(ctx) // Wait for a duration longer than reporting duration to ensure the stats // library reports the collected data. fmt.Println("Wait longer than the reporting duration...") time.Sleep(4 * time.Second) } // process processes the video and instruments the processing // by creating a span and collecting metrics about the operation. func process(ctx context.Context) { ctx, err := tag.New(ctx, tag.Insert(frontendKey, "mobile-ios9.3.5"), ) if err != nil { log.Fatal(err) } ctx, span := trace.StartSpan(ctx, "example.com/ProcessVideo") defer span.End() // Process video. // Record the processed video size. // Sleep for [1,10] milliseconds to fake work. 
time.Sleep(time.Duration(rand.Intn(10)+1) * time.Millisecond) stats.Record(ctx, videoSize.M(25648)) } opencensus-go-0.24.0/examples/http/000077500000000000000000000000001433102037600171565ustar00rootroot00000000000000opencensus-go-0.24.0/examples/http/README.md000066400000000000000000000015461433102037600204430ustar00rootroot00000000000000# Example net/http server and client with OpenCensus This example uses: * net/http to create a server and client. * The OpenCensus net/http plugin to instrument the server and client. * Debugging exporters to print stats and traces to stdout. ``` $ go get go.opencensus.io/examples/http/... ``` First, run the server: ``` $ go run $(go env GOPATH)/src/go.opencensus.io/examples/http/helloworld_server/main.go ``` Then, run the client: ``` $ go run $(go env GOPATH)/src/go.opencensus.io/examples/http/helloworld_client/main.go ``` You will see traces and stats exported on the stdout. You can use one of the [exporters](https://godoc.org/go.opencensus.io/exporter) to upload collected data to the backend of your choice. You can also see the z-pages provided from the server: * Traces: http://localhost:8081/debug/tracez * RPCs: http://localhost:8081/debug/rpcz opencensus-go-0.24.0/examples/http/helloworld_client/000077500000000000000000000000001433102037600226675ustar00rootroot00000000000000opencensus-go-0.24.0/examples/http/helloworld_client/main.go000066400000000000000000000030441433102037600241430ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "log" "net/http" "time" "go.opencensus.io/plugin/ochttp" "go.opencensus.io/trace" "go.opencensus.io/examples/exporter" "go.opencensus.io/stats/view" ) const server = "http://localhost:50030" func main() { // Register stats and trace exporters to export the collected data. exporter := &exporter.PrintExporter{} view.RegisterExporter(exporter) trace.RegisterExporter(exporter) // Always trace for this demo. In a production application, you should // configure this to a trace.ProbabilitySampler set at the desired // probability. trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) // Report stats at every second. view.SetReportingPeriod(1 * time.Second) client := &http.Client{Transport: &ochttp.Transport{}} resp, err := client.Get(server) if err != nil { log.Printf("Failed to get response: %v", err) } else { resp.Body.Close() } time.Sleep(2 * time.Second) // Wait until stats are reported. } opencensus-go-0.24.0/examples/http/helloworld_server/000077500000000000000000000000001433102037600227175ustar00rootroot00000000000000opencensus-go-0.24.0/examples/http/helloworld_server/main.go000066400000000000000000000051071433102037600241750ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "fmt" "log" "net/http" "time" "go.opencensus.io/zpages" "go.opencensus.io/examples/exporter" "go.opencensus.io/plugin/ochttp" "go.opencensus.io/stats/view" "go.opencensus.io/trace" ) const ( metricsLogFile = "/tmp/metrics.log" tracesLogFile = "/tmp/trace.log" ) func main() { // Start z-Pages server. go func() { mux := http.NewServeMux() zpages.Handle(mux, "/debug") log.Fatal(http.ListenAndServe("127.0.0.1:8081", mux)) }() // Using log exporter to export metrics but you can choose any supported exporter. exporter, err := exporter.NewLogExporter(exporter.Options{ ReportingInterval: 10 * time.Second, MetricsLogFile: metricsLogFile, TracesLogFile: tracesLogFile, }) if err != nil { log.Fatalf("Error creating log exporter: %v", err) } exporter.Start() defer exporter.Stop() defer exporter.Close() // Always trace for this demo. In a production application, you should // configure this to a trace.ProbabilitySampler set at the desired // probability. trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) // Report stats at every second. view.SetReportingPeriod(1 * time.Second) client := &http.Client{Transport: &ochttp.Transport{}} http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { fmt.Fprintf(w, "hello world") // Provide an example of how spans can be annotated with metadata _, span := trace.StartSpan(req.Context(), "child") defer span.End() span.Annotate([]trace.Attribute{trace.StringAttribute("key", "value")}, "something happened") span.AddAttributes(trace.StringAttribute("hello", "world")) time.Sleep(time.Millisecond * 125) r, _ := http.NewRequest("GET", "https://example.com", nil) // Propagate the trace header info in the outgoing requests. r = r.WithContext(req.Context()) resp, err := client.Do(r) if err != nil { log.Println(err) } else { // TODO: handle response resp.Body.Close() } }) log.Fatal(http.ListenAndServe(":50030", &ochttp.Handler{})) } opencensus-go-0.24.0/examples/quickstart/000077500000000000000000000000001433102037600203715ustar00rootroot00000000000000opencensus-go-0.24.0/examples/quickstart/stats.go000066400000000000000000000112371433102037600220620ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// Command stats implements the stats Quick Start example from: // // https://opencensus.io/quickstart/go/metrics/ package main import ( "bufio" "bytes" "context" "fmt" "io" "log" "os" "time" "net/http" "go.opencensus.io/examples/exporter" "go.opencensus.io/stats" "go.opencensus.io/stats/view" "go.opencensus.io/tag" "go.opencensus.io/zpages" ) const ( metricsLogFile = "/tmp/metrics.log" ) // Measures for the stats quickstart. var ( // The latency in milliseconds mLatencyMs = stats.Float64("repl/latency", "The latency in milliseconds per REPL loop", stats.UnitMilliseconds) // Counts the number of lines read in from standard input mLinesIn = stats.Int64("repl/lines_in", "The number of lines read in", stats.UnitDimensionless) // Encounters the number of non EOF(end-of-file) errors. mErrors = stats.Int64("repl/errors", "The number of errors encountered", stats.UnitDimensionless) // Counts/groups the lengths of lines read in. mLineLengths = stats.Int64("repl/line_lengths", "The distribution of line lengths", stats.UnitBytes) ) // TagKeys for the stats quickstart. var ( keyMethod = tag.MustNewKey("method") ) // Views for the stats quickstart. var ( latencyView = &view.View{ Name: "demo/latency", Measure: mLatencyMs, Description: "The distribution of the latencies", // Latency in buckets: // [>=0ms, >=25ms, >=50ms, >=75ms, >=100ms, >=200ms, >=400ms, >=600ms, >=800ms, >=1s, >=2s, >=4s, >=6s] Aggregation: view.Distribution(25, 50, 75, 100, 200, 400, 600, 800, 1000, 2000, 4000, 6000), TagKeys: []tag.Key{keyMethod}} lineCountView = &view.View{ Name: "demo/lines_in", Measure: mLinesIn, Description: "The number of lines from standard input", Aggregation: view.Count(), } errorCountView = &view.View{ Name: "demo/errors", Measure: mErrors, Description: "The number of errors encountered", Aggregation: view.Count(), } lineLengthView = &view.View{ Name: "demo/line_lengths", Description: "Groups the lengths of keys in buckets", Measure: mLineLengths, // Lengths: [>=0B, >=5B, >=10B, >=15B, >=20B, >=40B, >=60B, >=80, >=100B, >=200B, >=400, >=600, >=800, >=1000] Aggregation: view.Distribution(5, 10, 15, 20, 40, 60, 80, 100, 200, 400, 600, 800, 1000), } ) func main() { zpages.Handle(nil, "/debug") go http.ListenAndServe("localhost:8080", nil) // Using log exporter here to export metrics but you can choose any supported exporter. exporter, err := exporter.NewLogExporter(exporter.Options{ ReportingInterval: 10 * time.Second, MetricsLogFile: metricsLogFile, }) if err != nil { log.Fatalf("Error creating log exporter: %v", err) } exporter.Start() defer exporter.Stop() defer exporter.Close() // Register the views if err := view.Register(latencyView, lineCountView, errorCountView, lineLengthView); err != nil { log.Fatalf("Failed to register views: %v", err) } // In a REPL: // 1. Read input // 2. process input br := bufio.NewReader(os.Stdin) // repl is the read, evaluate, print, loop for { if err := readEvaluateProcess(br); err != nil { if err == io.EOF { return } log.Fatal(err) } } } // readEvaluateProcess reads a line from the input reader and // then processes it. It returns an error if any was encountered. 
func readEvaluateProcess(br *bufio.Reader) error { ctx, err := tag.New(context.Background(), tag.Insert(keyMethod, "repl")) if err != nil { return err } fmt.Printf("> ") line, _, err := br.ReadLine() if err != nil { if err != io.EOF { stats.Record(ctx, mErrors.M(1)) } return err } out, err := processLine(ctx, line) if err != nil { stats.Record(ctx, mErrors.M(1)) return err } fmt.Printf("< %s\n\n", out) return nil } // processLine takes in a line of text and // transforms it. Currently it just capitalizes it. func processLine(ctx context.Context, in []byte) (out []byte, err error) { startTime := time.Now() defer func() { ms := float64(time.Since(startTime).Nanoseconds()) / 1e6 stats.Record(ctx, mLinesIn.M(1), mLatencyMs.M(ms), mLineLengths.M(int64(len(in)))) }() return bytes.ToUpper(in), nil } opencensus-go-0.24.0/exporter/000077500000000000000000000000001433102037600162315ustar00rootroot00000000000000opencensus-go-0.24.0/exporter/stackdriver/000077500000000000000000000000001433102037600205525ustar00rootroot00000000000000opencensus-go-0.24.0/exporter/stackdriver/propagation/000077500000000000000000000000001433102037600230755ustar00rootroot00000000000000opencensus-go-0.24.0/exporter/stackdriver/propagation/http.go000066400000000000000000000056261433102037600244140ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package propagation implement X-Cloud-Trace-Context header propagation used // by Google Cloud products. package propagation // import "go.opencensus.io/exporter/stackdriver/propagation" import ( "encoding/binary" "encoding/hex" "fmt" "net/http" "strconv" "strings" "go.opencensus.io/trace" "go.opencensus.io/trace/propagation" ) const ( httpHeaderMaxSize = 200 httpHeader = `X-Cloud-Trace-Context` ) var _ propagation.HTTPFormat = (*HTTPFormat)(nil) // HTTPFormat implements propagation.HTTPFormat to propagate // traces in HTTP headers for Google Cloud Platform and Stackdriver Trace. type HTTPFormat struct{} // SpanContextFromRequest extracts a Stackdriver Trace span context from incoming requests. func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { h := req.Header.Get(httpHeader) // See https://cloud.google.com/trace/docs/faq for the header HTTPFormat. // Return if the header is empty or missing, or if the header is unreasonably // large, to avoid making unnecessary copies of a large string. if h == "" || len(h) > httpHeaderMaxSize { return trace.SpanContext{}, false } // Parse the trace id field. slash := strings.Index(h, `/`) if slash == -1 { return trace.SpanContext{}, false } tid, h := h[:slash], h[slash+1:] buf, err := hex.DecodeString(tid) if err != nil { return trace.SpanContext{}, false } copy(sc.TraceID[:], buf) // Parse the span id field. 
spanstr := h semicolon := strings.Index(h, `;`) if semicolon != -1 { spanstr, h = h[:semicolon], h[semicolon+1:] } sid, err := strconv.ParseUint(spanstr, 10, 64) if err != nil { return trace.SpanContext{}, false } binary.BigEndian.PutUint64(sc.SpanID[:], sid) // Parse the options field, options field is optional. if !strings.HasPrefix(h, "o=") { return sc, true } o, err := strconv.ParseUint(h[2:], 10, 64) if err != nil { return trace.SpanContext{}, false } sc.TraceOptions = trace.TraceOptions(o) return sc, true } // SpanContextToRequest modifies the given request to include a Stackdriver Trace header. func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { sid := binary.BigEndian.Uint64(sc.SpanID[:]) header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions)) req.Header.Set(httpHeader, header) } opencensus-go-0.24.0/exporter/stackdriver/propagation/http_test.go000066400000000000000000000041771433102037600254530ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package propagation import ( "net/http" "reflect" "testing" "go.opencensus.io/trace" ) func TestHTTPFormat(t *testing.T) { format := &HTTPFormat{} traceID := [16]byte{16, 84, 69, 170, 120, 67, 188, 139, 242, 6, 177, 32, 0, 16, 0, 0} spanID1 := [8]byte{255, 0, 0, 0, 0, 0, 0, 123} spanID2 := [8]byte{0, 0, 0, 0, 0, 0, 0, 123} tests := []struct { incoming string wantSpanContext trace.SpanContext }{ { incoming: "105445aa7843bc8bf206b12000100000/18374686479671623803;o=1", wantSpanContext: trace.SpanContext{ TraceID: traceID, SpanID: spanID1, TraceOptions: 1, }, }, { incoming: "105445aa7843bc8bf206b12000100000/123;o=0", wantSpanContext: trace.SpanContext{ TraceID: traceID, SpanID: spanID2, TraceOptions: 0, }, }, } for _, tt := range tests { t.Run(tt.incoming, func(t *testing.T) { req, _ := http.NewRequest("GET", "http://example.com", nil) req.Header.Add(httpHeader, tt.incoming) sc, ok := format.SpanContextFromRequest(req) if !ok { t.Errorf("exporter.SpanContextFromRequest() = false; want true") } if got, want := sc, tt.wantSpanContext; !reflect.DeepEqual(got, want) { t.Errorf("exporter.SpanContextFromRequest() returned span context %v; want %v", got, want) } req, _ = http.NewRequest("GET", "http://example.com", nil) format.SpanContextToRequest(sc, req) if got, want := req.Header.Get(httpHeader), tt.incoming; got != want { t.Errorf("exporter.SpanContextToRequest() returned header %q; want %q", got, want) } }) } } opencensus-go-0.24.0/go.mod000066400000000000000000000004541433102037600154720ustar00rootroot00000000000000module go.opencensus.io require ( github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e github.com/golang/protobuf v1.4.3 github.com/google/go-cmp v0.5.3 github.com/stretchr/testify v1.8.1 golang.org/x/net v0.0.0-20201110031124-69a78807bb2b google.golang.org/grpc v1.33.2 ) go 1.13 
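For context, the `HTTPFormat` defined in the propagation package above is usually not called directly; it is plugged into the `ochttp` client and server plugin as the propagation format. The snippet below is an illustrative sketch only — it is not a file in this repository — and assumes the import paths `go.opencensus.io/plugin/ochttp` and `go.opencensus.io/exporter/stackdriver/propagation` shown earlier.

```go
package main

import (
	"log"
	"net/http"

	sdpropagation "go.opencensus.io/exporter/stackdriver/propagation"
	"go.opencensus.io/plugin/ochttp"
)

func main() {
	// Outgoing requests: inject the X-Cloud-Trace-Context header.
	client := &http.Client{
		Transport: &ochttp.Transport{Propagation: &sdpropagation.HTTPFormat{}},
	}
	_ = client

	// Incoming requests: extract the span context from X-Cloud-Trace-Context.
	handler := &ochttp.Handler{
		Handler:     http.DefaultServeMux,
		Propagation: &sdpropagation.HTTPFormat{},
	}
	log.Fatal(http.ListenAndServe(":8080", handler))
}
```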
opencensus-go-0.24.0/go.sum000066400000000000000000000242361433102037600155230ustar00rootroot00000000000000cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3 h1:x95R7cp+rSeeqAMI2knLtQ0DKlaBhv2NrtrOvafPHRo= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 
opencensus-go-0.24.0/internal/000077500000000000000000000000001433102037600161755ustar00rootroot00000000000000opencensus-go-0.24.0/internal/check/000077500000000000000000000000001433102037600172525ustar00rootroot00000000000000opencensus-go-0.24.0/internal/check/version.go000066400000000000000000000045111433102037600212670ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Command version checks that the version string matches the latest Git tag. // This is expected to pass only on the master branch. package main import ( "bytes" "fmt" "log" "os" "os/exec" "sort" "strconv" "strings" opencensus "go.opencensus.io" ) func main() { cmd := exec.Command("git", "tag") var buf bytes.Buffer cmd.Stdout = &buf err := cmd.Run() if err != nil { log.Fatal(err) } var versions []version for _, vStr := range strings.Split(buf.String(), "\n") { if len(vStr) == 0 { continue } // ignore pre-release versions if isPreRelease(vStr) { continue } versions = append(versions, parseVersion(vStr)) } sort.Slice(versions, func(i, j int) bool { return versionLess(versions[i], versions[j]) }) latest := versions[len(versions)-1] codeVersion := parseVersion("v" + opencensus.Version()) if !versionLess(latest, codeVersion) { fmt.Printf("exporter.Version is out of date with Git tags. Got %s; want something greater than %s\n", opencensus.Version(), latest) os.Exit(1) } fmt.Printf("exporter.Version is up-to-date: %s\n", opencensus.Version()) } type version [3]int func versionLess(v1, v2 version) bool { for c := 0; c < 3; c++ { if diff := v1[c] - v2[c]; diff != 0 { return diff < 0 } } return false } func isPreRelease(vStr string) bool { split := strings.Split(vStr[1:], ".") return strings.Contains(split[2], "-") } func parseVersion(vStr string) version { split := strings.Split(vStr[1:], ".") var ( v version err error ) for i := 0; i < 3; i++ { v[i], err = strconv.Atoi(split[i]) if err != nil { fmt.Printf("Unrecognized version tag %q: %s\n", vStr, err) os.Exit(2) } } return v } func (v version) String() string { return fmt.Sprintf("%d.%d.%d", v[0], v[1], v[2]) } opencensus-go-0.24.0/internal/internal.go000066400000000000000000000023441433102037600203430ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal // import "go.opencensus.io/internal" import ( "fmt" "time" opencensus "go.opencensus.io" ) // UserAgent is the user agent to be added to the outgoing // requests from the exporters. 
var UserAgent = fmt.Sprintf("opencensus-go/%s", opencensus.Version()) // MonotonicEndTime returns the end time at present // but offset from start, monotonically. // // The monotonic clock is used in subtractions hence // the duration since start added back to start gives // end as a monotonic time. // See https://golang.org/pkg/time/#hdr-Monotonic_Clocks func MonotonicEndTime(start time.Time) time.Time { return start.Add(time.Since(start)) } opencensus-go-0.24.0/internal/readme/000077500000000000000000000000001433102037600174325ustar00rootroot00000000000000opencensus-go-0.24.0/internal/readme/README.md000066400000000000000000000002211433102037600207040ustar00rootroot00000000000000Use the following commands to regenerate the README. ```bash $ GO11MODULE=off go get github.com/rakyll/embedmd $ embedmd -w ../../README.md ``` opencensus-go-0.24.0/internal/readme/mkdocs.sh000077500000000000000000000000571433102037600212530ustar00rootroot00000000000000#!/bin/sh embedmd source.md > ../../README.md opencensus-go-0.24.0/internal/readme/stats.go000066400000000000000000000031321433102037600211160ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package readme generates the README. package readme // import "go.opencensus.io/internal/readme" import ( "context" "log" "go.opencensus.io/stats" "go.opencensus.io/stats/view" ) // README.md is generated with the examples here by using embedmd. // For more details, see https://github.com/rakyll/embedmd. func statsExamples() { ctx := context.Background() videoSize := stats.Int64("example.com/video_size", "processed video size", "MB") // START aggs distAgg := view.Distribution(1<<32, 2<<32, 3<<32) countAgg := view.Count() sumAgg := view.Sum() // END aggs _, _, _ = distAgg, countAgg, sumAgg // START view if err := view.Register(&view.View{ Name: "example.com/video_size_distribution", Description: "distribution of processed video size over time", Measure: videoSize, Aggregation: view.Distribution(1<<32, 2<<32, 3<<32), }); err != nil { log.Fatalf("Failed to register view: %v", err) } // END view // START record stats.Record(ctx, videoSize.M(102478)) // END record } opencensus-go-0.24.0/internal/readme/tags.go000066400000000000000000000024671433102037600207300ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package readme import ( "context" "log" "go.opencensus.io/tag" ) func tagsExamples() { ctx := context.Background() osKey := tag.MustNewKey("example.com/keys/user-os") userIDKey := tag.MustNewKey("example.com/keys/user-id") // START new ctx, err := tag.New(ctx, tag.Insert(osKey, "macOS-10.12.5"), tag.Upsert(userIDKey, "cde36753ed"), ) if err != nil { log.Fatal(err) } // END new // START profiler ctx, err = tag.New(ctx, tag.Insert(osKey, "macOS-10.12.5"), tag.Insert(userIDKey, "fff0989878"), ) if err != nil { log.Fatal(err) } tag.Do(ctx, func(ctx context.Context) { // Do work. // When profiling is on, samples will be // recorded with the key/values from the tag map. }) // END profiler } opencensus-go-0.24.0/internal/readme/trace.go000066400000000000000000000015241433102037600210610ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package readme import ( "context" "go.opencensus.io/trace" ) func traceExamples() { ctx := context.Background() // START startend ctx, span := trace.StartSpan(ctx, "cache.Get") defer span.End() // Do work to get from cache. // END startend } opencensus-go-0.24.0/internal/sanitize.go000066400000000000000000000024411433102037600203530ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal import ( "strings" "unicode" ) const labelKeySizeLimit = 100 // Sanitize returns a string that is trunacated to 100 characters if it's too // long, and replaces non-alphanumeric characters to underscores. func Sanitize(s string) string { if len(s) == 0 { return s } if len(s) > labelKeySizeLimit { s = s[:labelKeySizeLimit] } s = strings.Map(sanitizeRune, s) if unicode.IsDigit(rune(s[0])) { s = "key_" + s } if s[0] == '_' { s = "key" + s } return s } // converts anything that is not a letter or digit to an underscore func sanitizeRune(r rune) rune { if unicode.IsLetter(r) || unicode.IsDigit(r) { return r } // Everything else turns into an underscore return '_' } opencensus-go-0.24.0/internal/sanitize_test.go000066400000000000000000000032011433102037600214050ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal import ( "strings" "testing" ) func TestSanitize(t *testing.T) { tests := []struct { name string input string want string }{ { name: "trunacate long string", input: strings.Repeat("a", 101), want: strings.Repeat("a", 100), }, { name: "replace character", input: "test/key-1", want: "test_key_1", }, { name: "add prefix if starting with digit", input: "0123456789", want: "key_0123456789", }, { name: "add prefix if starting with _", input: "_0123456789", want: "key_0123456789", }, { name: "starts with _ after sanitization", input: "/0123456789", want: "key_0123456789", }, { name: "valid input", input: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_0123456789", want: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_0123456789", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got, want := Sanitize(tt.input), tt.want; got != want { t.Errorf("sanitize() = %q; want %q", got, want) } }) } } opencensus-go-0.24.0/internal/tagencoding/000077500000000000000000000000001433102037600204575ustar00rootroot00000000000000opencensus-go-0.24.0/internal/tagencoding/tagencoding.go000066400000000000000000000040111433102037600232640ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Package tagencoding contains the tag encoding // used interally by the stats collector. package tagencoding // import "go.opencensus.io/internal/tagencoding" // Values represent the encoded buffer for the values. type Values struct { Buffer []byte WriteIndex int ReadIndex int } func (vb *Values) growIfRequired(expected int) { if len(vb.Buffer)-vb.WriteIndex < expected { tmp := make([]byte, 2*(len(vb.Buffer)+1)+expected) copy(tmp, vb.Buffer) vb.Buffer = tmp } } // WriteValue is the helper method to encode Values from map[Key][]byte. func (vb *Values) WriteValue(v []byte) { length := len(v) & 0xff vb.growIfRequired(1 + length) // writing length of v vb.Buffer[vb.WriteIndex] = byte(length) vb.WriteIndex++ if length == 0 { // No value was encoded for this key return } // writing v copy(vb.Buffer[vb.WriteIndex:], v[:length]) vb.WriteIndex += length } // ReadValue is the helper method to decode Values to a map[Key][]byte. func (vb *Values) ReadValue() []byte { // read length of v length := int(vb.Buffer[vb.ReadIndex]) vb.ReadIndex++ if length == 0 { // No value was encoded for this key return nil } // read value of v v := make([]byte, length) endIdx := vb.ReadIndex + length copy(v, vb.Buffer[vb.ReadIndex:endIdx]) vb.ReadIndex = endIdx return v } // Bytes returns a reference to already written bytes in the Buffer. 
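// The returned slice aliases the internal Buffer rather than copying it, so callers that need a stable snapshot should copy it before issuing further WriteValue calls.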
func (vb *Values) Bytes() []byte { return vb.Buffer[:vb.WriteIndex] } opencensus-go-0.24.0/internal/testpb/000077500000000000000000000000001433102037600174765ustar00rootroot00000000000000opencensus-go-0.24.0/internal/testpb/generate.sh000077500000000000000000000002271433102037600216300ustar00rootroot00000000000000#!/bin/sh # generate .pb.go file from .proto file. set -e protoc --go_out=plugins=grpc:. test.proto echo '//go:generate ./generate.sh ' >> test.pb.go opencensus-go-0.24.0/internal/testpb/impl.go000066400000000000000000000042701433102037600207710ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package testpb import ( "context" "fmt" "io" "net" "testing" "time" "go.opencensus.io/plugin/ocgrpc" "go.opencensus.io/trace" "google.golang.org/grpc" ) type testServer struct{} var _ FooServer = (*testServer)(nil) func (s *testServer) Single(ctx context.Context, in *FooRequest) (*FooResponse, error) { if in.SleepNanos > 0 { _, span := trace.StartSpan(ctx, "testpb.Single.Sleep") span.AddAttributes(trace.Int64Attribute("sleep_nanos", in.SleepNanos)) time.Sleep(time.Duration(in.SleepNanos)) span.End() } if in.Fail { return nil, fmt.Errorf("request failed") } return &FooResponse{}, nil } func (s *testServer) Multiple(stream Foo_MultipleServer) error { for { in, err := stream.Recv() if err == io.EOF { return nil } if err != nil { return err } if in.Fail { return fmt.Errorf("request failed") } if err := stream.Send(&FooResponse{}); err != nil { return err } } } // NewTestClient returns a new TestClient. func NewTestClient(l *testing.T) (client FooClient, cleanup func()) { // initialize server listener, err := net.Listen("tcp", "localhost:0") if err != nil { l.Fatal(err) } server := grpc.NewServer(grpc.StatsHandler(&ocgrpc.ServerHandler{})) RegisterFooServer(server, &testServer{}) go server.Serve(listener) // Initialize client. clientConn, err := grpc.Dial( listener.Addr().String(), grpc.WithInsecure(), grpc.WithStatsHandler(&ocgrpc.ClientHandler{}), grpc.WithBlock()) if err != nil { l.Fatal(err) } client = NewFooClient(clientConn) cleanup = func() { server.GracefulStop() clientConn.Close() } return client, cleanup } opencensus-go-0.24.0/internal/testpb/test.pb.go000066400000000000000000000150541433102037600214110ustar00rootroot00000000000000// Code generated by protoc-gen-go. DO NOT EDIT. // source: test.proto /* Package testpb is a generated protocol buffer package. It is generated from these files: test.proto It has these top-level messages: FooRequest FooResponse */ package testpb // import "go.opencensus.io/internal/testpb" import ( fmt "fmt" math "math" proto "github.com/golang/protobuf/proto" context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. 
// A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type FooRequest struct { Fail bool `protobuf:"varint,1,opt,name=fail" json:"fail,omitempty"` SleepNanos int64 `protobuf:"varint,2,opt,name=sleep_nanos,json=sleepNanos" json:"sleep_nanos,omitempty"` } func (m *FooRequest) Reset() { *m = FooRequest{} } func (m *FooRequest) String() string { return proto.CompactTextString(m) } func (*FooRequest) ProtoMessage() {} func (*FooRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (m *FooRequest) GetFail() bool { if m != nil { return m.Fail } return false } func (m *FooRequest) GetSleepNanos() int64 { if m != nil { return m.SleepNanos } return 0 } type FooResponse struct { } func (m *FooResponse) Reset() { *m = FooResponse{} } func (m *FooResponse) String() string { return proto.CompactTextString(m) } func (*FooResponse) ProtoMessage() {} func (*FooResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } func init() { proto.RegisterType((*FooRequest)(nil), "testpb.FooRequest") proto.RegisterType((*FooResponse)(nil), "testpb.FooResponse") } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // Client API for Foo service type FooClient interface { Single(ctx context.Context, in *FooRequest, opts ...grpc.CallOption) (*FooResponse, error) Multiple(ctx context.Context, opts ...grpc.CallOption) (Foo_MultipleClient, error) } type fooClient struct { cc *grpc.ClientConn } func NewFooClient(cc *grpc.ClientConn) FooClient { return &fooClient{cc} } func (c *fooClient) Single(ctx context.Context, in *FooRequest, opts ...grpc.CallOption) (*FooResponse, error) { out := new(FooResponse) err := grpc.Invoke(ctx, "/testpb.Foo/Single", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } func (c *fooClient) Multiple(ctx context.Context, opts ...grpc.CallOption) (Foo_MultipleClient, error) { stream, err := grpc.NewClientStream(ctx, &_Foo_serviceDesc.Streams[0], c.cc, "/testpb.Foo/Multiple", opts...) 
if err != nil { return nil, err } x := &fooMultipleClient{stream} return x, nil } type Foo_MultipleClient interface { Send(*FooRequest) error Recv() (*FooResponse, error) grpc.ClientStream } type fooMultipleClient struct { grpc.ClientStream } func (x *fooMultipleClient) Send(m *FooRequest) error { return x.ClientStream.SendMsg(m) } func (x *fooMultipleClient) Recv() (*FooResponse, error) { m := new(FooResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // Server API for Foo service type FooServer interface { Single(context.Context, *FooRequest) (*FooResponse, error) Multiple(Foo_MultipleServer) error } func RegisterFooServer(s *grpc.Server, srv FooServer) { s.RegisterService(&_Foo_serviceDesc, srv) } func _Foo_Single_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(FooRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(FooServer).Single(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/testpb.Foo/Single", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(FooServer).Single(ctx, req.(*FooRequest)) } return interceptor(ctx, in, info, handler) } func _Foo_Multiple_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(FooServer).Multiple(&fooMultipleServer{stream}) } type Foo_MultipleServer interface { Send(*FooResponse) error Recv() (*FooRequest, error) grpc.ServerStream } type fooMultipleServer struct { grpc.ServerStream } func (x *fooMultipleServer) Send(m *FooResponse) error { return x.ServerStream.SendMsg(m) } func (x *fooMultipleServer) Recv() (*FooRequest, error) { m := new(FooRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } var _Foo_serviceDesc = grpc.ServiceDesc{ ServiceName: "testpb.Foo", HandlerType: (*FooServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "Single", Handler: _Foo_Single_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "Multiple", Handler: _Foo_Multiple_Handler, ServerStreams: true, ClientStreams: true, }, }, Metadata: "test.proto", } func init() { proto.RegisterFile("test.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 165 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0x49, 0x2d, 0x2e, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0xb1, 0x0b, 0x92, 0x94, 0x1c, 0xb9, 0xb8, 0xdc, 0xf2, 0xf3, 0x83, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x84, 0x84, 0xb8, 0x58, 0xd2, 0x12, 0x33, 0x73, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x82, 0xc0, 0x6c, 0x21, 0x79, 0x2e, 0xee, 0xe2, 0x9c, 0xd4, 0xd4, 0x82, 0xf8, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xe6, 0x20, 0x2e, 0xb0, 0x90, 0x1f, 0x48, 0x44, 0x89, 0x97, 0x8b, 0x1b, 0x6c, 0x44, 0x71, 0x41, 0x7e, 0x5e, 0x71, 0xaa, 0x51, 0x21, 0x17, 0xb3, 0x5b, 0x7e, 0xbe, 0x90, 0x21, 0x17, 0x5b, 0x70, 0x66, 0x5e, 0x7a, 0x4e, 0xaa, 0x90, 0x90, 0x1e, 0xc4, 0x2e, 0x3d, 0x84, 0x45, 0x52, 0xc2, 0x28, 0x62, 0x10, 0x9d, 0x42, 0xe6, 0x5c, 0x1c, 0xbe, 0xa5, 0x39, 0x25, 0x99, 0x05, 0x24, 0x68, 0xd2, 0x60, 0x34, 0x60, 0x4c, 0x62, 0x03, 0xfb, 0xc9, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x37, 0xb1, 0x2d, 0x6e, 0xe1, 0x00, 0x00, 0x00, } //go:generate ./generate.sh opencensus-go-0.24.0/internal/testpb/test.proto000066400000000000000000000004001433102037600215340ustar00rootroot00000000000000syntax = "proto3"; package testpb; message FooRequest { 
bool fail = 1; int64 sleep_nanos = 2; } message FooResponse { } service Foo { rpc Single(FooRequest) returns (FooResponse); rpc Multiple(stream FooRequest) returns (stream FooResponse); } opencensus-go-0.24.0/internal/traceinternals.go000066400000000000000000000030721433102037600215440ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal import ( "time" ) // Trace allows internal access to some trace functionality. // TODO(#412): remove this var Trace interface{} // LocalSpanStoreEnabled true if the local span store is enabled. var LocalSpanStoreEnabled bool // BucketConfiguration stores the number of samples to store for span buckets // for successful and failed spans for a particular span name. type BucketConfiguration struct { Name string MaxRequestsSucceeded int MaxRequestsErrors int } // PerMethodSummary is a summary of the spans stored for a single span name. type PerMethodSummary struct { Active int LatencyBuckets []LatencyBucketSummary ErrorBuckets []ErrorBucketSummary } // LatencyBucketSummary is a summary of a latency bucket. type LatencyBucketSummary struct { MinLatency, MaxLatency time.Duration Size int } // ErrorBucketSummary is a summary of an error bucket. type ErrorBucketSummary struct { ErrorCode int32 Size int } opencensus-go-0.24.0/metric/000077500000000000000000000000001433102037600156445ustar00rootroot00000000000000opencensus-go-0.24.0/metric/common.go000066400000000000000000000074041433102037600174700ustar00rootroot00000000000000// Copyright 2019, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric import ( "sync" "time" "go.opencensus.io/internal/tagencoding" "go.opencensus.io/metric/metricdata" ) // baseMetric is common representation for gauge and cumulative metrics. // // baseMetric maintains a value for each combination of label values passed to // Set, Add, or Inc method. // // baseMetric should not be used directly, use metric specific type such as // Float64Gauge or Int64Gauge. 
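// Internally, each combination of label values is encoded into a single string key (see encodeLabelVals) and its entry is stored in a sync.Map, so entries can be read and updated concurrently.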
type baseMetric struct { vals sync.Map desc metricdata.Descriptor start time.Time keys []metricdata.LabelKey constLabelValues []metricdata.LabelValue bmType baseMetricType } type baseMetricType int const ( gaugeInt64 baseMetricType = iota gaugeFloat64 derivedGaugeInt64 derivedGaugeFloat64 cumulativeInt64 cumulativeFloat64 derivedCumulativeInt64 derivedCumulativeFloat64 ) type baseEntry interface { read(t time.Time) metricdata.Point } func (bm *baseMetric) startTime() *time.Time { switch bm.bmType { case cumulativeInt64, cumulativeFloat64, derivedCumulativeInt64, derivedCumulativeFloat64: return &bm.start default: // gauges don't have start time. return nil } } // Read returns the current values of the baseMetric as a metric for export. func (bm *baseMetric) read() *metricdata.Metric { now := time.Now() startTime := bm.startTime() if startTime == nil { startTime = &now } m := &metricdata.Metric{ Descriptor: bm.desc, } bm.vals.Range(func(k, v interface{}) bool { entry := v.(baseEntry) key := k.(string) labelVals := bm.decodeLabelVals(key) m.TimeSeries = append(m.TimeSeries, &metricdata.TimeSeries{ StartTime: *startTime, LabelValues: labelVals, Points: []metricdata.Point{ entry.read(now), }, }) return true }) return m } func (bm *baseMetric) encodeLabelVals(labelVals []metricdata.LabelValue) string { vb := &tagencoding.Values{} for _, v := range labelVals { b := make([]byte, 1, len(v.Value)+1) if v.Present { b[0] = 1 b = append(b, []byte(v.Value)...) } vb.WriteValue(b) } return string(vb.Bytes()) } func (bm *baseMetric) decodeLabelVals(s string) []metricdata.LabelValue { vals := make([]metricdata.LabelValue, 0, len(bm.keys)) vb := &tagencoding.Values{Buffer: []byte(s)} for range bm.keys { v := vb.ReadValue() if v[0] == 0 { vals = append(vals, metricdata.LabelValue{}) } else { vals = append(vals, metricdata.NewLabelValue(string(v[1:]))) } } return vals } func (bm *baseMetric) entryForValues(labelVals []metricdata.LabelValue, newEntry func() baseEntry) (interface{}, error) { labelVals = append(bm.constLabelValues, labelVals...) if len(labelVals) != len(bm.keys) { return nil, errKeyValueMismatch } mapKey := bm.encodeLabelVals(labelVals) if entry, ok := bm.vals.Load(mapKey); ok { return entry, nil } entry, _ := bm.vals.LoadOrStore(mapKey, newEntry()) return entry, nil } func (bm *baseMetric) upsertEntry(labelVals []metricdata.LabelValue, newEntry func() baseEntry) error { labelVals = append(bm.constLabelValues, labelVals...) if len(labelVals) != len(bm.keys) { return errKeyValueMismatch } mapKey := bm.encodeLabelVals(labelVals) bm.vals.Delete(mapKey) bm.vals.Store(mapKey, newEntry()) return nil } opencensus-go-0.24.0/metric/cumulative.go000066400000000000000000000137571433102037600203660ustar00rootroot00000000000000// Copyright 2019, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric import ( "math" "sync/atomic" "time" "go.opencensus.io/metric/metricdata" ) // Float64Cumulative represents a float64 value that can only go up. 
// // Float64Cumulative maintains a float64 value for each combination of label values // passed to the Set or Inc methods. type Float64Cumulative struct { bm baseMetric } // Float64CumulativeEntry represents a single value of the cumulative corresponding to a set // of label values. type Float64CumulativeEntry struct { val uint64 // needs to be uint64 for atomic access, interpret with math.Float64frombits } func (e *Float64CumulativeEntry) read(t time.Time) metricdata.Point { v := math.Float64frombits(atomic.LoadUint64(&e.val)) if v < 0 { v = 0 } return metricdata.NewFloat64Point(t, v) } // GetEntry returns a cumulative entry where each key for this cumulative has the value // given. // // The number of label values supplied must be exactly the same as the number // of keys supplied when this cumulative was created. func (c *Float64Cumulative) GetEntry(labelVals ...metricdata.LabelValue) (*Float64CumulativeEntry, error) { entry, err := c.bm.entryForValues(labelVals, func() baseEntry { return &Float64CumulativeEntry{} }) if err != nil { return nil, err } return entry.(*Float64CumulativeEntry), nil } // Inc increments the cumulative entry value by val. It returns without incrementing if the val // is negative. func (e *Float64CumulativeEntry) Inc(val float64) { var swapped bool if val <= 0.0 { return } for !swapped { oldVal := atomic.LoadUint64(&e.val) newVal := math.Float64bits(math.Float64frombits(oldVal) + val) swapped = atomic.CompareAndSwapUint64(&e.val, oldVal, newVal) } } // Int64Cumulative represents a int64 cumulative value that can only go up. // // Int64Cumulative maintains an int64 value for each combination of label values passed to the // Set or Inc methods. type Int64Cumulative struct { bm baseMetric } // Int64CumulativeEntry represents a single value of the cumulative corresponding to a set // of label values. type Int64CumulativeEntry struct { val int64 } func (e *Int64CumulativeEntry) read(t time.Time) metricdata.Point { v := atomic.LoadInt64(&e.val) if v < 0 { v = 0.0 } return metricdata.NewInt64Point(t, v) } // GetEntry returns a cumulative entry where each key for this cumulative has the value // given. // // The number of label values supplied must be exactly the same as the number // of keys supplied when this cumulative was created. func (c *Int64Cumulative) GetEntry(labelVals ...metricdata.LabelValue) (*Int64CumulativeEntry, error) { entry, err := c.bm.entryForValues(labelVals, func() baseEntry { return &Int64CumulativeEntry{} }) if err != nil { return nil, err } return entry.(*Int64CumulativeEntry), nil } // Inc increments the current cumulative entry value by val. It returns without incrementing if // the val is negative. func (e *Int64CumulativeEntry) Inc(val int64) { if val <= 0 { return } atomic.AddInt64(&e.val, val) } // Int64DerivedCumulative represents int64 cumulative value that is derived from an object. // // Int64DerivedCumulative maintains objects for each combination of label values. // These objects implement Int64DerivedCumulativeInterface to read instantaneous value // representing the object. type Int64DerivedCumulative struct { bm baseMetric } type int64DerivedCumulativeEntry struct { fn func() int64 } func (e *int64DerivedCumulativeEntry) read(t time.Time) metricdata.Point { // TODO: [rghetia] handle a condition where new value return by fn is lower than previous call. // It requires that we maintain the old values. 
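// Until then, the supplied fn should be monotonically non-decreasing, since a cumulative value is expected to only go up.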
return metricdata.NewInt64Point(t, e.fn()) } // UpsertEntry inserts or updates a derived cumulative entry for the given set of label values. // The object for which this cumulative entry is inserted or updated, must implement func() int64 // // It returns an error if // 1. The number of label values supplied are not the same as the number // of keys supplied when this cumulative was created. // 2. fn func() int64 is nil. func (c *Int64DerivedCumulative) UpsertEntry(fn func() int64, labelVals ...metricdata.LabelValue) error { if fn == nil { return errInvalidParam } return c.bm.upsertEntry(labelVals, func() baseEntry { return &int64DerivedCumulativeEntry{fn} }) } // Float64DerivedCumulative represents float64 cumulative value that is derived from an object. // // Float64DerivedCumulative maintains objects for each combination of label values. // These objects implement Float64DerivedCumulativeInterface to read instantaneous value // representing the object. type Float64DerivedCumulative struct { bm baseMetric } type float64DerivedCumulativeEntry struct { fn func() float64 } func (e *float64DerivedCumulativeEntry) read(t time.Time) metricdata.Point { return metricdata.NewFloat64Point(t, e.fn()) } // UpsertEntry inserts or updates a derived cumulative entry for the given set of label values. // The object for which this cumulative entry is inserted or updated, must implement func() float64 // // It returns an error if // 1. The number of label values supplied are not the same as the number // of keys supplied when this cumulative was created. // 2. fn func() float64 is nil. func (c *Float64DerivedCumulative) UpsertEntry(fn func() float64, labelVals ...metricdata.LabelValue) error { if fn == nil { return errInvalidParam } return c.bm.upsertEntry(labelVals, func() baseEntry { return &float64DerivedCumulativeEntry{fn} }) } opencensus-go-0.24.0/metric/cumulative_test.go000066400000000000000000000237151433102037600214200ustar00rootroot00000000000000// Copyright 2019, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package metric import ( "testing" "time" "github.com/google/go-cmp/cmp" "go.opencensus.io/metric/metricdata" ) func TestCumulative(t *testing.T) { r := NewRegistry() f, _ := r.AddFloat64Cumulative("TestCumulative", WithLabelKeys("k1", "k2")) e, _ := f.GetEntry(metricdata.LabelValue{}, metricdata.LabelValue{}) e.Inc(5) e, _ = f.GetEntry(metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) e.Inc(1) e, _ = f.GetEntry(metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) e.Inc(1) e, _ = f.GetEntry(metricdata.NewLabelValue("k1v2"), metricdata.NewLabelValue("k2v2")) e.Inc(1) m := r.Read() want := []*metricdata.Metric{ { Descriptor: metricdata.Descriptor{ Name: "TestCumulative", LabelKeys: []metricdata.LabelKey{ {Key: "k1"}, {Key: "k2"}, }, Type: metricdata.TypeCumulativeFloat64, }, TimeSeries: []*metricdata.TimeSeries{ { LabelValues: []metricdata.LabelValue{ {}, {}, }, Points: []metricdata.Point{ metricdata.NewFloat64Point(time.Time{}, 5), }, }, { LabelValues: []metricdata.LabelValue{ metricdata.NewLabelValue("k1v1"), {}, }, Points: []metricdata.Point{ metricdata.NewFloat64Point(time.Time{}, 2), }, }, { LabelValues: []metricdata.LabelValue{ metricdata.NewLabelValue("k1v2"), metricdata.NewLabelValue("k2v2"), }, Points: []metricdata.Point{ metricdata.NewFloat64Point(time.Time{}, 1), }, }, }, }, } canonicalize(m) canonicalize(want) if diff := cmp.Diff(m, want, cmp.Comparer(ignoreTimes)); diff != "" { t.Errorf("-got +want: %s", diff) } } func TestCumulativeConstLabel(t *testing.T) { r := NewRegistry() f, _ := r.AddFloat64Cumulative("TestCumulativeWithConstLabel", WithLabelKeys("k1"), WithConstLabel(map[metricdata.LabelKey]metricdata.LabelValue{ {Key: "const"}: metricdata.NewLabelValue("same"), {Key: "const2"}: metricdata.NewLabelValue("same2"), })) e, _ := f.GetEntry(metricdata.LabelValue{}) e.Inc(5) e, _ = f.GetEntry(metricdata.NewLabelValue("k1v1")) e.Inc(1) m := r.Read() want := []*metricdata.Metric{ { Descriptor: metricdata.Descriptor{ Name: "TestCumulativeWithConstLabel", LabelKeys: []metricdata.LabelKey{ {Key: "const"}, {Key: "const2"}, {Key: "k1"}}, Type: metricdata.TypeCumulativeFloat64, }, TimeSeries: []*metricdata.TimeSeries{ { LabelValues: []metricdata.LabelValue{ metricdata.NewLabelValue("same"), metricdata.NewLabelValue("same2"), {}}, Points: []metricdata.Point{ metricdata.NewFloat64Point(time.Time{}, 5), }, }, { LabelValues: []metricdata.LabelValue{ metricdata.NewLabelValue("same"), metricdata.NewLabelValue("same2"), metricdata.NewLabelValue("k1v1"), }, Points: []metricdata.Point{ metricdata.NewFloat64Point(time.Time{}, 1), }, }, }, }, } canonicalize(m) canonicalize(want) if diff := cmp.Diff(m, want, cmp.Comparer(ignoreTimes)); diff != "" { t.Errorf("-got +want: %s", diff) } } func TestCumulativeMetricDescriptor(t *testing.T) { r := NewRegistry() gf, _ := r.AddFloat64Cumulative("float64_gauge") compareType(gf.bm.desc.Type, metricdata.TypeCumulativeFloat64, t) gi, _ := r.AddInt64Cumulative("int64_gauge") compareType(gi.bm.desc.Type, metricdata.TypeCumulativeInt64, t) dgf, _ := r.AddFloat64DerivedCumulative("derived_float64_gauge") compareType(dgf.bm.desc.Type, metricdata.TypeCumulativeFloat64, t) dgi, _ := r.AddInt64DerivedCumulative("derived_int64_gauge") compareType(dgi.bm.desc.Type, metricdata.TypeCumulativeInt64, t) } func readAndCompareInt64Val(testname string, r *Registry, want int64, t *testing.T) { ms := r.Read() if got := ms[0].TimeSeries[0].Points[0].Value.(int64); got != want { t.Errorf("testname: %s, got = %v, want %v\n", testname, got, want) } } func 
TestInt64CumulativeEntry_IncNegative(t *testing.T) { r := NewRegistry() g, _ := r.AddInt64Cumulative("bm") e, _ := g.GetEntry() e.Inc(5) readAndCompareInt64Val("inc", r, 5, t) e.Inc(-2) readAndCompareInt64Val("inc negative", r, 5, t) } func readAndCompareFloat64Val(testname string, r *Registry, want float64, t *testing.T) { ms := r.Read() if got := ms[0].TimeSeries[0].Points[0].Value.(float64); got != want { t.Errorf("testname: %s, got = %v, want %v\n", testname, got, want) } } func TestFloat64CumulativeEntry_IncNegative(t *testing.T) { r := NewRegistry() g, _ := r.AddFloat64Cumulative("bm") e, _ := g.GetEntry() e.Inc(5.0) readAndCompareFloat64Val("inc", r, 5.0, t) e.Inc(-2.0) readAndCompareFloat64Val("inc negative", r, 5.0, t) } func TestCumulativeWithSameNameDiffType(t *testing.T) { r := NewRegistry() r.AddInt64Cumulative("bm") _, gotErr := r.AddFloat64Cumulative("bm") if gotErr == nil { t.Errorf("got: nil, want error: %v", errMetricExistsWithDiffType) } _, gotErr = r.AddInt64DerivedCumulative("bm") if gotErr == nil { t.Errorf("got: nil, want error: %v", errMetricExistsWithDiffType) } _, gotErr = r.AddFloat64DerivedCumulative("bm") if gotErr == nil { t.Errorf("got: nil, want error: %v", errMetricExistsWithDiffType) } } func TestCumulativeWithLabelMismatch(t *testing.T) { r := NewRegistry() g, _ := r.AddInt64Cumulative("bm", WithLabelKeys("k1")) _, gotErr := g.GetEntry(metricdata.NewLabelValue("k1v2"), metricdata.NewLabelValue("k2v2")) if gotErr == nil { t.Errorf("got: nil, want error: %v", errKeyValueMismatch) } } type sysUpTimeInNanoSecs struct { size int64 } func (q *sysUpTimeInNanoSecs) ToInt64() int64 { return q.size } func TestInt64DerivedCumulativeEntry_Inc(t *testing.T) { r := NewRegistry() q := &sysUpTimeInNanoSecs{3} g, _ := r.AddInt64DerivedCumulative("bm", WithLabelKeys("k1", "k2")) err := g.UpsertEntry(q.ToInt64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) if err != nil { t.Errorf("want: nil, got: %v", err) } ms := r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(int64), int64(3); got != want { t.Errorf("value = %v, want %v", got, want) } q.size = 5 ms = r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(int64), int64(5); got != want { t.Errorf("value = %v, want %v", got, want) } } func TestInt64DerivedCumulativeEntry_IncWithNilObj(t *testing.T) { r := NewRegistry() g, _ := r.AddInt64DerivedCumulative("bm", WithLabelKeys("k1", "k2")) gotErr := g.UpsertEntry(nil, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) if gotErr == nil { t.Errorf("expected error but got nil") } } func TestInt64DerivedCumulativeEntry_IncWithInvalidLabels(t *testing.T) { r := NewRegistry() q := &sysUpTimeInNanoSecs{3} g, _ := r.AddInt64DerivedCumulative("bm", WithLabelKeys("k1", "k2")) gotErr := g.UpsertEntry(q.ToInt64, metricdata.NewLabelValue("k1v1")) if gotErr == nil { t.Errorf("expected error but got nil") } } func TestInt64DerivedCumulativeEntry_Update(t *testing.T) { r := NewRegistry() q := &sysUpTimeInNanoSecs{3} q2 := &sysUpTimeInNanoSecs{5} g, _ := r.AddInt64DerivedCumulative("bm", WithLabelKeys("k1", "k2")) g.UpsertEntry(q.ToInt64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) gotErr := g.UpsertEntry(q2.ToInt64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) if gotErr != nil { t.Errorf("got: %v, want: nil", gotErr) } ms := r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(int64), int64(5); got != want { t.Errorf("value = %v, want %v", got, want) } } type sysUpTimeInSeconds struct { size float64 } func (q 
*sysUpTimeInSeconds) ToFloat64() float64 { return q.size } func TestFloat64DerivedCumulativeEntry_Inc(t *testing.T) { r := NewRegistry() q := &sysUpTimeInSeconds{5.0} g, _ := r.AddFloat64DerivedCumulative("bm", WithLabelKeys("k1", "k2")) err := g.UpsertEntry(q.ToFloat64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) if err != nil { t.Errorf("want: nil, got: %v", err) } ms := r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(float64), float64(5.0); got != want { t.Errorf("value = %v, want %v", got, want) } q.size = 7 ms = r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(float64), float64(7.0); got != want { t.Errorf("value = %v, want %v", got, want) } } func TestFloat64DerivedCumulativeEntry_IncWithNilObj(t *testing.T) { r := NewRegistry() g, _ := r.AddFloat64DerivedCumulative("bm", WithLabelKeys("k1", "k2")) gotErr := g.UpsertEntry(nil, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) if gotErr == nil { t.Errorf("expected error but got nil") } } func TestFloat64DerivedCumulativeEntry_IncWithInvalidLabels(t *testing.T) { r := NewRegistry() q := &sysUpTimeInSeconds{3} g, _ := r.AddFloat64DerivedCumulative("bm", WithLabelKeys("k1", "k2")) gotErr := g.UpsertEntry(q.ToFloat64, metricdata.NewLabelValue("k1v1")) if gotErr == nil { t.Errorf("expected error but got nil") } } func TestFloat64DerivedCumulativeEntry_Update(t *testing.T) { r := NewRegistry() q := &sysUpTimeInSeconds{3.0} q2 := &sysUpTimeInSeconds{5.0} g, _ := r.AddFloat64DerivedCumulative("bm", WithLabelKeys("k1", "k2")) g.UpsertEntry(q.ToFloat64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) gotErr := g.UpsertEntry(q2.ToFloat64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) if gotErr != nil { t.Errorf("got: %v, want: nil", gotErr) } ms := r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(float64), float64(5.0); got != want { t.Errorf("value = %v, want %v", got, want) } } opencensus-go-0.24.0/metric/doc.go000066400000000000000000000014371433102037600167450ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package metric support for gauge and cumulative metrics. // // This is an EXPERIMENTAL package, and may change in arbitrary ways without // notice. package metric // import "go.opencensus.io/metric" opencensus-go-0.24.0/metric/error_const.go000066400000000000000000000016341433102037600205360ustar00rootroot00000000000000// Copyright 2019, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package metric import "errors" var ( errInvalidParam = errors.New("invalid parameter") errMetricExistsWithDiffType = errors.New("metric with same name exists with a different type") errKeyValueMismatch = errors.New("must supply the same number of label values as keys used to construct this metric") ) opencensus-go-0.24.0/metric/examples_test.go000066400000000000000000000022771433102037600210600ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric_test import ( "net/http" "go.opencensus.io/metric" "go.opencensus.io/metric/metricdata" ) func ExampleRegistry_AddInt64Gauge() { r := metric.NewRegistry() // TODO: allow exporting from a registry g, _ := r.AddInt64Gauge("active_request", metric.WithDescription("Number of active requests, per method."), metric.WithUnit(metricdata.UnitDimensionless), metric.WithLabelKeys("method")) http.HandleFunc("/", func(writer http.ResponseWriter, request *http.Request) { e, _ := g.GetEntry(metricdata.NewLabelValue(request.Method)) e.Add(1) defer e.Add(-1) // process request ... }) } opencensus-go-0.24.0/metric/gauge.go000066400000000000000000000132511433102037600172650ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric import ( "math" "sync/atomic" "time" "go.opencensus.io/metric/metricdata" ) // Float64Gauge represents a float64 value that can go up and down. // // Float64Gauge maintains a float64 value for each combination of of label values // passed to the Set or Add methods. type Float64Gauge struct { bm baseMetric } // Float64Entry represents a single value of the gauge corresponding to a set // of label values. type Float64Entry struct { val uint64 // needs to be uint64 for atomic access, interpret with math.Float64frombits } func (e *Float64Entry) read(t time.Time) metricdata.Point { v := math.Float64frombits(atomic.LoadUint64(&e.val)) if v < 0 { v = 0 } return metricdata.NewFloat64Point(t, v) } // GetEntry returns a gauge entry where each key for this gauge has the value // given. // // The number of label values supplied must be exactly the same as the number // of keys supplied when this gauge was created. 
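// For example, a gauge created with WithLabelKeys("method") takes exactly one label value, e.g. g.GetEntry(metricdata.NewLabelValue("GET")).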
func (g *Float64Gauge) GetEntry(labelVals ...metricdata.LabelValue) (*Float64Entry, error) { entry, err := g.bm.entryForValues(labelVals, func() baseEntry { return &Float64Entry{} }) if err != nil { return nil, err } return entry.(*Float64Entry), nil } // Set sets the gauge entry value to val. func (e *Float64Entry) Set(val float64) { atomic.StoreUint64(&e.val, math.Float64bits(val)) } // Add increments the gauge entry value by val. func (e *Float64Entry) Add(val float64) { var swapped bool for !swapped { oldVal := atomic.LoadUint64(&e.val) newVal := math.Float64bits(math.Float64frombits(oldVal) + val) swapped = atomic.CompareAndSwapUint64(&e.val, oldVal, newVal) } } // Int64Gauge represents a int64 gauge value that can go up and down. // // Int64Gauge maintains an int64 value for each combination of label values passed to the // Set or Add methods. type Int64Gauge struct { bm baseMetric } // Int64GaugeEntry represents a single value of the gauge corresponding to a set // of label values. type Int64GaugeEntry struct { val int64 } func (e *Int64GaugeEntry) read(t time.Time) metricdata.Point { v := atomic.LoadInt64(&e.val) if v < 0 { v = 0.0 } return metricdata.NewInt64Point(t, v) } // GetEntry returns a gauge entry where each key for this gauge has the value // given. // // The number of label values supplied must be exactly the same as the number // of keys supplied when this gauge was created. func (g *Int64Gauge) GetEntry(labelVals ...metricdata.LabelValue) (*Int64GaugeEntry, error) { entry, err := g.bm.entryForValues(labelVals, func() baseEntry { return &Int64GaugeEntry{} }) if err != nil { return nil, err } return entry.(*Int64GaugeEntry), nil } // Set sets the value of the gauge entry to the provided value. func (e *Int64GaugeEntry) Set(val int64) { atomic.StoreInt64(&e.val, val) } // Add increments the current gauge entry value by val, which may be negative. func (e *Int64GaugeEntry) Add(val int64) { atomic.AddInt64(&e.val, val) } // Int64DerivedGauge represents int64 gauge value that is derived from an object. // // Int64DerivedGauge maintains objects for each combination of label values. // These objects implement Int64DerivedGaugeInterface to read instantaneous value // representing the object. type Int64DerivedGauge struct { bm baseMetric } type int64DerivedGaugeEntry struct { fn func() int64 } func (e *int64DerivedGaugeEntry) read(t time.Time) metricdata.Point { return metricdata.NewInt64Point(t, e.fn()) } // UpsertEntry inserts or updates a derived gauge entry for the given set of label values. // The object for which this gauge entry is inserted or updated, must implement func() int64 // // It returns an error if // 1. The number of label values supplied are not the same as the number // of keys supplied when this gauge was created. // 2. fn func() int64 is nil. func (g *Int64DerivedGauge) UpsertEntry(fn func() int64, labelVals ...metricdata.LabelValue) error { if fn == nil { return errInvalidParam } return g.bm.upsertEntry(labelVals, func() baseEntry { return &int64DerivedGaugeEntry{fn} }) } // Float64DerivedGauge represents float64 gauge value that is derived from an object. // // Float64DerivedGauge maintains objects for each combination of label values. // These objects implement Float64DerivedGaugeInterface to read instantaneous value // representing the object. 
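// In practice the object only needs to provide a func() float64; that function is passed to UpsertEntry and is invoked on every Read to produce the current value.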
type Float64DerivedGauge struct { bm baseMetric } type float64DerivedGaugeEntry struct { fn func() float64 } func (e *float64DerivedGaugeEntry) read(t time.Time) metricdata.Point { return metricdata.NewFloat64Point(t, e.fn()) } // UpsertEntry inserts or updates a derived gauge entry for the given set of label values. // The object for which this gauge entry is inserted or updated, must implement func() float64 // // It returns an error if // 1. The number of label values supplied are not the same as the number // of keys supplied when this gauge was created. // 2. fn func() float64 is nil. func (g *Float64DerivedGauge) UpsertEntry(fn func() float64, labelVals ...metricdata.LabelValue) error { if fn == nil { return errInvalidParam } return g.bm.upsertEntry(labelVals, func() baseEntry { return &float64DerivedGaugeEntry{fn} }) } opencensus-go-0.24.0/metric/gauge_test.go000066400000000000000000000362561433102037600203360ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric import ( "fmt" "sort" "testing" "time" "github.com/google/go-cmp/cmp" "go.opencensus.io/metric/metricdata" ) func TestGauge(t *testing.T) { r := NewRegistry() f, _ := r.AddFloat64Gauge("TestGauge", WithLabelKeys("k1", "k2")) e, _ := f.GetEntry(metricdata.LabelValue{}, metricdata.LabelValue{}) e.Set(5) e, _ = f.GetEntry(metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) e.Add(1) e, _ = f.GetEntry(metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) e.Add(1) e, _ = f.GetEntry(metricdata.NewLabelValue("k1v2"), metricdata.NewLabelValue("k2v2")) e.Add(1) m := r.Read() want := []*metricdata.Metric{ { Descriptor: metricdata.Descriptor{ Name: "TestGauge", LabelKeys: []metricdata.LabelKey{ {Key: "k1"}, {Key: "k2"}, }, Type: metricdata.TypeGaugeFloat64, }, TimeSeries: []*metricdata.TimeSeries{ { LabelValues: []metricdata.LabelValue{ {}, {}, }, Points: []metricdata.Point{ metricdata.NewFloat64Point(time.Time{}, 5), }, }, { LabelValues: []metricdata.LabelValue{ metricdata.NewLabelValue("k1v1"), {}, }, Points: []metricdata.Point{ metricdata.NewFloat64Point(time.Time{}, 2), }, }, { LabelValues: []metricdata.LabelValue{ metricdata.NewLabelValue("k1v2"), metricdata.NewLabelValue("k2v2"), }, Points: []metricdata.Point{ metricdata.NewFloat64Point(time.Time{}, 1), }, }, }, }, } canonicalize(m) canonicalize(want) if diff := cmp.Diff(m, want, cmp.Comparer(ignoreTimes)); diff != "" { t.Errorf("-got +want: %s", diff) } } func TestGaugeConstLabel(t *testing.T) { r := NewRegistry() f, _ := r.AddFloat64Gauge("TestGaugeWithConstLabel", WithLabelKeys("k1"), WithConstLabel(map[metricdata.LabelKey]metricdata.LabelValue{ {Key: "const"}: metricdata.NewLabelValue("same"), {Key: "const2"}: metricdata.NewLabelValue("same2"), })) e, _ := f.GetEntry(metricdata.LabelValue{}) e.Set(5) e, _ = f.GetEntry(metricdata.NewLabelValue("k1v1")) e.Add(1) m := r.Read() want := []*metricdata.Metric{ { Descriptor: metricdata.Descriptor{ Name: "TestGaugeWithConstLabel", LabelKeys: 
[]metricdata.LabelKey{ {Key: "const"}, {Key: "const2"}, {Key: "k1"}}, Type: metricdata.TypeGaugeFloat64, }, TimeSeries: []*metricdata.TimeSeries{ { LabelValues: []metricdata.LabelValue{ metricdata.NewLabelValue("same"), metricdata.NewLabelValue("same2"), {}, }, Points: []metricdata.Point{ metricdata.NewFloat64Point(time.Time{}, 5), }, }, { LabelValues: []metricdata.LabelValue{ metricdata.NewLabelValue("same"), metricdata.NewLabelValue("same2"), metricdata.NewLabelValue("k1v1"), }, Points: []metricdata.Point{ metricdata.NewFloat64Point(time.Time{}, 1), }, }, }, }, } canonicalize(m) canonicalize(want) if diff := cmp.Diff(m, want, cmp.Comparer(ignoreTimes)); diff != "" { t.Errorf("-got +want: %s", diff) } } func TestGaugeMetricDescriptor(t *testing.T) { r := NewRegistry() gf, _ := r.AddFloat64Gauge("float64_gauge") compareType(gf.bm.desc.Type, metricdata.TypeGaugeFloat64, t) gi, _ := r.AddInt64Gauge("int64_gauge") compareType(gi.bm.desc.Type, metricdata.TypeGaugeInt64, t) dgf, _ := r.AddFloat64DerivedGauge("derived_float64_gauge") compareType(dgf.bm.desc.Type, metricdata.TypeGaugeFloat64, t) dgi, _ := r.AddInt64DerivedGauge("derived_int64_gauge") compareType(dgi.bm.desc.Type, metricdata.TypeGaugeInt64, t) } func compareType(got, want metricdata.Type, t *testing.T) { if got != want { t.Errorf("metricdata type: got %v, want %v\n", got, want) } } func TestGaugeMetricOptionDesc(t *testing.T) { r := NewRegistry() name := "testOptDesc" gf, _ := r.AddFloat64Gauge(name, WithDescription("test")) want := metricdata.Descriptor{ Name: name, Description: "test", Type: metricdata.TypeGaugeFloat64, } got := gf.bm.desc if !cmp.Equal(got, want) { t.Errorf("metric option description: got %v, want %v\n", got, want) } } func TestGaugeMetricOptionUnit(t *testing.T) { r := NewRegistry() name := "testOptUnit" gf, _ := r.AddFloat64Gauge(name, WithUnit(metricdata.UnitMilliseconds)) want := metricdata.Descriptor{ Name: name, Unit: metricdata.UnitMilliseconds, Type: metricdata.TypeGaugeFloat64, } got := gf.bm.desc if !cmp.Equal(got, want) { t.Errorf("metric descriptor: got %v, want %v\n", got, want) } } func TestGaugeMetricOptionLabelKeys(t *testing.T) { r := NewRegistry() name := "testOptUnit" gf, _ := r.AddFloat64Gauge(name, WithLabelKeys("k1", "k3")) want := metricdata.Descriptor{ Name: name, LabelKeys: []metricdata.LabelKey{ {Key: "k1"}, {Key: "k3"}, }, Type: metricdata.TypeGaugeFloat64, } got := gf.bm.desc if !cmp.Equal(got, want) { t.Errorf("metric descriptor: got %v, want %v\n", got, want) } } func TestGaugeMetricOptionLabelKeysAndDesc(t *testing.T) { r := NewRegistry() name := "testOptUnit" lks := []metricdata.LabelKey{} lks = append(lks, metricdata.LabelKey{Key: "k1", Description: "desc k1"}, metricdata.LabelKey{Key: "k3", Description: "desc k3"}) gf, _ := r.AddFloat64Gauge(name, WithLabelKeysAndDescription(lks...)) want := metricdata.Descriptor{ Name: name, LabelKeys: []metricdata.LabelKey{ {Key: "k1", Description: "desc k1"}, {Key: "k3", Description: "desc k3"}, }, Type: metricdata.TypeGaugeFloat64, } got := gf.bm.desc if !cmp.Equal(got, want) { t.Errorf("metric descriptor: got %v, want %v\n", got, want) } } func TestGaugeMetricOptionDefault(t *testing.T) { r := NewRegistry() name := "testOptUnit" gf, _ := r.AddFloat64Gauge(name) want := metricdata.Descriptor{ Name: name, Type: metricdata.TypeGaugeFloat64, } got := gf.bm.desc if !cmp.Equal(got, want) { t.Errorf("metric descriptor: got %v, want %v\n", got, want) } } func TestFloat64Entry_Add(t *testing.T) { r := NewRegistry() g, _ := r.AddFloat64Gauge("g") e, 
_ := g.GetEntry() e.Add(0) ms := r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(float64), 0.0; got != want { t.Errorf("value = %v, want %v", got, want) } e, _ = g.GetEntry() e.Add(1) ms = r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(float64), 1.0; got != want { t.Errorf("value = %v, want %v", got, want) } e, _ = g.GetEntry() e.Add(-1) ms = r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(float64), 0.0; got != want { t.Errorf("value = %v, want %v", got, want) } } func TestFloat64Gauge_Add_NegativeTotals(t *testing.T) { r := NewRegistry() g, _ := r.AddFloat64Gauge("g") e, _ := g.GetEntry() e.Add(-1.0) ms := r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(float64), float64(0); got != want { t.Errorf("value = %v, want %v", got, want) } } func TestInt64GaugeEntry_Add(t *testing.T) { r := NewRegistry() g, _ := r.AddInt64Gauge("g") e, _ := g.GetEntry() e.Add(0) ms := r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(int64), int64(0); got != want { t.Errorf("value = %v, want %v", got, want) } e, _ = g.GetEntry() e.Add(1) ms = r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(int64), int64(1); got != want { t.Errorf("value = %v, want %v", got, want) } } func TestInt64Gauge_Add_NegativeTotals(t *testing.T) { r := NewRegistry() g, _ := r.AddInt64Gauge("g") e, _ := g.GetEntry() e.Add(-1) ms := r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(int64), int64(0); got != want { t.Errorf("value = %v, want %v", got, want) } } func TestGaugeWithSameNameDiffType(t *testing.T) { r := NewRegistry() r.AddInt64Gauge("g") _, gotErr := r.AddFloat64Gauge("g") if gotErr == nil { t.Errorf("got: nil, want error: %v", errMetricExistsWithDiffType) } _, gotErr = r.AddInt64DerivedGauge("g") if gotErr == nil { t.Errorf("got: nil, want error: %v", errMetricExistsWithDiffType) } _, gotErr = r.AddFloat64DerivedGauge("g") if gotErr == nil { t.Errorf("got: nil, want error: %v", errMetricExistsWithDiffType) } } func TestGaugeWithLabelMismatch(t *testing.T) { r := NewRegistry() g, _ := r.AddInt64Gauge("g", WithLabelKeys("k1")) _, gotErr := g.GetEntry(metricdata.NewLabelValue("k1v2"), metricdata.NewLabelValue("k2v2")) if gotErr == nil { t.Errorf("got: nil, want error: %v", errKeyValueMismatch) } } func TestMapKey(t *testing.T) { cases := [][]metricdata.LabelValue{ {}, {metricdata.LabelValue{}}, {metricdata.NewLabelValue("")}, {metricdata.NewLabelValue("-")}, {metricdata.NewLabelValue(",")}, {metricdata.NewLabelValue("v1"), metricdata.NewLabelValue("v2")}, {metricdata.NewLabelValue("v1"), metricdata.LabelValue{}}, {metricdata.NewLabelValue("v1"), metricdata.LabelValue{}, metricdata.NewLabelValue(string([]byte{0}))}, {metricdata.LabelValue{}, metricdata.LabelValue{}}, } for i, tc := range cases { t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) { g := &baseMetric{ keys: make([]metricdata.LabelKey, len(tc)), } mk := g.encodeLabelVals(tc) vals := g.decodeLabelVals(mk) if diff := cmp.Diff(vals, tc); diff != "" { t.Errorf("values differ after serialization -got +want: %s", diff) } }) } } func TestRaceCondition(t *testing.T) { r := NewRegistry() // start reader before adding Gauge metric. 
var ms = []*metricdata.Metric{} for i := 0; i < 5; i++ { go func(k int) { for j := 0; j < 5; j++ { g, _ := r.AddInt64Gauge(fmt.Sprintf("g%d%d", k, j)) e, _ := g.GetEntry() e.Add(1) } }(i) } time.Sleep(1 * time.Second) ms = r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(int64), int64(1); got != want { t.Errorf("value = %v, want %v", got, want) } } func ignoreTimes(_, _ time.Time) bool { return true } func canonicalize(ms []*metricdata.Metric) { for _, m := range ms { sort.Slice(m.TimeSeries, func(i, j int) bool { // sort time series by their label values iStr := "" for _, label := range m.TimeSeries[i].LabelValues { iStr += fmt.Sprintf("%+v", label) } jStr := "" for _, label := range m.TimeSeries[j].LabelValues { jStr += fmt.Sprintf("%+v", label) } return iStr < jStr }) } } type queueInt64 struct { size int64 } func (q *queueInt64) ToInt64() int64 { return q.size } func TestInt64DerivedGaugeEntry_Add(t *testing.T) { r := NewRegistry() q := &queueInt64{3} g, _ := r.AddInt64DerivedGauge("g", WithLabelKeys("k1", "k2")) err := g.UpsertEntry(q.ToInt64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) if err != nil { t.Errorf("want: nil, got: %v", err) } ms := r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(int64), int64(3); got != want { t.Errorf("value = %v, want %v", got, want) } q.size = 5 ms = r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(int64), int64(5); got != want { t.Errorf("value = %v, want %v", got, want) } } func TestInt64DerivedGaugeEntry_AddWithNilObj(t *testing.T) { r := NewRegistry() g, _ := r.AddInt64DerivedGauge("g", WithLabelKeys("k1", "k2")) gotErr := g.UpsertEntry(nil, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) if gotErr == nil { t.Errorf("expected error but got nil") } } func TestInt64DerivedGaugeEntry_AddWithInvalidLabels(t *testing.T) { r := NewRegistry() q := &queueInt64{3} g, _ := r.AddInt64DerivedGauge("g", WithLabelKeys("k1", "k2")) gotErr := g.UpsertEntry(q.ToInt64, metricdata.NewLabelValue("k1v1")) if gotErr == nil { t.Errorf("expected error but got nil") } } func TestInt64DerivedGaugeEntry_Update(t *testing.T) { r := NewRegistry() q := &queueInt64{3} q2 := &queueInt64{5} g, _ := r.AddInt64DerivedGauge("g", WithLabelKeys("k1", "k2")) g.UpsertEntry(q.ToInt64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) gotErr := g.UpsertEntry(q2.ToInt64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) if gotErr != nil { t.Errorf("got: %v, want: nil", gotErr) } ms := r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(int64), int64(5); got != want { t.Errorf("value = %v, want %v", got, want) } } func TestInt64DerivedGaugeEntry_UpsertConstLabels(t *testing.T) { r := NewRegistry() q := &queueInt64{3} g, _ := r.AddInt64DerivedGauge("g", WithConstLabel(map[metricdata.LabelKey]metricdata.LabelValue{ {Key: "const"}: metricdata.NewLabelValue("same"), })) err := g.UpsertEntry(q.ToInt64) if err != nil { t.Errorf("want: nil, got: %v", err) } ms := r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(int64), int64(3); got != want { t.Errorf("value = %v, want %v", got, want) } if got, want := ms[0].Descriptor.LabelKeys[0].Key, "const"; got != want { t.Errorf("label key = %v, want %v", got, want) } if got, want := ms[0].TimeSeries[0].LabelValues[0].Value, "same"; got != want { t.Errorf("label value = %v, want %v", got, want) } } type queueFloat64 struct { size float64 } func (q *queueFloat64) ToFloat64() float64 { return q.size } func TestFloat64DerivedGaugeEntry_Add(t *testing.T) { 
r := NewRegistry() q := &queueFloat64{5.0} g, _ := r.AddFloat64DerivedGauge("g", WithLabelKeys("k1", "k2")) err := g.UpsertEntry(q.ToFloat64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) if err != nil { t.Errorf("want: nil, got: %v", err) } ms := r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(float64), float64(5.0); got != want { t.Errorf("value = %v, want %v", got, want) } q.size = 5 ms = r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(float64), float64(5.0); got != want { t.Errorf("value = %v, want %v", got, want) } } func TestFloat64DerivedGaugeEntry_AddWithNilObj(t *testing.T) { r := NewRegistry() g, _ := r.AddFloat64DerivedGauge("g", WithLabelKeys("k1", "k2")) gotErr := g.UpsertEntry(nil, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) if gotErr == nil { t.Errorf("expected error but got nil") } } func TestFloat64DerivedGaugeEntry_AddWithInvalidLabels(t *testing.T) { r := NewRegistry() q := &queueFloat64{3} g, _ := r.AddFloat64DerivedGauge("g", WithLabelKeys("k1", "k2")) gotErr := g.UpsertEntry(q.ToFloat64, metricdata.NewLabelValue("k1v1")) if gotErr == nil { t.Errorf("expected error but got nil") } } func TestFloat64DerivedGaugeEntry_Update(t *testing.T) { r := NewRegistry() q := &queueFloat64{3.0} q2 := &queueFloat64{5.0} g, _ := r.AddFloat64DerivedGauge("g", WithLabelKeys("k1", "k2")) g.UpsertEntry(q.ToFloat64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) gotErr := g.UpsertEntry(q2.ToFloat64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) if gotErr != nil { t.Errorf("got: %v, want: nil", gotErr) } ms := r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(float64), float64(5.0); got != want { t.Errorf("value = %v, want %v", got, want) } } opencensus-go-0.24.0/metric/metricdata/000077500000000000000000000000001433102037600177615ustar00rootroot00000000000000opencensus-go-0.24.0/metric/metricdata/doc.go000066400000000000000000000014511433102037600210560ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package metricdata contains the metrics data model. // // This is an EXPERIMENTAL package, and may change in arbitrary ways without // notice. package metricdata // import "go.opencensus.io/metric/metricdata" opencensus-go-0.24.0/metric/metricdata/exemplar.go000066400000000000000000000023541433102037600221310ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package metricdata import ( "time" ) // Exemplars keys. const ( AttachmentKeySpanContext = "SpanContext" ) // Exemplar is an example data point associated with each bucket of a // distribution type aggregation. // // Their purpose is to provide an example of the kind of thing // (request, RPC, trace span, etc.) that resulted in that measurement. type Exemplar struct { Value float64 // the value that was recorded Timestamp time.Time // the time the value was recorded Attachments Attachments // attachments (if any) } // Attachments is a map of extra values associated with a recorded data point. type Attachments map[string]interface{} opencensus-go-0.24.0/metric/metricdata/label.go000066400000000000000000000023611433102037600213710ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metricdata // LabelKey represents key of a label. It has optional // description attribute. type LabelKey struct { Key string Description string } // LabelValue represents the value of a label. // The zero value represents a missing label value, which may be treated // differently to an empty string value by some back ends. type LabelValue struct { Value string // string value of the label Present bool // flag that indicated whether a value is present or not } // NewLabelValue creates a new non-nil LabelValue that represents the given string. func NewLabelValue(val string) LabelValue { return LabelValue{Value: val, Present: true} } opencensus-go-0.24.0/metric/metricdata/metric.go000066400000000000000000000031441433102037600215750ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metricdata import ( "time" "go.opencensus.io/resource" ) // Descriptor holds metadata about a metric. type Descriptor struct { Name string // full name of the metric Description string // human-readable description Unit Unit // units for the measure Type Type // type of measure LabelKeys []LabelKey // label keys } // Metric represents a quantity measured against a resource with different // label value combinations. type Metric struct { Descriptor Descriptor // metric descriptor Resource *resource.Resource // resource against which this was measured TimeSeries []*TimeSeries // one time series for each combination of label values } // TimeSeries is a sequence of points associated with a combination of label // values. 
type TimeSeries struct { LabelValues []LabelValue // label values, same order as keys in the metric descriptor Points []Point // points sequence StartTime time.Time // time we started recording this time series } opencensus-go-0.24.0/metric/metricdata/point.go000066400000000000000000000137251433102037600214510ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metricdata import ( "time" ) // Point is a single data point of a time series. type Point struct { // Time is the point in time that this point represents in a time series. Time time.Time // Value is the value of this point. Prefer using ReadValue to switching on // the value type, since new value types might be added. Value interface{} } //go:generate stringer -type ValueType // NewFloat64Point creates a new Point holding a float64 value. func NewFloat64Point(t time.Time, val float64) Point { return Point{ Value: val, Time: t, } } // NewInt64Point creates a new Point holding an int64 value. func NewInt64Point(t time.Time, val int64) Point { return Point{ Value: val, Time: t, } } // NewDistributionPoint creates a new Point holding a Distribution value. func NewDistributionPoint(t time.Time, val *Distribution) Point { return Point{ Value: val, Time: t, } } // NewSummaryPoint creates a new Point holding a Summary value. func NewSummaryPoint(t time.Time, val *Summary) Point { return Point{ Value: val, Time: t, } } // ValueVisitor allows reading the value of a point. type ValueVisitor interface { VisitFloat64Value(float64) VisitInt64Value(int64) VisitDistributionValue(*Distribution) VisitSummaryValue(*Summary) } // ReadValue accepts a ValueVisitor and calls the appropriate method with the // value of this point. // Consumers of Point should use this in preference to switching on the type // of the value directly, since new value types may be added. func (p Point) ReadValue(vv ValueVisitor) { switch v := p.Value.(type) { case int64: vv.VisitInt64Value(v) case float64: vv.VisitFloat64Value(v) case *Distribution: vv.VisitDistributionValue(v) case *Summary: vv.VisitSummaryValue(v) default: panic("unexpected value type") } } // Distribution contains summary statistics for a population of values. It // optionally contains a histogram representing the distribution of those // values across a set of buckets. type Distribution struct { // Count is the number of values in the population. Must be non-negative. This value // must equal the sum of the values in bucket_counts if a histogram is // provided. Count int64 // Sum is the sum of the values in the population. If count is zero then this field // must be zero. Sum float64 // SumOfSquaredDeviation is the sum of squared deviations from the mean of the values in the // population. For values x_i this is: // // Sum[i=1..n]((x_i - mean)^2) // // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition // describes Welford's method for accumulating this sum in one pass. 
// // If count is zero then this field must be zero. SumOfSquaredDeviation float64 // BucketOptions describes the bounds of the histogram buckets in this // distribution. // // A Distribution may optionally contain a histogram of the values in the // population. // // If nil, there is no associated histogram. BucketOptions *BucketOptions // Bucket If the distribution does not have a histogram, then omit this field. // If there is a histogram, then the sum of the values in the Bucket counts // must equal the value in the count field of the distribution. Buckets []Bucket } // BucketOptions describes the bounds of the histogram buckets in this // distribution. type BucketOptions struct { // Bounds specifies a set of bucket upper bounds. // This defines len(bounds) + 1 (= N) buckets. The boundaries for bucket // index i are: // // [0, Bounds[i]) for i == 0 // [Bounds[i-1], Bounds[i]) for 0 < i < N-1 // [Bounds[i-1], +infinity) for i == N-1 Bounds []float64 } // Bucket represents a single bucket (value range) in a distribution. type Bucket struct { // Count is the number of values in each bucket of the histogram, as described in // bucket_bounds. Count int64 // Exemplar associated with this bucket (if any). Exemplar *Exemplar } // Summary is a representation of percentiles. type Summary struct { // Count is the cumulative count (if available). Count int64 // Sum is the cumulative sum of values (if available). Sum float64 // HasCountAndSum is true if Count and Sum are available. HasCountAndSum bool // Snapshot represents percentiles calculated over an arbitrary time window. // The values in this struct can be reset at arbitrary unknown times, with // the requirement that all of them are reset at the same time. Snapshot Snapshot } // Snapshot represents percentiles over an arbitrary time. // The values in this struct can be reset at arbitrary unknown times, with // the requirement that all of them are reset at the same time. type Snapshot struct { // Count is the number of values in the snapshot. Optional since some systems don't // expose this. Set to 0 if not available. Count int64 // Sum is the sum of values in the snapshot. Optional since some systems don't // expose this. If count is 0 then this field must be zero. Sum float64 // Percentiles is a map from percentile (range (0-100.0]) to the value of // the percentile. Percentiles map[float64]float64 } //go:generate stringer -type Type // Type is the overall type of metric, including its value type and whether it // represents a cumulative total (since the start time) or if it represents a // gauge value. type Type int // Metric types. const ( TypeGaugeInt64 Type = iota TypeGaugeFloat64 TypeGaugeDistribution TypeCumulativeInt64 TypeCumulativeFloat64 TypeCumulativeDistribution TypeSummary ) opencensus-go-0.24.0/metric/metricdata/type_string.go000066400000000000000000000007651433102037600226670ustar00rootroot00000000000000// Code generated by "stringer -type Type"; DO NOT EDIT. 
package metricdata import "strconv" const _Type_name = "TypeGaugeInt64TypeGaugeFloat64TypeGaugeDistributionTypeCumulativeInt64TypeCumulativeFloat64TypeCumulativeDistributionTypeSummary" var _Type_index = [...]uint8{0, 14, 30, 51, 70, 91, 117, 128} func (i Type) String() string { if i < 0 || i >= Type(len(_Type_index)-1) { return "Type(" + strconv.FormatInt(int64(i), 10) + ")" } return _Type_name[_Type_index[i]:_Type_index[i+1]] } opencensus-go-0.24.0/metric/metricdata/unit.go000066400000000000000000000017671433102037600213020ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metricdata // Unit is a string encoded according to the case-sensitive abbreviations from the // Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html type Unit string // Predefined units. To record against a unit not represented here, create your // own Unit type constant from a string. const ( UnitDimensionless Unit = "1" UnitBytes Unit = "By" UnitMilliseconds Unit = "ms" ) opencensus-go-0.24.0/metric/metricexport/000077500000000000000000000000001433102037600203715ustar00rootroot00000000000000opencensus-go-0.24.0/metric/metricexport/doc.go000066400000000000000000000014721433102037600214710ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package metricexport contains support for exporting metric data. // // This is an EXPERIMENTAL package, and may change in arbitrary ways without // notice. package metricexport // import "go.opencensus.io/metric/metricexport" opencensus-go-0.24.0/metric/metricexport/export.go000066400000000000000000000015321433102037600222420ustar00rootroot00000000000000// Copyright 2019, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metricexport import ( "context" "go.opencensus.io/metric/metricdata" ) // Exporter is an interface that exporters implement to export the metric data. 
type Exporter interface { ExportMetrics(ctx context.Context, data []*metricdata.Metric) error } opencensus-go-0.24.0/metric/metricexport/reader.go000066400000000000000000000124341433102037600221660ustar00rootroot00000000000000// Copyright 2019, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package metricexport import ( "context" "fmt" "sync" "time" "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricproducer" "go.opencensus.io/trace" ) var ( defaultSampler = trace.ProbabilitySampler(0.0001) errReportingIntervalTooLow = fmt.Errorf("reporting interval less than %d", minimumReportingDuration) errAlreadyStarted = fmt.Errorf("already started") errIntervalReaderNil = fmt.Errorf("interval reader is nil") errExporterNil = fmt.Errorf("exporter is nil") errReaderNil = fmt.Errorf("reader is nil") ) const ( defaultReportingDuration = 60 * time.Second minimumReportingDuration = 1 * time.Second defaultSpanName = "ExportMetrics" ) // ReaderOptions contains options pertaining to metrics reader. type ReaderOptions struct { // SpanName is the name used for span created to export metrics. SpanName string } // Reader reads metrics from all producers registered // with producer manager and exports those metrics using provided // exporter. type Reader struct { sampler trace.Sampler spanName string } // IntervalReader periodically reads metrics from all producers registered // with producer manager and exports those metrics using provided // exporter. Call Reader.Stop() to stop the reader. type IntervalReader struct { // ReportingInterval it the time duration between two consecutive // metrics reporting. defaultReportingDuration is used if it is not set. // It cannot be set lower than minimumReportingDuration. ReportingInterval time.Duration exporter Exporter timer *time.Ticker quit, done chan bool mu sync.RWMutex reader *Reader } // ReaderOption apply changes to ReaderOptions. type ReaderOption func(*ReaderOptions) // WithSpanName makes new reader to use given span name when exporting metrics. func WithSpanName(spanName string) ReaderOption { return func(o *ReaderOptions) { o.SpanName = spanName } } // NewReader returns a reader configured with specified options. func NewReader(o ...ReaderOption) *Reader { var opts ReaderOptions for _, op := range o { op(&opts) } reader := &Reader{defaultSampler, defaultSpanName} if opts.SpanName != "" { reader.spanName = opts.SpanName } return reader } // NewIntervalReader creates a reader. Once started it periodically // reads metrics from all producers and exports them using provided exporter. func NewIntervalReader(reader *Reader, exporter Exporter) (*IntervalReader, error) { if exporter == nil { return nil, errExporterNil } if reader == nil { return nil, errReaderNil } r := &IntervalReader{ exporter: exporter, reader: reader, } return r, nil } // Start starts the IntervalReader which periodically reads metrics from all // producers registered with global producer manager. 
If the reporting interval // is not set prior to calling this function then default reporting interval // is used. func (ir *IntervalReader) Start() error { if ir == nil { return errIntervalReaderNil } ir.mu.Lock() defer ir.mu.Unlock() var reportingInterval = defaultReportingDuration if ir.ReportingInterval != 0 { if ir.ReportingInterval < minimumReportingDuration { return errReportingIntervalTooLow } reportingInterval = ir.ReportingInterval } if ir.quit != nil { return errAlreadyStarted } ir.timer = time.NewTicker(reportingInterval) ir.quit = make(chan bool) ir.done = make(chan bool) go ir.startInternal() return nil } func (ir *IntervalReader) startInternal() { for { select { case <-ir.timer.C: ir.reader.ReadAndExport(ir.exporter) case <-ir.quit: ir.timer.Stop() ir.done <- true return } } } // Stop stops the reader from reading and exporting metrics. // Additional call to Stop are no-ops. func (ir *IntervalReader) Stop() { if ir == nil { return } ir.mu.Lock() defer ir.mu.Unlock() if ir.quit == nil { return } ir.quit <- true <-ir.done close(ir.quit) close(ir.done) ir.quit = nil } // Flush flushes the metrics if IntervalReader is stopped, otherwise no-op. func (ir *IntervalReader) Flush() { ir.mu.Lock() defer ir.mu.Unlock() // No-op if IntervalReader is not stopped if ir.quit != nil { return } ir.reader.ReadAndExport(ir.exporter) } // ReadAndExport reads metrics from all producer registered with // producer manager and then exports them using provided exporter. func (r *Reader) ReadAndExport(exporter Exporter) { ctx, span := trace.StartSpan(context.Background(), r.spanName, trace.WithSampler(r.sampler)) defer span.End() producers := metricproducer.GlobalManager().GetAll() data := []*metricdata.Metric{} for _, producer := range producers { data = append(data, producer.Read()...) } // TODO: [rghetia] add metrics for errors. exporter.ExportMetrics(ctx, data) } opencensus-go-0.24.0/metric/metricexport/reader_test.go000066400000000000000000000177401433102037600232320ustar00rootroot00000000000000// Copyright 2019, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package metricexport import ( "context" "sync" "testing" "time" "go.opencensus.io/metric" "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricproducer" ) var ( ir1 *IntervalReader ir2 *IntervalReader reader1 = NewReader(WithSpanName("test-export-span")) exporter1 = &metricExporter{} exporter2 = &metricExporter{} gaugeEntry *metric.Int64GaugeEntry duration1 = 1000 * time.Millisecond duration2 = 2000 * time.Millisecond ) type metricExporter struct { sync.Mutex metrics []*metricdata.Metric } func (e *metricExporter) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error { e.Lock() defer e.Unlock() e.metrics = append(e.metrics, metrics...) 
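// The mutex held above guards this slice; manual ReadAndExport calls and
// timer-driven exports may reach the same exporter concurrently.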
return nil } func init() { r := metric.NewRegistry() metricproducer.GlobalManager().AddProducer(r) g, _ := r.AddInt64Gauge("active_request", metric.WithDescription("Number of active requests, per method."), metric.WithUnit(metricdata.UnitDimensionless), metric.WithLabelKeys("method")) gaugeEntry, _ = g.GetEntry(metricdata.NewLabelValue("foo")) } func TestNewReaderWitDefaultOptions(t *testing.T) { r := NewReader() if r.spanName != defaultSpanName { t.Errorf("span name: got %v, want %v\n", r.spanName, defaultSpanName) } } func TestNewReaderWitSpanName(t *testing.T) { spanName := "test-span" r := NewReader(WithSpanName(spanName)) if r.spanName != spanName { t.Errorf("span name: got %+v, want %v\n", r.spanName, spanName) } } func TestNewReader(t *testing.T) { r := NewReader() gaugeEntry.Add(1) r.ReadAndExport(exporter1) checkExportedCount(exporter1, 1, t) checkExportedMetricDesc(exporter1, "active_request", t) resetExporter(exporter1) } func TestNewIntervalReader(t *testing.T) { ir1 = createAndStart(exporter1, duration1, t) gaugeEntry.Add(1) time.Sleep(1500 * time.Millisecond) checkExportedCount(exporter1, 1, t) checkExportedMetricDesc(exporter1, "active_request", t) ir1.Stop() resetExporter(exporter1) } func TestManualReadForIntervalReader(t *testing.T) { ir1 = createAndStart(exporter1, duration1, t) gaugeEntry.Set(1) reader1.ReadAndExport(exporter1) gaugeEntry.Set(4) time.Sleep(1500 * time.Millisecond) checkExportedCount(exporter1, 2, t) checkExportedValues(exporter1, []int64{1, 4}, t) // one for manual read other for time based. checkExportedMetricDesc(exporter1, "active_request", t) ir1.Stop() resetExporter(exporter1) } func TestFlushNoOpForIntervalReader(t *testing.T) { ir1 = createAndStart(exporter1, duration1, t) gaugeEntry.Set(1) // since IR is not stopped, flush does nothing ir1.Flush() // expect no data points checkExportedCount(exporter1, 0, t) checkExportedMetricDesc(exporter1, "active_request", t) ir1.Stop() resetExporter(exporter1) } func TestFlushAllowMultipleForIntervalReader(t *testing.T) { ir1 = createAndStart(exporter1, duration1, t) gaugeEntry.Set(1) ir1.Stop() ir1.Flush() // metric is still coming in gaugeEntry.Add(1) // one more flush after IR stopped ir1.Flush() // expect 2 data point, one from each flush checkExportedCount(exporter1, 2, t) checkExportedValues(exporter1, []int64{1, 2}, t) checkExportedMetricDesc(exporter1, "active_request", t) resetExporter(exporter1) } func TestFlushRestartForIntervalReader(t *testing.T) { ir1 = createAndStart(exporter1, duration1, t) gaugeEntry.Set(1) ir1.Stop() ir1.Flush() // restart the IR err := ir1.Start() if err != nil { t.Fatalf("error starting reader %v\n", err) } gaugeEntry.Add(1) ir1.Stop() ir1.Flush() // expect 2 data point, one from each flush checkExportedCount(exporter1, 2, t) checkExportedValues(exporter1, []int64{1, 2}, t) checkExportedMetricDesc(exporter1, "active_request", t) resetExporter(exporter1) } func TestProducerWithIntervalReaderStop(t *testing.T) { ir1 = createAndStart(exporter1, duration1, t) ir1.Stop() gaugeEntry.Add(1) time.Sleep(1500 * time.Millisecond) checkExportedCount(exporter1, 0, t) checkExportedMetricDesc(exporter1, "active_request", t) resetExporter(exporter1) } func TestProducerWithMultipleIntervalReaders(t *testing.T) { ir1 = createAndStart(exporter1, duration1, t) ir2 = createAndStart(exporter2, duration2, t) gaugeEntry.Add(1) time.Sleep(2500 * time.Millisecond) checkExportedCount(exporter1, 2, t) checkExportedMetricDesc(exporter1, "active_request", t) checkExportedCount(exporter2, 1, t) 
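// With a 2.5s sleep, the 1s reader (exporter1) ticks twice while the 2s
// reader (exporter2) ticks once, which is what the counts above assert.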
checkExportedMetricDesc(exporter2, "active_request", t) ir1.Stop() ir2.Stop() resetExporter(exporter1) resetExporter(exporter1) } func TestIntervalReaderMultipleStop(t *testing.T) { ir1 = createAndStart(exporter1, duration1, t) stop := make(chan bool, 1) go func() { ir1.Stop() ir1.Stop() stop <- true }() select { case _ = <-stop: case <-time.After(1 * time.Second): t.Fatalf("ir1 stop got blocked") } } func TestIntervalReaderMultipleStart(t *testing.T) { ir1 = createAndStart(exporter1, duration1, t) err := ir1.Start() if err == nil { t.Fatalf("expected error but got nil\n") } gaugeEntry.Add(1) time.Sleep(1500 * time.Millisecond) checkExportedCount(exporter1, 1, t) checkExportedMetricDesc(exporter1, "active_request", t) ir1.Stop() resetExporter(exporter1) } func TestNewIntervalReaderWithNilReader(t *testing.T) { _, err := NewIntervalReader(nil, exporter1) if err == nil { t.Fatalf("expected error but got nil\n") } } func TestNewIntervalReaderWithNilExporter(t *testing.T) { _, err := NewIntervalReader(reader1, nil) if err == nil { t.Fatalf("expected error but got nil\n") } } func TestNewIntervalReaderStartWithInvalidInterval(t *testing.T) { ir, err := NewIntervalReader(reader1, exporter1) ir.ReportingInterval = 500 * time.Millisecond err = ir.Start() if err == nil { t.Fatalf("expected error but got nil\n") } } func checkExportedCount(exporter *metricExporter, wantCount int, t *testing.T) { exporter.Lock() defer exporter.Unlock() gotCount := len(exporter.metrics) if gotCount != wantCount { t.Fatalf("exported metric count: got %d, want %d\n", gotCount, wantCount) } } func checkExportedValues(exporter *metricExporter, wantValues []int64, t *testing.T) { exporter.Lock() defer exporter.Unlock() gotCount := len(exporter.metrics) wantCount := len(wantValues) if gotCount != wantCount { t.Errorf("exported metric count: got %d, want %d\n", gotCount, wantCount) return } for i, wantValue := range wantValues { var gotValue int64 switch v := exporter.metrics[i].TimeSeries[0].Points[0].Value.(type) { case int64: gotValue = v default: t.Errorf("expected float64 value but found other %T", exporter.metrics[i].TimeSeries[0].Points[0].Value) } if gotValue != wantValue { t.Errorf("values idx %d, got: %v, want %v", i, gotValue, wantValue) } } } func checkExportedMetricDesc(exporter *metricExporter, wantMdName string, t *testing.T) { exporter.Lock() defer exporter.Unlock() for _, metric := range exporter.metrics { gotMdName := metric.Descriptor.Name if gotMdName != wantMdName { t.Errorf("got %s, want %s\n", gotMdName, wantMdName) } } exporter.metrics = nil } func resetExporter(exporter *metricExporter) { exporter.Lock() defer exporter.Unlock() exporter.metrics = nil } // createAndStart stops the current processors and creates a new one. func createAndStart(exporter *metricExporter, d time.Duration, t *testing.T) *IntervalReader { ir, _ := NewIntervalReader(reader1, exporter) ir.ReportingInterval = d err := ir.Start() if err != nil { t.Fatalf("error creating reader %v\n", err) } return ir } opencensus-go-0.24.0/metric/metricproducer/000077500000000000000000000000001433102037600206735ustar00rootroot00000000000000opencensus-go-0.24.0/metric/metricproducer/manager.go000066400000000000000000000043421433102037600226370ustar00rootroot00000000000000// Copyright 2019, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metricproducer import ( "sync" ) // Manager maintains a list of active producers. Producers can register // with the manager to allow readers to read all metrics provided by them. // Readers can retrieve all producers registered with the manager, // read metrics from the producers and export them. type Manager struct { mu sync.RWMutex producers map[Producer]struct{} } var prodMgr *Manager var once sync.Once // GlobalManager is a single instance of producer manager // that is used by all producers and all readers. func GlobalManager() *Manager { once.Do(func() { prodMgr = &Manager{} prodMgr.producers = make(map[Producer]struct{}) }) return prodMgr } // AddProducer adds the producer to the Manager if it is not already present. func (pm *Manager) AddProducer(producer Producer) { if producer == nil { return } pm.mu.Lock() defer pm.mu.Unlock() pm.producers[producer] = struct{}{} } // DeleteProducer deletes the producer from the Manager if it is present. func (pm *Manager) DeleteProducer(producer Producer) { if producer == nil { return } pm.mu.Lock() defer pm.mu.Unlock() delete(pm.producers, producer) } // GetAll returns a slice of all producer currently registered with // the Manager. For each call it generates a new slice. The slice // should not be cached as registration may change at any time. It is // typically called periodically by exporter to read metrics from // the producers. func (pm *Manager) GetAll() []Producer { pm.mu.Lock() defer pm.mu.Unlock() producers := make([]Producer, len(pm.producers)) i := 0 for producer := range pm.producers { producers[i] = producer i++ } return producers } opencensus-go-0.24.0/metric/metricproducer/manager_test.go000066400000000000000000000061641433102037600237020ustar00rootroot00000000000000// Copyright 2019, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package metricproducer import ( "testing" "go.opencensus.io/metric/metricdata" ) type testProducer struct { name string } var ( myProd1 = newTestProducer("foo") myProd2 = newTestProducer("bar") myProd3 = newTestProducer("foobar") pm = GlobalManager() ) func newTestProducer(name string) *testProducer { return &testProducer{name} } func (mp *testProducer) Read() []*metricdata.Metric { return nil } func TestAdd(t *testing.T) { pm.AddProducer(myProd1) pm.AddProducer(myProd2) got := pm.GetAll() want := []*testProducer{myProd1, myProd2} checkSlice(got, want, t) deleteAll() } func TestAddExisting(t *testing.T) { pm.AddProducer(myProd1) pm.AddProducer(myProd2) pm.AddProducer(myProd1) got := pm.GetAll() want := []*testProducer{myProd2, myProd1} checkSlice(got, want, t) deleteAll() } func TestAddNil(t *testing.T) { pm.AddProducer(nil) got := pm.GetAll() want := []*testProducer{} checkSlice(got, want, t) deleteAll() } func TestDelete(t *testing.T) { pm.AddProducer(myProd1) pm.AddProducer(myProd2) pm.AddProducer(myProd3) pm.DeleteProducer(myProd2) got := pm.GetAll() want := []*testProducer{myProd1, myProd3} checkSlice(got, want, t) deleteAll() } func TestDeleteNonExisting(t *testing.T) { pm.AddProducer(myProd1) pm.AddProducer(myProd3) pm.DeleteProducer(myProd2) got := pm.GetAll() want := []*testProducer{myProd1, myProd3} checkSlice(got, want, t) deleteAll() } func TestDeleteNil(t *testing.T) { pm.AddProducer(myProd1) pm.AddProducer(myProd3) pm.DeleteProducer(nil) got := pm.GetAll() want := []*testProducer{myProd1, myProd3} checkSlice(got, want, t) deleteAll() } func TestGetAllNil(t *testing.T) { got := pm.GetAll() want := []*testProducer{} checkSlice(got, want, t) deleteAll() } func TestImmutableProducerList(t *testing.T) { pm.AddProducer(myProd1) pm.AddProducer(myProd2) producersToMutate := pm.GetAll() producersToMutate[0] = myProd3 got := pm.GetAll() want := []*testProducer{myProd1, myProd2} checkSlice(got, want, t) deleteAll() } func checkSlice(got []Producer, want []*testProducer, t *testing.T) { gotLen := len(got) wantLen := len(want) if gotLen != wantLen { t.Errorf("got len: %d want: %d\n", gotLen, wantLen) } else { gotMap := map[Producer]struct{}{} for i := 0; i < gotLen; i++ { gotMap[got[i]] = struct{}{} } for i := 0; i < wantLen; i++ { delete(gotMap, want[i]) } if len(gotMap) > 0 { t.Errorf("got %v, want %v\n", got, want) } } } func deleteAll() { pm.DeleteProducer(myProd1) pm.DeleteProducer(myProd2) pm.DeleteProducer(myProd3) } opencensus-go-0.24.0/metric/metricproducer/producer.go000066400000000000000000000016651433102037600230550ustar00rootroot00000000000000// Copyright 2019, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metricproducer import ( "go.opencensus.io/metric/metricdata" ) // Producer is a source of metrics. type Producer interface { // Read should return the current values of all metrics supported by this // metric provider. // The returned metrics should be unique for each combination of name and // resource. 
Read() []*metricdata.Metric } opencensus-go-0.24.0/metric/registry.go000066400000000000000000000174541433102037600200560ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package metric import ( "sort" "sync" "time" "go.opencensus.io/metric/metricdata" ) // Registry creates and manages a set of gauges and cumulative. // External synchronization is required if you want to add gauges and cumulative to the same // registry from multiple goroutines. type Registry struct { baseMetrics sync.Map } type metricOptions struct { unit metricdata.Unit labelkeys []metricdata.LabelKey constLabels map[metricdata.LabelKey]metricdata.LabelValue desc string } // Options apply changes to metricOptions. type Options func(*metricOptions) // WithDescription applies provided description. func WithDescription(desc string) Options { return func(mo *metricOptions) { mo.desc = desc } } // WithUnit applies provided unit. func WithUnit(unit metricdata.Unit) Options { return func(mo *metricOptions) { mo.unit = unit } } // WithLabelKeys applies provided label. func WithLabelKeys(keys ...string) Options { return func(mo *metricOptions) { labelKeys := make([]metricdata.LabelKey, 0) for _, key := range keys { labelKeys = append(labelKeys, metricdata.LabelKey{Key: key}) } mo.labelkeys = labelKeys } } // WithLabelKeysAndDescription applies provided label. func WithLabelKeysAndDescription(labelKeys ...metricdata.LabelKey) Options { return func(mo *metricOptions) { mo.labelkeys = labelKeys } } // WithConstLabel applies provided constant label. func WithConstLabel(constLabels map[metricdata.LabelKey]metricdata.LabelValue) Options { return func(mo *metricOptions) { mo.constLabels = constLabels } } // NewRegistry initializes a new Registry. func NewRegistry() *Registry { return &Registry{} } // AddFloat64Gauge creates and adds a new float64-valued gauge to this registry. func (r *Registry) AddFloat64Gauge(name string, mos ...Options) (*Float64Gauge, error) { f := &Float64Gauge{ bm: baseMetric{ bmType: gaugeFloat64, }, } _, err := r.initBaseMetric(&f.bm, name, mos...) if err != nil { return nil, err } return f, nil } // AddInt64Gauge creates and adds a new int64-valued gauge to this registry. func (r *Registry) AddInt64Gauge(name string, mos ...Options) (*Int64Gauge, error) { i := &Int64Gauge{ bm: baseMetric{ bmType: gaugeInt64, }, } _, err := r.initBaseMetric(&i.bm, name, mos...) if err != nil { return nil, err } return i, nil } // AddInt64DerivedGauge creates and adds a new derived int64-valued gauge to this registry. // A derived gauge is convenient form of gauge where the object associated with the gauge // provides its value by implementing func() int64. func (r *Registry) AddInt64DerivedGauge(name string, mos ...Options) (*Int64DerivedGauge, error) { i := &Int64DerivedGauge{ bm: baseMetric{ bmType: derivedGaugeInt64, }, } _, err := r.initBaseMetric(&i.bm, name, mos...) 
if err != nil { return nil, err } return i, nil } // AddFloat64DerivedGauge creates and adds a new derived float64-valued gauge to this registry. // A derived gauge is convenient form of gauge where the object associated with the gauge // provides its value by implementing func() float64. func (r *Registry) AddFloat64DerivedGauge(name string, mos ...Options) (*Float64DerivedGauge, error) { f := &Float64DerivedGauge{ bm: baseMetric{ bmType: derivedGaugeFloat64, }, } _, err := r.initBaseMetric(&f.bm, name, mos...) if err != nil { return nil, err } return f, nil } func bmTypeToMetricType(bm *baseMetric) metricdata.Type { switch bm.bmType { case derivedGaugeFloat64: return metricdata.TypeGaugeFloat64 case derivedGaugeInt64: return metricdata.TypeGaugeInt64 case gaugeFloat64: return metricdata.TypeGaugeFloat64 case gaugeInt64: return metricdata.TypeGaugeInt64 case derivedCumulativeFloat64: return metricdata.TypeCumulativeFloat64 case derivedCumulativeInt64: return metricdata.TypeCumulativeInt64 case cumulativeFloat64: return metricdata.TypeCumulativeFloat64 case cumulativeInt64: return metricdata.TypeCumulativeInt64 default: panic("unsupported metric type") } } // AddFloat64Cumulative creates and adds a new float64-valued cumulative to this registry. func (r *Registry) AddFloat64Cumulative(name string, mos ...Options) (*Float64Cumulative, error) { f := &Float64Cumulative{ bm: baseMetric{ bmType: cumulativeFloat64, }, } _, err := r.initBaseMetric(&f.bm, name, mos...) if err != nil { return nil, err } return f, nil } // AddInt64Cumulative creates and adds a new int64-valued cumulative to this registry. func (r *Registry) AddInt64Cumulative(name string, mos ...Options) (*Int64Cumulative, error) { i := &Int64Cumulative{ bm: baseMetric{ bmType: cumulativeInt64, }, } _, err := r.initBaseMetric(&i.bm, name, mos...) if err != nil { return nil, err } return i, nil } // AddInt64DerivedCumulative creates and adds a new derived int64-valued cumulative to this registry. // A derived cumulative is convenient form of cumulative where the object associated with the cumulative // provides its value by implementing func() int64. func (r *Registry) AddInt64DerivedCumulative(name string, mos ...Options) (*Int64DerivedCumulative, error) { i := &Int64DerivedCumulative{ bm: baseMetric{ bmType: derivedCumulativeInt64, }, } _, err := r.initBaseMetric(&i.bm, name, mos...) if err != nil { return nil, err } return i, nil } // AddFloat64DerivedCumulative creates and adds a new derived float64-valued gauge to this registry. // A derived cumulative is convenient form of cumulative where the object associated with the cumulative // provides its value by implementing func() float64. func (r *Registry) AddFloat64DerivedCumulative(name string, mos ...Options) (*Float64DerivedCumulative, error) { f := &Float64DerivedCumulative{ bm: baseMetric{ bmType: derivedCumulativeFloat64, }, } _, err := r.initBaseMetric(&f.bm, name, mos...) if err != nil { return nil, err } return f, nil } func createMetricOption(mos ...Options) *metricOptions { o := &metricOptions{} for _, mo := range mos { mo(o) } return o } func (r *Registry) initBaseMetric(bm *baseMetric, name string, mos ...Options) (*baseMetric, error) { val, ok := r.baseMetrics.Load(name) if ok { existing := val.(*baseMetric) if existing.bmType != bm.bmType { return nil, errMetricExistsWithDiffType } } bm.start = time.Now() o := createMetricOption(mos...) 
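// Constant label keys are sorted so the descriptor's key order (and the
// matching constant label values built below) is deterministic; they are then
// prepended to the caller-supplied label keys.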
var constLabelKeys []metricdata.LabelKey for k := range o.constLabels { constLabelKeys = append(constLabelKeys, k) } sort.Slice(constLabelKeys, func(i, j int) bool { return constLabelKeys[i].Key < constLabelKeys[j].Key }) var constLabelValues []metricdata.LabelValue for _, k := range constLabelKeys { constLabelValues = append(constLabelValues, o.constLabels[k]) } bm.keys = append(constLabelKeys, o.labelkeys...) bm.constLabelValues = constLabelValues bm.desc = metricdata.Descriptor{ Name: name, Description: o.desc, Unit: o.unit, LabelKeys: bm.keys, Type: bmTypeToMetricType(bm), } r.baseMetrics.Store(name, bm) return bm, nil } // Read reads all gauges and cumulatives in this registry and returns their values as metrics. func (r *Registry) Read() []*metricdata.Metric { ms := []*metricdata.Metric{} r.baseMetrics.Range(func(k, v interface{}) bool { bm := v.(*baseMetric) ms = append(ms, bm.read()) return true }) return ms } opencensus-go-0.24.0/metric/test/000077500000000000000000000000001433102037600166235ustar00rootroot00000000000000opencensus-go-0.24.0/metric/test/doc.go000066400000000000000000000012671433102037600177250ustar00rootroot00000000000000// Copyright 2019, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Package test for testing code instrumented with the metric and stats packages. package test opencensus-go-0.24.0/metric/test/exporter.go000066400000000000000000000065131433102037600210270ustar00rootroot00000000000000package test import ( "context" "fmt" "sort" "strings" "time" "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricexport" "go.opencensus.io/stats/view" ) // Exporter keeps exported metric data in memory to aid in testing the instrumentation. // // Metrics can be retrieved with `GetPoint()`. In order to deterministically retrieve the most recent values, you must first invoke `ReadAndExport()`. type Exporter struct { // points is a map from a label signature to the latest value for the time series represented by the signature. // Use function `labelSignature` to get a signature from a `metricdata.Metric`. points map[string]metricdata.Point metricReader *metricexport.Reader } var _ metricexport.Exporter = &Exporter{} // NewExporter returns a new exporter. func NewExporter(metricReader *metricexport.Reader) *Exporter { return &Exporter{points: make(map[string]metricdata.Point), metricReader: metricReader} } // ExportMetrics records the view data. func (e *Exporter) ExportMetrics(ctx context.Context, data []*metricdata.Metric) error { for _, metric := range data { for _, ts := range metric.TimeSeries { signature := labelSignature(metric.Descriptor.Name, labelObjectsToKeyValue(metric.Descriptor.LabelKeys, ts.LabelValues)) e.points[signature] = ts.Points[len(ts.Points)-1] } } return nil } // GetPoint returns the latest point for the time series identified by the given labels. 
func (e *Exporter) GetPoint(metricName string, labels map[string]string) (metricdata.Point, bool) { v, ok := e.points[labelSignature(metricName, labelMapToKeyValue(labels))] return v, ok } // ReadAndExport reads the current values for all metrics and makes them available to this exporter. func (e *Exporter) ReadAndExport() { // The next line forces the view worker to process all stats.Record* calls that // happened within Store() before the call to ReadAndExport below. This abuses the // worker implementation to work around lack of synchronization. // TODO(jkohen,rghetia): figure out a clean way to make this deterministic. view.SetReportingPeriod(time.Minute) e.metricReader.ReadAndExport(e) } // String defines the “native” format for the exporter. func (e *Exporter) String() string { return fmt.Sprintf("points{%v}", e.points) } type keyValue struct { Key string Value string } func sortKeyValue(kv []keyValue) { sort.Slice(kv, func(i, j int) bool { return kv[i].Key < kv[j].Key }) } func labelMapToKeyValue(labels map[string]string) []keyValue { kv := make([]keyValue, 0, len(labels)) for k, v := range labels { kv = append(kv, keyValue{Key: k, Value: v}) } sortKeyValue(kv) return kv } func labelObjectsToKeyValue(keys []metricdata.LabelKey, values []metricdata.LabelValue) []keyValue { if len(keys) != len(values) { panic("keys and values must have the same length") } kv := make([]keyValue, 0, len(values)) for i := range keys { if values[i].Present { kv = append(kv, keyValue{Key: keys[i].Key, Value: values[i].Value}) } } sortKeyValue(kv) return kv } // labelSignature returns a string that uniquely identifies the list of labels given in the input. func labelSignature(metricName string, kv []keyValue) string { var builder strings.Builder for _, x := range kv { builder.WriteString(x.Key) builder.WriteString(x.Value) } return fmt.Sprintf("%s{%s}", metricName, builder.String()) } opencensus-go-0.24.0/metric/test/exporter_test.go000066400000000000000000000043331433102037600220640ustar00rootroot00000000000000package test import ( "context" "fmt" "go.opencensus.io/metric" "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricexport" "go.opencensus.io/stats" "go.opencensus.io/stats/view" "go.opencensus.io/tag" ) var ( myTag = tag.MustNewKey("my_label") myMetric = stats.Int64("my_metric", "description", stats.UnitDimensionless) ) func init() { if err := view.Register( &view.View{ Measure: myMetric, TagKeys: []tag.Key{myTag}, Aggregation: view.Sum(), }, ); err != nil { panic(err) } } func ExampleExporter_stats() { metricReader := metricexport.NewReader() metrics := NewExporter(metricReader) metrics.ReadAndExport() metricBase := getCounter(metrics, myMetric.Name(), newMetricKey("label1")) for i := 1; i <= 3; i++ { // The code under test begins here. stats.RecordWithTags(context.Background(), []tag.Mutator{tag.Upsert(myTag, "label1")}, myMetric.M(int64(i))) // The code under test ends here. 
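// ReadAndExport below performs a synchronous read of every registered
// producer, so the point retrieved via getCounter reflects the Record call
// made just above.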
metrics.ReadAndExport() metricValue := getCounter(metrics, myMetric.Name(), newMetricKey("label1")) fmt.Printf("increased by %d\n", metricValue-metricBase) } // Output: // increased by 1 // increased by 3 // increased by 6 } type derivedMetric struct { i int64 } func (m *derivedMetric) ToInt64() int64 { return m.i } func ExampleExporter_metric() { metricReader := metricexport.NewReader() metrics := NewExporter(metricReader) m := derivedMetric{} r := metric.NewRegistry() g, _ := r.AddInt64DerivedCumulative("derived", metric.WithLabelKeys(myTag.Name())) g.UpsertEntry(m.ToInt64, metricdata.NewLabelValue("l1")) for i := 1; i <= 3; i++ { // The code under test begins here. m.i = int64(i) // The code under test ends here. metrics.ExportMetrics(context.Background(), r.Read()) metricValue := getCounter(metrics, "derived", newMetricKey("l1")) fmt.Println(metricValue) } // Output: // 1 // 2 // 3 } func newMetricKey(v string) map[string]string { return map[string]string{myTag.Name(): v} } func getCounter(metrics *Exporter, metricName string, metricKey map[string]string) int64 { p, ok := metrics.GetPoint(metricName, metricKey) if !ok { // This is expected before the metric is recorded the first time. return 0 } return p.Value.(int64) } opencensus-go-0.24.0/opencensus.go000066400000000000000000000014531433102037600170750ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package opencensus contains Go support for OpenCensus. package opencensus // import "go.opencensus.io" // Version is the current release version of OpenCensus in use. func Version() string { return "0.24.0" } opencensus-go-0.24.0/plugin/000077500000000000000000000000001433102037600156575ustar00rootroot00000000000000opencensus-go-0.24.0/plugin/ocgrpc/000077500000000000000000000000001433102037600171345ustar00rootroot00000000000000opencensus-go-0.24.0/plugin/ocgrpc/benchmark_test.go000066400000000000000000000037031433102037600224570ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package ocgrpc import ( "testing" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) func BenchmarkStatusCodeToString_OK(b *testing.B) { st := status.New(codes.OK, "OK") for i := 0; i < b.N; i++ { s := statusCodeToString(st) _ = s } } func BenchmarkStatusCodeToString_Unauthenticated(b *testing.B) { st := status.New(codes.Unauthenticated, "Unauthenticated") for i := 0; i < b.N; i++ { s := statusCodeToString(st) _ = s } } var codeToStringMap = map[codes.Code]string{ codes.OK: "OK", codes.Canceled: "CANCELLED", codes.Unknown: "UNKNOWN", codes.InvalidArgument: "INVALID_ARGUMENT", codes.DeadlineExceeded: "DEADLINE_EXCEEDED", codes.NotFound: "NOT_FOUND", codes.AlreadyExists: "ALREADY_EXISTS", codes.PermissionDenied: "PERMISSION_DENIED", codes.ResourceExhausted: "RESOURCE_EXHAUSTED", codes.FailedPrecondition: "FAILED_PRECONDITION", codes.Aborted: "ABORTED", codes.OutOfRange: "OUT_OF_RANGE", codes.Unimplemented: "UNIMPLEMENTED", codes.Internal: "INTERNAL", codes.Unavailable: "UNAVAILABLE", codes.DataLoss: "DATA_LOSS", codes.Unauthenticated: "UNAUTHENTICATED", } func BenchmarkMapAlternativeImpl_OK(b *testing.B) { st := status.New(codes.OK, "OK") for i := 0; i < b.N; i++ { _ = codeToStringMap[st.Code()] } } opencensus-go-0.24.0/plugin/ocgrpc/client.go000066400000000000000000000034071433102037600207450ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ocgrpc import ( "context" "go.opencensus.io/trace" "google.golang.org/grpc/stats" ) // ClientHandler implements a gRPC stats.Handler for recording OpenCensus stats and // traces. Use with gRPC clients only. type ClientHandler struct { // StartOptions allows configuring the StartOptions used to create new spans. // // StartOptions.SpanKind will always be set to trace.SpanKindClient // for spans started by this handler. StartOptions trace.StartOptions } // HandleConn exists to satisfy gRPC stats.Handler. func (c *ClientHandler) HandleConn(ctx context.Context, cs stats.ConnStats) { // no-op } // TagConn exists to satisfy gRPC stats.Handler. func (c *ClientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { // no-op return ctx } // HandleRPC implements per-RPC tracing and stats instrumentation. func (c *ClientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { traceHandleRPC(ctx, rs) statsHandleRPC(ctx, rs) } // TagRPC implements per-RPC context management. func (c *ClientHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { ctx = c.traceTagRPC(ctx, rti) ctx = c.statsTagRPC(ctx, rti) return ctx } opencensus-go-0.24.0/plugin/ocgrpc/client_metrics.go000066400000000000000000000123151433102037600224710ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package ocgrpc import ( "go.opencensus.io/stats" "go.opencensus.io/stats/view" "go.opencensus.io/tag" ) // The following variables are measures are recorded by ClientHandler: var ( ClientSentMessagesPerRPC = stats.Int64("grpc.io/client/sent_messages_per_rpc", "Number of messages sent in the RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) ClientSentBytesPerRPC = stats.Int64("grpc.io/client/sent_bytes_per_rpc", "Total bytes sent across all request messages per RPC.", stats.UnitBytes) ClientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) ClientReceivedBytesPerRPC = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes) ClientRoundtripLatency = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds) ClientStartedRPCs = stats.Int64("grpc.io/client/started_rpcs", "Number of started client RPCs.", stats.UnitDimensionless) ClientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds) ) // Predefined views may be registered to collect data for the above measures. // As always, you may also define your own custom views over measures collected by this // package. These are declared as a convenience only; none are registered by // default. var ( ClientSentBytesPerRPCView = &view.View{ Measure: ClientSentBytesPerRPC, Name: "grpc.io/client/sent_bytes_per_rpc", Description: "Distribution of bytes sent per RPC, by method.", TagKeys: []tag.Key{KeyClientMethod}, Aggregation: DefaultBytesDistribution, } ClientReceivedBytesPerRPCView = &view.View{ Measure: ClientReceivedBytesPerRPC, Name: "grpc.io/client/received_bytes_per_rpc", Description: "Distribution of bytes received per RPC, by method.", TagKeys: []tag.Key{KeyClientMethod}, Aggregation: DefaultBytesDistribution, } ClientRoundtripLatencyView = &view.View{ Measure: ClientRoundtripLatency, Name: "grpc.io/client/roundtrip_latency", Description: "Distribution of round-trip latency, by method.", TagKeys: []tag.Key{KeyClientMethod}, Aggregation: DefaultMillisecondsDistribution, } // Purposely reuses the count from `ClientRoundtripLatency`, tagging // with method and status to result in ClientCompletedRpcs. 
ClientCompletedRPCsView = &view.View{ Measure: ClientRoundtripLatency, Name: "grpc.io/client/completed_rpcs", Description: "Count of RPCs by method and status.", TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, Aggregation: view.Count(), } ClientStartedRPCsView = &view.View{ Measure: ClientStartedRPCs, Name: "grpc.io/client/started_rpcs", Description: "Number of started client RPCs.", TagKeys: []tag.Key{KeyClientMethod}, Aggregation: view.Count(), } ClientSentMessagesPerRPCView = &view.View{ Measure: ClientSentMessagesPerRPC, Name: "grpc.io/client/sent_messages_per_rpc", Description: "Distribution of sent messages count per RPC, by method.", TagKeys: []tag.Key{KeyClientMethod}, Aggregation: DefaultMessageCountDistribution, } ClientReceivedMessagesPerRPCView = &view.View{ Measure: ClientReceivedMessagesPerRPC, Name: "grpc.io/client/received_messages_per_rpc", Description: "Distribution of received messages count per RPC, by method.", TagKeys: []tag.Key{KeyClientMethod}, Aggregation: DefaultMessageCountDistribution, } ClientServerLatencyView = &view.View{ Measure: ClientServerLatency, Name: "grpc.io/client/server_latency", Description: "Distribution of server latency as viewed by client, by method.", TagKeys: []tag.Key{KeyClientMethod}, Aggregation: DefaultMillisecondsDistribution, } ) // DefaultClientViews are the default client views provided by this package. var DefaultClientViews = []*view.View{ ClientSentBytesPerRPCView, ClientReceivedBytesPerRPCView, ClientRoundtripLatencyView, ClientCompletedRPCsView, } // TODO(jbd): Add roundtrip_latency, uncompressed_request_bytes, uncompressed_response_bytes, request_count, response_count. // TODO(acetechnologist): This is temporary and will need to be replaced by a // mechanism to load these defaults from a common repository/config shared by // all supported languages. Likely a serialized protobuf of these defaults. opencensus-go-0.24.0/plugin/ocgrpc/client_spec_test.go000066400000000000000000000141231433102037600230130ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package ocgrpc import ( "regexp" "strings" "testing" "go.opencensus.io/stats" "go.opencensus.io/stats/view" ) var colSep = regexp.MustCompile(`\s*\|\s*`) func TestSpecClientMeasures(t *testing.T) { spec := ` | Measure name | Unit | Description | |------------------------------------------|------|-----------------------------------------------------------------------------------------------| | grpc.io/client/sent_messages_per_rpc | 1 | Number of messages sent in the RPC (always 1 for non-streaming RPCs). | | grpc.io/client/sent_bytes_per_rpc | By | Total bytes sent across all request messages per RPC. | | grpc.io/client/received_messages_per_rpc | 1 | Number of response messages received per RPC (always 1 for non-streaming RPCs). | | grpc.io/client/received_bytes_per_rpc | By | Total bytes received across all response messages per RPC. 
| | grpc.io/client/roundtrip_latency | ms | Time between first byte of request sent to last byte of response received, or terminal error. | | grpc.io/client/server_latency | ms | Propagated from the server and should have the same value as "grpc.io/server/latency". |` lines := strings.Split(spec, "\n")[3:] type measureDef struct { name string unit string desc string } measureDefs := make([]measureDef, 0, len(lines)) for _, line := range lines { cols := colSep.Split(line, -1)[1:] if len(cols) < 3 { t.Fatalf("Invalid config line %#v", cols) } measureDefs = append(measureDefs, measureDef{cols[0], cols[1], cols[2]}) } gotMeasures := []stats.Measure{ ClientSentMessagesPerRPC, ClientSentBytesPerRPC, ClientReceivedMessagesPerRPC, ClientReceivedBytesPerRPC, ClientRoundtripLatency, ClientServerLatency, } if got, want := len(gotMeasures), len(measureDefs); got != want { t.Fatalf("len(gotMeasures) = %d; want %d", got, want) } for i, m := range gotMeasures { defn := measureDefs[i] if got, want := m.Name(), defn.name; got != want { t.Errorf("Name = %q; want %q", got, want) } if got, want := m.Unit(), defn.unit; got != want { t.Errorf("%q: Unit = %q; want %q", defn.name, got, want) } if got, want := m.Description(), defn.desc; got != want { t.Errorf("%q: Description = %q; want %q", defn.name, got, want) } } } func TestSpecClientViews(t *testing.T) { defaultViewsSpec := ` | View name | Measure suffix | Aggregation | Tags | |---------------------------------------|------------------------|--------------|------------------------------| | grpc.io/client/sent_bytes_per_rpc | sent_bytes_per_rpc | distribution | client_method | | grpc.io/client/received_bytes_per_rpc | received_bytes_per_rpc | distribution | client_method | | grpc.io/client/roundtrip_latency | roundtrip_latency | distribution | client_method | | grpc.io/client/completed_rpcs | roundtrip_latency | count | client_method, client_status |` extraViewsSpec := ` | View name | Measure suffix | Aggregation | Tags suffix | |------------------------------------------|---------------------------|--------------|---------------| | grpc.io/client/sent_messages_per_rpc | sent_messages_per_rpc | distribution | client_method | | grpc.io/client/received_messages_per_rpc | received_messages_per_rpc | distribution | client_method | | grpc.io/client/server_latency | server_latency | distribution | client_method |` lines := strings.Split(defaultViewsSpec, "\n")[3:] lines = append(lines, strings.Split(extraViewsSpec, "\n")[3:]...) 
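// Each remaining markdown row is split on the column separator and compared
// against the client views declared by this package.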
type viewDef struct { name string measureSuffix string aggregation string tags string } viewDefs := make([]viewDef, 0, len(lines)) for _, line := range lines { cols := colSep.Split(line, -1)[1:] if len(cols) < 4 { t.Fatalf("Invalid config line %#v", cols) } viewDefs = append(viewDefs, viewDef{cols[0], cols[1], cols[2], cols[3]}) } views := DefaultClientViews views = append(views, ClientSentMessagesPerRPCView, ClientReceivedMessagesPerRPCView, ClientServerLatencyView) if got, want := len(views), len(viewDefs); got != want { t.Fatalf("len(gotMeasures) = %d; want %d", got, want) } for i, v := range views { defn := viewDefs[i] if got, want := v.Name, defn.name; got != want { t.Errorf("Name = %q; want %q", got, want) } if got, want := v.Measure.Name(), "grpc.io/client/"+defn.measureSuffix; got != want { t.Errorf("%q: Measure.Name = %q; want %q", defn.name, got, want) } switch v.Aggregation.Type { case view.AggTypeDistribution: if got, want := "distribution", defn.aggregation; got != want { t.Errorf("%q: Description = %q; want %q", defn.name, got, want) } case view.AggTypeCount: if got, want := "count", defn.aggregation; got != want { t.Errorf("%q: Description = %q; want %q", defn.name, got, want) } default: t.Errorf("Invalid aggregation type") } wantTags := strings.Split(defn.tags, ", ") if got, want := len(v.TagKeys), len(wantTags); got != want { t.Errorf("len(TagKeys) = %d; want %d", got, want) } for j := range wantTags { if got, want := v.TagKeys[j].Name(), "grpc_"+wantTags[j]; got != want { t.Errorf("TagKeys[%d].Name() = %q; want %q", j, got, want) } } } } opencensus-go-0.24.0/plugin/ocgrpc/client_stats_handler.go000066400000000000000000000025301433102037600236540ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package ocgrpc import ( "context" "time" "go.opencensus.io/tag" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/stats" ) // statsTagRPC gets the tag.Map populated by the application code, serializes // its tags into the GRPC metadata in order to be sent to the server. func (h *ClientHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { startTime := time.Now() if info == nil { if grpclog.V(2) { grpclog.Info("clientHandler.TagRPC called with nil info.") } return ctx } d := &rpcData{ startTime: startTime, method: info.FullMethodName, } ts := tag.FromContext(ctx) if ts != nil { encoded := tag.Encode(ts) ctx = stats.SetTags(ctx, encoded) } return context.WithValue(ctx, rpcDataKey, d) } opencensus-go-0.24.0/plugin/ocgrpc/client_stats_handler_test.go000066400000000000000000000256661433102037600247320ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package ocgrpc import ( "reflect" "testing" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "go.opencensus.io/trace" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "context" "go.opencensus.io/metric/metricdata" "go.opencensus.io/stats/view" "go.opencensus.io/tag" "google.golang.org/grpc/stats" ) func TestClientDefaultCollections(t *testing.T) { k1 := tag.MustNewKey("k1") k2 := tag.MustNewKey("k2") type tagPair struct { k tag.Key v string } type wantData struct { v func() *view.View rows []*view.Row } type rpc struct { tags []tagPair tagInfo *stats.RPCTagInfo inPayloads []*stats.InPayload outPayloads []*stats.OutPayload end *stats.End } type testCase struct { label string rpcs []*rpc wants []*wantData } tcs := []testCase{ { label: "1", rpcs: []*rpc{ { []tagPair{{k1, "v1"}}, &stats.RPCTagInfo{FullMethodName: "/package.service/method"}, []*stats.InPayload{ {Length: 10}, }, []*stats.OutPayload{ {Length: 10}, }, &stats.End{Error: nil}, }, }, wants: []*wantData{ { func() *view.View { return ClientSentMessagesPerRPCView }, []*view.Row{ { Tags: []tag.Tag{ {Key: KeyClientMethod, Value: "package.service/method"}, }, Data: newDistributionData([]int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 1, 1, 1, 0), }, }, }, { func() *view.View { return ClientReceivedMessagesPerRPCView }, []*view.Row{ { Tags: []tag.Tag{ {Key: KeyClientMethod, Value: "package.service/method"}, }, Data: newDistributionData([]int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 1, 1, 1, 0), }, }, }, { func() *view.View { return ClientSentBytesPerRPCView }, []*view.Row{ { Tags: []tag.Tag{ {Key: KeyClientMethod, Value: "package.service/method"}, }, Data: newDistributionData([]int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 10, 10, 10, 0), }, }, }, { func() *view.View { return ClientReceivedBytesPerRPCView }, []*view.Row{ { Tags: []tag.Tag{ {Key: KeyClientMethod, Value: "package.service/method"}, }, Data: newDistributionData([]int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 10, 10, 10, 0), }, }, }, }, }, { label: "2", rpcs: []*rpc{ { []tagPair{{k1, "v1"}}, &stats.RPCTagInfo{FullMethodName: "/package.service/method"}, []*stats.InPayload{ {Length: 10}, }, []*stats.OutPayload{ {Length: 10}, {Length: 10}, {Length: 10}, }, &stats.End{Error: nil}, }, { []tagPair{{k1, "v11"}}, &stats.RPCTagInfo{FullMethodName: "/package.service/method"}, []*stats.InPayload{ {Length: 10}, {Length: 10}, }, []*stats.OutPayload{ {Length: 10}, {Length: 10}, }, &stats.End{Error: status.Error(codes.Canceled, "canceled")}, }, }, wants: []*wantData{ { func() *view.View { return ClientSentMessagesPerRPCView }, []*view.Row{ { Tags: []tag.Tag{ {Key: KeyClientMethod, Value: "package.service/method"}, }, Data: newDistributionData([]int64{0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 2, 3, 2.5, 0.5), }, }, }, { func() *view.View { return ClientReceivedMessagesPerRPCView }, []*view.Row{ { Tags: []tag.Tag{ {Key: KeyClientMethod, Value: "package.service/method"}, }, Data: newDistributionData([]int64{0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 1, 2, 1.5, 0.5), }, }, }, 
}, }, { label: "3", rpcs: []*rpc{ { []tagPair{{k1, "v1"}}, &stats.RPCTagInfo{FullMethodName: "/package.service/method"}, []*stats.InPayload{ {Length: 1}, }, []*stats.OutPayload{ {Length: 1}, {Length: 1024}, {Length: 65536}, }, &stats.End{Error: nil}, }, { []tagPair{{k1, "v1"}, {k2, "v2"}}, &stats.RPCTagInfo{FullMethodName: "/package.service/method"}, []*stats.InPayload{ {Length: 1024}, }, []*stats.OutPayload{ {Length: 4096}, {Length: 16384}, }, &stats.End{Error: status.Error(codes.Canceled, "canceled")}, }, { []tagPair{{k1, "v11"}, {k2, "v22"}}, &stats.RPCTagInfo{FullMethodName: "/package.service/method"}, []*stats.InPayload{ {Length: 2048}, {Length: 16384}, }, []*stats.OutPayload{ {Length: 2048}, {Length: 4096}, {Length: 16384}, }, &stats.End{Error: status.Error(codes.Aborted, "aborted")}, }, }, wants: []*wantData{ { func() *view.View { return ClientSentMessagesPerRPCView }, []*view.Row{ { Tags: []tag.Tag{ {Key: KeyClientMethod, Value: "package.service/method"}, }, Data: newDistributionData([]int64{0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 2, 3, 2.666666666, 0.333333333*2), }, }, }, { func() *view.View { return ClientReceivedMessagesPerRPCView }, []*view.Row{ { Tags: []tag.Tag{ {Key: KeyClientMethod, Value: "package.service/method"}, }, Data: newDistributionData([]int64{0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 1, 2, 1.333333333, 0.333333333*2), }, }, }, { func() *view.View { return ClientSentBytesPerRPCView }, []*view.Row{ { Tags: []tag.Tag{ {Key: KeyClientMethod, Value: "package.service/method"}, }, Data: newDistributionData([]int64{0, 0, 0, 0, 2 /*16384*/, 1 /*65536*/, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 20480, 66561, 36523, 1.355519318e+09), }, }, }, { func() *view.View { return ClientReceivedBytesPerRPCView }, []*view.Row{ { Tags: []tag.Tag{ {Key: KeyClientMethod, Value: "package.service/method"}, }, Data: newDistributionData([]int64{1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 1, 18432, 6485.666667, 2.1459558466666666e+08), }, }, }, }, }, } views := []*view.View{ ClientSentBytesPerRPCView, ClientReceivedBytesPerRPCView, ClientRoundtripLatencyView, ClientCompletedRPCsView, ClientSentMessagesPerRPCView, ClientReceivedMessagesPerRPCView, } for _, tc := range tcs { // Register views. if err := view.Register(views...); err != nil { t.Error(err) } h := &ClientHandler{} h.StartOptions.Sampler = trace.NeverSample() for _, rpc := range tc.rpcs { var mods []tag.Mutator for _, t := range rpc.tags { mods = append(mods, tag.Upsert(t.k, t.v)) } ctx, err := tag.New(context.Background(), mods...) 
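// Building the tag map from plain Upsert mutators should not fail; report any
// error but keep driving the handler so the remaining assertions still run.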
if err != nil { t.Errorf("%q: NewMap = %v", tc.label, err) } encoded := tag.Encode(tag.FromContext(ctx)) ctx = stats.SetTags(context.Background(), encoded) ctx = h.TagRPC(ctx, rpc.tagInfo) for _, out := range rpc.outPayloads { out.Client = true h.HandleRPC(ctx, out) } for _, in := range rpc.inPayloads { in.Client = true h.HandleRPC(ctx, in) } rpc.end.Client = true h.HandleRPC(ctx, rpc.end) } for _, wantData := range tc.wants { gotRows, err := view.RetrieveData(wantData.v().Name) if err != nil { t.Errorf("%q: RetrieveData(%q) = %v", tc.label, wantData.v().Name, err) continue } for i := range gotRows { view.ClearStart(gotRows[i].Data) } for _, gotRow := range gotRows { if !containsRow(wantData.rows, gotRow) { t.Errorf("%q: unwanted row for view %q = %v", tc.label, wantData.v().Name, gotRow) break } } for _, wantRow := range wantData.rows { if !containsRow(gotRows, wantRow) { t.Errorf("%q: row missing for view %q; want %v", tc.label, wantData.v().Name, wantRow) break } } } // Unregister views to cleanup. view.Unregister(views...) } } func TestClientRecordExemplar(t *testing.T) { key := tag.MustNewKey("test_key") tagInfo := &stats.RPCTagInfo{FullMethodName: "/package.service/method"} out := &stats.OutPayload{Length: 2000} end := &stats.End{Error: nil} if err := view.Register(ClientSentBytesPerRPCView); err != nil { t.Error(err) } h := &ClientHandler{} h.StartOptions.Sampler = trace.AlwaysSample() ctx, err := tag.New(context.Background(), tag.Upsert(key, "test_val")) if err != nil { t.Error(err) } encoded := tag.Encode(tag.FromContext(ctx)) ctx = stats.SetTags(context.Background(), encoded) ctx = h.TagRPC(ctx, tagInfo) out.Client = true h.HandleRPC(ctx, out) end.Client = true h.HandleRPC(ctx, end) span := trace.FromContext(ctx) if span == nil { t.Fatal("expected non-nil span, got nil") } if !span.IsRecordingEvents() { t.Errorf("span should be sampled") } attachments := map[string]interface{}{metricdata.AttachmentKeySpanContext: span.SpanContext()} wantExemplar := &metricdata.Exemplar{Value: 2000, Attachments: attachments} rows, err := view.RetrieveData(ClientSentBytesPerRPCView.Name) if err != nil { t.Fatal("Error RetrieveData ", err) } if len(rows) == 0 { t.Fatal("No data was recorded.") } data := rows[0].Data dis, ok := data.(*view.DistributionData) if !ok { t.Fatal("want DistributionData, got ", data) } // Only recorded value is 2000, which falls into the second bucket (1024, 2048]. wantBuckets := []int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} if !reflect.DeepEqual(dis.CountPerBucket, wantBuckets) { t.Errorf("want buckets %v, got %v", wantBuckets, dis.CountPerBucket) } for i, e := range dis.ExemplarsPerBucket { // Only the second bucket should have an exemplar. if i == 1 { if diff := cmpExemplar(e, wantExemplar); diff != "" { t.Fatalf("Unexpected Exemplar -got +want: %s", diff) } } else if e != nil { t.Errorf("want nil exemplar, got %v", e) } } // Unregister views to cleanup. view.Unregister(ClientSentBytesPerRPCView) } // containsRow returns true if rows contain r. func containsRow(rows []*view.Row, r *view.Row) bool { for _, x := range rows { if r.Equal(x) { return true } } return false } // Compare exemplars while ignoring exemplar timestamp, since timestamp is non-deterministic. 
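// An empty diff indicates the exemplars are considered equal.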
func cmpExemplar(got, want *metricdata.Exemplar) string { return cmp.Diff(got, want, cmpopts.IgnoreFields(metricdata.Exemplar{}, "Timestamp"), cmpopts.IgnoreUnexported(metricdata.Exemplar{})) } opencensus-go-0.24.0/plugin/ocgrpc/doc.go000066400000000000000000000014421433102037600202310ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package ocgrpc contains OpenCensus stats and trace // integrations for gRPC. // // Use ServerHandler for servers and ClientHandler for clients. package ocgrpc // import "go.opencensus.io/plugin/ocgrpc" opencensus-go-0.24.0/plugin/ocgrpc/end_to_end_test.go000066400000000000000000000175211433102037600226260ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package ocgrpc_test import ( "context" "io" "reflect" "testing" "go.opencensus.io/internal/testpb" "go.opencensus.io/plugin/ocgrpc" "go.opencensus.io/stats/view" "go.opencensus.io/tag" ) var keyAccountId = tag.MustNewKey("account_id") func TestEndToEnd_Single(t *testing.T) { view.Register(ocgrpc.DefaultClientViews...) defer view.Unregister(ocgrpc.DefaultClientViews...) view.Register(ocgrpc.DefaultServerViews...) defer view.Unregister(ocgrpc.DefaultServerViews...) extraViews := []*view.View{ ocgrpc.ServerReceivedMessagesPerRPCView, ocgrpc.ClientReceivedMessagesPerRPCView, ocgrpc.ServerSentMessagesPerRPCView, ocgrpc.ClientSentMessagesPerRPCView, ocgrpc.ServerStartedRPCsView, ocgrpc.ClientStartedRPCsView, } view.Register(extraViews...) defer view.Unregister(extraViews...) 
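// testpb.NewTestClient starts an in-process server and returns a connected,
// instrumented client; done tears both down after the test.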
client, done := testpb.NewTestClient(t) defer done() ctx := context.Background() ctx, _ = tag.New(ctx, tag.Insert(keyAccountId, "abc123")) var ( clientMethodTag = tag.Tag{Key: ocgrpc.KeyClientMethod, Value: "testpb.Foo/Single"} serverMethodTag = tag.Tag{Key: ocgrpc.KeyServerMethod, Value: "testpb.Foo/Single"} clientStatusOKTag = tag.Tag{Key: ocgrpc.KeyClientStatus, Value: "OK"} serverStatusOKTag = tag.Tag{Key: ocgrpc.KeyServerStatus, Value: "OK"} serverStatusUnknownTag = tag.Tag{Key: ocgrpc.KeyClientStatus, Value: "UNKNOWN"} clientStatusUnknownTag = tag.Tag{Key: ocgrpc.KeyServerStatus, Value: "UNKNOWN"} ) _, err := client.Single(ctx, &testpb.FooRequest{}) if err != nil { t.Fatal(err) } checkCount(t, ocgrpc.ClientStartedRPCsView, 1, clientMethodTag) checkCount(t, ocgrpc.ServerStartedRPCsView, 1, serverMethodTag) checkCount(t, ocgrpc.ClientCompletedRPCsView, 1, clientMethodTag, clientStatusOKTag) checkCount(t, ocgrpc.ServerCompletedRPCsView, 1, serverMethodTag, serverStatusOKTag) _, _ = client.Single(ctx, &testpb.FooRequest{Fail: true}) checkCount(t, ocgrpc.ClientStartedRPCsView, 2, clientMethodTag) checkCount(t, ocgrpc.ServerStartedRPCsView, 2, serverMethodTag) checkCount(t, ocgrpc.ClientCompletedRPCsView, 1, clientMethodTag, serverStatusUnknownTag) checkCount(t, ocgrpc.ServerCompletedRPCsView, 1, serverMethodTag, clientStatusUnknownTag) tcs := []struct { v *view.View tags []tag.Tag mean float64 }{ {ocgrpc.ClientSentMessagesPerRPCView, []tag.Tag{clientMethodTag}, 1.0}, {ocgrpc.ServerReceivedMessagesPerRPCView, []tag.Tag{serverMethodTag}, 1.0}, {ocgrpc.ClientReceivedMessagesPerRPCView, []tag.Tag{clientMethodTag}, 0.5}, {ocgrpc.ServerSentMessagesPerRPCView, []tag.Tag{serverMethodTag}, 0.5}, {ocgrpc.ClientSentBytesPerRPCView, []tag.Tag{clientMethodTag}, 1.0}, {ocgrpc.ServerReceivedBytesPerRPCView, []tag.Tag{serverMethodTag}, 1.0}, {ocgrpc.ClientReceivedBytesPerRPCView, []tag.Tag{clientMethodTag}, 0.0}, {ocgrpc.ServerSentBytesPerRPCView, []tag.Tag{serverMethodTag}, 0.0}, } for _, tt := range tcs { t.Run("view="+tt.v.Name, func(t *testing.T) { dist := getDistribution(t, tt.v, tt.tags...) if got, want := dist.Count, int64(2); got != want { t.Errorf("Count = %d; want %d", got, want) } if got, want := dist.Mean, tt.mean; got != want { t.Errorf("Mean = %v; want %v", got, want) } }) } } func TestEndToEnd_Stream(t *testing.T) { view.Register(ocgrpc.DefaultClientViews...) defer view.Unregister(ocgrpc.DefaultClientViews...) view.Register(ocgrpc.DefaultServerViews...) defer view.Unregister(ocgrpc.DefaultServerViews...) extraViews := []*view.View{ ocgrpc.ServerReceivedMessagesPerRPCView, ocgrpc.ClientReceivedMessagesPerRPCView, ocgrpc.ServerSentMessagesPerRPCView, ocgrpc.ClientSentMessagesPerRPCView, ocgrpc.ClientStartedRPCsView, ocgrpc.ServerStartedRPCsView, } view.Register(extraViews...) defer view.Unregister(extraViews...) 
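// Same in-process setup as the unary test, exercised below with a
// bidirectional stream of msgCount messages.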
client, done := testpb.NewTestClient(t) defer done() ctx := context.Background() ctx, _ = tag.New(ctx, tag.Insert(keyAccountId, "abc123")) var ( clientMethodTag = tag.Tag{Key: ocgrpc.KeyClientMethod, Value: "testpb.Foo/Multiple"} serverMethodTag = tag.Tag{Key: ocgrpc.KeyServerMethod, Value: "testpb.Foo/Multiple"} clientStatusOKTag = tag.Tag{Key: ocgrpc.KeyClientStatus, Value: "OK"} serverStatusOKTag = tag.Tag{Key: ocgrpc.KeyServerStatus, Value: "OK"} ) const msgCount = 3 stream, err := client.Multiple(ctx) if err != nil { t.Fatal(err) } for i := 0; i < msgCount; i++ { stream.Send(&testpb.FooRequest{}) _, err := stream.Recv() if err != nil { t.Fatal(err) } } if err := stream.CloseSend(); err != nil { t.Fatal(err) } if _, err = stream.Recv(); err != io.EOF { t.Fatal(err) } checkCount(t, ocgrpc.ClientStartedRPCsView, 1, clientMethodTag) checkCount(t, ocgrpc.ServerStartedRPCsView, 1, serverMethodTag) checkCount(t, ocgrpc.ClientCompletedRPCsView, 1, clientMethodTag, clientStatusOKTag) checkCount(t, ocgrpc.ServerCompletedRPCsView, 1, serverMethodTag, serverStatusOKTag) tcs := []struct { v *view.View tag tag.Tag }{ {ocgrpc.ClientSentMessagesPerRPCView, clientMethodTag}, {ocgrpc.ServerReceivedMessagesPerRPCView, serverMethodTag}, {ocgrpc.ServerSentMessagesPerRPCView, serverMethodTag}, {ocgrpc.ClientReceivedMessagesPerRPCView, clientMethodTag}, } for _, tt := range tcs { serverSent := getDistribution(t, tt.v, tt.tag) if got, want := serverSent.Mean, float64(msgCount); got != want { t.Errorf("%q.Count = %v; want %v", ocgrpc.ServerSentMessagesPerRPCView.Name, got, want) } } } func checkCount(t *testing.T, v *view.View, want int64, tags ...tag.Tag) { if got, ok := getCount(t, v, tags...); ok && got != want { t.Errorf("View[name=%q].Row[tags=%v].Data = %d; want %d", v.Name, tags, got, want) } } func getCount(t *testing.T, v *view.View, tags ...tag.Tag) (int64, bool) { if len(tags) != len(v.TagKeys) { t.Errorf("Invalid tag specification, want %#v tags got %#v", v.TagKeys, tags) return 0, false } for i := range v.TagKeys { if tags[i].Key != v.TagKeys[i] { t.Errorf("Invalid tag specification, want %#v tags got %#v", v.TagKeys, tags) return 0, false } } rows, err := view.RetrieveData(v.Name) if err != nil { t.Fatal(err) } var foundRow *view.Row for _, row := range rows { if reflect.DeepEqual(row.Tags, tags) { foundRow = row break } } if foundRow == nil { var gotTags [][]tag.Tag for _, row := range rows { gotTags = append(gotTags, row.Tags) } t.Errorf("Failed to find row with keys %v among:\n%v", tags, gotTags) return 0, false } return foundRow.Data.(*view.CountData).Value, true } func getDistribution(t *testing.T, v *view.View, tags ...tag.Tag) *view.DistributionData { if len(tags) != len(v.TagKeys) { t.Fatalf("Invalid tag specification, want %#v tags got %#v", v.TagKeys, tags) return nil } for i := range v.TagKeys { if tags[i].Key != v.TagKeys[i] { t.Fatalf("Invalid tag specification, want %#v tags got %#v", v.TagKeys, tags) return nil } } rows, err := view.RetrieveData(v.Name) if err != nil { t.Fatal(err) } var foundRow *view.Row for _, row := range rows { if reflect.DeepEqual(row.Tags, tags) { foundRow = row break } } if foundRow == nil { var gotTags [][]tag.Tag for _, row := range rows { gotTags = append(gotTags, row.Tags) } t.Fatalf("Failed to find row with keys %v among:\n%v", tags, gotTags) return nil } return foundRow.Data.(*view.DistributionData) } opencensus-go-0.24.0/plugin/ocgrpc/example_test.go000066400000000000000000000027171433102037600221640ustar00rootroot00000000000000// Copyright 2017, 
OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ocgrpc_test import ( "log" "go.opencensus.io/plugin/ocgrpc" "go.opencensus.io/stats/view" "google.golang.org/grpc" ) func ExampleClientHandler() { // Register views to collect data. if err := view.Register(ocgrpc.DefaultClientViews...); err != nil { log.Fatal(err) } // Set up a connection to the server with the OpenCensus // stats handler to enable stats and tracing. conn, err := grpc.Dial("address", grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) if err != nil { log.Fatalf("did not connect: %v", err) } defer conn.Close() } func ExampleServerHandler() { // Register views to collect data. if err := view.Register(ocgrpc.DefaultServerViews...); err != nil { log.Fatal(err) } // Set up a new server with the OpenCensus // stats handler to enable stats and tracing. s := grpc.NewServer(grpc.StatsHandler(&ocgrpc.ServerHandler{})) _ = s // use s } opencensus-go-0.24.0/plugin/ocgrpc/grpc_test.go000066400000000000000000000065711433102037600214660ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package ocgrpc import ( "context" "sync" "testing" "time" "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" "go.opencensus.io/stats/view" "go.opencensus.io/trace" ) func TestClientHandler(t *testing.T) { ctx := context.Background() te := &traceExporter{} trace.RegisterExporter(te) if err := view.Register(ClientSentMessagesPerRPCView); err != nil { t.Fatal(err) } defer view.Unregister(ClientSentMessagesPerRPCView) ctx, _ = trace.StartSpan(ctx, "/foo", trace.WithSampler(trace.AlwaysSample())) var handler ClientHandler ctx = handler.TagRPC(ctx, &stats.RPCTagInfo{ FullMethodName: "/service.foo/method", }) handler.HandleRPC(ctx, &stats.Begin{ Client: true, BeginTime: time.Now(), }) handler.HandleRPC(ctx, &stats.End{ Client: true, EndTime: time.Now(), }) stats, err := view.RetrieveData(ClientSentMessagesPerRPCView.Name) if err != nil { t.Fatal(err) } traces := te.buffer if got, want := len(stats), 1; got != want { t.Errorf("Got %v stats; want %v", got, want) } if got, want := len(traces), 1; got != want { t.Errorf("Got %v traces; want %v", got, want) } } func TestServerHandler(t *testing.T) { tests := []struct { name string newTrace bool expectTraces int }{ {"trust_metadata", false, 1}, {"no_trust_metadata", true, 0}, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { ctx := context.Background() handler := &ServerHandler{ IsPublicEndpoint: test.newTrace, StartOptions: trace.StartOptions{ Sampler: trace.ProbabilitySampler(0.0), }, } te := &traceExporter{} trace.RegisterExporter(te) if err := view.Register(ServerCompletedRPCsView); err != nil { t.Fatal(err) } md := metadata.MD{ "grpc-trace-bin": []string{string([]byte{0, 0, 62, 116, 14, 118, 117, 157, 126, 7, 114, 152, 102, 125, 235, 34, 114, 238, 1, 187, 201, 24, 210, 231, 20, 175, 241, 2, 1})}, } ctx = metadata.NewIncomingContext(ctx, md) ctx = handler.TagRPC(ctx, &stats.RPCTagInfo{ FullMethodName: "/service.foo/method", }) handler.HandleRPC(ctx, &stats.Begin{ BeginTime: time.Now(), }) handler.HandleRPC(ctx, &stats.End{ EndTime: time.Now(), }) rows, err := view.RetrieveData(ServerCompletedRPCsView.Name) if err != nil { t.Fatal(err) } traces := te.buffer if got, want := len(rows), 1; got != want { t.Errorf("Got %v rows; want %v", got, want) } if got, want := len(traces), test.expectTraces; got != want { t.Errorf("Got %v traces; want %v", got, want) } // Cleanup. view.Unregister(ServerCompletedRPCsView) }) } } type traceExporter struct { mu sync.Mutex buffer []*trace.SpanData } func (e *traceExporter) ExportSpan(sd *trace.SpanData) { e.mu.Lock() e.buffer = append(e.buffer, sd) e.mu.Unlock() } opencensus-go-0.24.0/plugin/ocgrpc/server.go000066400000000000000000000055361433102037600210020ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ocgrpc import ( "context" "google.golang.org/grpc/stats" "go.opencensus.io/trace" ) // ServerHandler implements gRPC stats.Handler recording OpenCensus stats and // traces. Use with gRPC servers. 
// // When installed (see Example), tracing metadata is read from inbound RPCs // by default. If no tracing metadata is present, or if the tracing metadata is // present but the SpanContext isn't sampled, then a new trace may be started // (as determined by Sampler). type ServerHandler struct { // IsPublicEndpoint may be set to true to always start a new trace around // each RPC. Any SpanContext in the RPC metadata will be added as a linked // span instead of making it the parent of the span created around the // server RPC. // // Be aware that if you leave this false (the default) on a public-facing // server, callers will be able to send tracing metadata in gRPC headers // and trigger traces in your backend. IsPublicEndpoint bool // StartOptions to use for to spans started around RPCs handled by this server. // // These will apply even if there is tracing metadata already // present on the inbound RPC but the SpanContext is not sampled. This // ensures that each service has some opportunity to be traced. If you would // like to not add any additional traces for this gRPC service, set: // // StartOptions.Sampler = trace.ProbabilitySampler(0.0) // // StartOptions.SpanKind will always be set to trace.SpanKindServer // for spans started by this handler. StartOptions trace.StartOptions } var _ stats.Handler = (*ServerHandler)(nil) // HandleConn exists to satisfy gRPC stats.Handler. func (s *ServerHandler) HandleConn(ctx context.Context, cs stats.ConnStats) { // no-op } // TagConn exists to satisfy gRPC stats.Handler. func (s *ServerHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { // no-op return ctx } // HandleRPC implements per-RPC tracing and stats instrumentation. func (s *ServerHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { traceHandleRPC(ctx, rs) statsHandleRPC(ctx, rs) } // TagRPC implements per-RPC context management. func (s *ServerHandler) TagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { ctx = s.traceTagRPC(ctx, rti) ctx = s.statsTagRPC(ctx, rti) return ctx } opencensus-go-0.24.0/plugin/ocgrpc/server_metrics.go000066400000000000000000000110761433102037600225240ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package ocgrpc import ( "go.opencensus.io/stats" "go.opencensus.io/stats/view" "go.opencensus.io/tag" ) // The following variables are measures are recorded by ServerHandler: var ( ServerReceivedMessagesPerRPC = stats.Int64("grpc.io/server/received_messages_per_rpc", "Number of messages received in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) ServerReceivedBytesPerRPC = stats.Int64("grpc.io/server/received_bytes_per_rpc", "Total bytes received across all messages per RPC.", stats.UnitBytes) ServerSentMessagesPerRPC = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. 
Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) ServerSentBytesPerRPC = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent in across all response messages per RPC.", stats.UnitBytes) ServerStartedRPCs = stats.Int64("grpc.io/server/started_rpcs", "Number of started server RPCs.", stats.UnitDimensionless) ServerLatency = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds) ) // TODO(acetechnologist): This is temporary and will need to be replaced by a // mechanism to load these defaults from a common repository/config shared by // all supported languages. Likely a serialized protobuf of these defaults. // Predefined views may be registered to collect data for the above measures. // As always, you may also define your own custom views over measures collected by this // package. These are declared as a convenience only; none are registered by // default. var ( ServerReceivedBytesPerRPCView = &view.View{ Name: "grpc.io/server/received_bytes_per_rpc", Description: "Distribution of received bytes per RPC, by method.", Measure: ServerReceivedBytesPerRPC, TagKeys: []tag.Key{KeyServerMethod}, Aggregation: DefaultBytesDistribution, } ServerSentBytesPerRPCView = &view.View{ Name: "grpc.io/server/sent_bytes_per_rpc", Description: "Distribution of total sent bytes per RPC, by method.", Measure: ServerSentBytesPerRPC, TagKeys: []tag.Key{KeyServerMethod}, Aggregation: DefaultBytesDistribution, } ServerLatencyView = &view.View{ Name: "grpc.io/server/server_latency", Description: "Distribution of server latency in milliseconds, by method.", TagKeys: []tag.Key{KeyServerMethod}, Measure: ServerLatency, Aggregation: DefaultMillisecondsDistribution, } // Purposely reuses the count from `ServerLatency`, tagging // with method and status to result in ServerCompletedRpcs. ServerCompletedRPCsView = &view.View{ Name: "grpc.io/server/completed_rpcs", Description: "Count of RPCs by method and status.", TagKeys: []tag.Key{KeyServerMethod, KeyServerStatus}, Measure: ServerLatency, Aggregation: view.Count(), } ServerStartedRPCsView = &view.View{ Measure: ServerStartedRPCs, Name: "grpc.io/server/started_rpcs", Description: "Number of started server RPCs.", TagKeys: []tag.Key{KeyServerMethod}, Aggregation: view.Count(), } ServerReceivedMessagesPerRPCView = &view.View{ Name: "grpc.io/server/received_messages_per_rpc", Description: "Distribution of messages received count per RPC, by method.", TagKeys: []tag.Key{KeyServerMethod}, Measure: ServerReceivedMessagesPerRPC, Aggregation: DefaultMessageCountDistribution, } ServerSentMessagesPerRPCView = &view.View{ Name: "grpc.io/server/sent_messages_per_rpc", Description: "Distribution of messages sent count per RPC, by method.", TagKeys: []tag.Key{KeyServerMethod}, Measure: ServerSentMessagesPerRPC, Aggregation: DefaultMessageCountDistribution, } ) // DefaultServerViews are the default server views provided by this package. var DefaultServerViews = []*view.View{ ServerReceivedBytesPerRPCView, ServerSentBytesPerRPCView, ServerLatencyView, ServerCompletedRPCsView, } opencensus-go-0.24.0/plugin/ocgrpc/server_spec_test.go000066400000000000000000000133611433102037600230460ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package ocgrpc import ( "strings" "testing" "go.opencensus.io/stats" "go.opencensus.io/stats/view" ) func TestSpecServerMeasures(t *testing.T) { spec := ` | Measure name | Unit | Description | |------------------------------------------|------|-----------------------------------------------------------------------------------------------| | grpc.io/server/received_messages_per_rpc | 1 | Number of messages received in each RPC. Has value 1 for non-streaming RPCs. | | grpc.io/server/received_bytes_per_rpc | By | Total bytes received across all messages per RPC. | | grpc.io/server/sent_messages_per_rpc | 1 | Number of messages sent in each RPC. Has value 1 for non-streaming RPCs. | | grpc.io/server/sent_bytes_per_rpc | By | Total bytes sent in across all response messages per RPC. | | grpc.io/server/server_latency | ms | Time between first byte of request received to last byte of response sent, or terminal error. |` lines := strings.Split(spec, "\n")[3:] type measureDef struct { name string unit string desc string } measureDefs := make([]measureDef, 0, len(lines)) for _, line := range lines { cols := colSep.Split(line, -1)[1:] if len(cols) < 3 { t.Fatalf("Invalid config line %#v", cols) } measureDefs = append(measureDefs, measureDef{cols[0], cols[1], cols[2]}) } gotMeasures := []stats.Measure{ ServerReceivedMessagesPerRPC, ServerReceivedBytesPerRPC, ServerSentMessagesPerRPC, ServerSentBytesPerRPC, ServerLatency, } if got, want := len(gotMeasures), len(measureDefs); got != want { t.Fatalf("len(gotMeasures) = %d; want %d", got, want) } for i, m := range gotMeasures { defn := measureDefs[i] if got, want := m.Name(), defn.name; got != want { t.Errorf("Name = %q; want %q", got, want) } if got, want := m.Unit(), defn.unit; got != want { t.Errorf("%q: Unit = %q; want %q", defn.name, got, want) } if got, want := m.Description(), defn.desc; got != want { t.Errorf("%q: Description = %q; want %q", defn.name, got, want) } } } func TestSpecServerViews(t *testing.T) { defaultViewsSpec := ` | View name | Measure suffix | Aggregation | Tags suffix | |---------------------------------------|------------------------|--------------|------------------------------| | grpc.io/server/received_bytes_per_rpc | received_bytes_per_rpc | distribution | server_method | | grpc.io/server/sent_bytes_per_rpc | sent_bytes_per_rpc | distribution | server_method | | grpc.io/server/server_latency | server_latency | distribution | server_method | | grpc.io/server/completed_rpcs | server_latency | count | server_method, server_status |` extraViewsSpec := ` | View name | Measure suffix | Aggregation | Tags suffix | |------------------------------------------|---------------------------|--------------|---------------| | grpc.io/server/received_messages_per_rpc | received_messages_per_rpc | distribution | server_method | | grpc.io/server/sent_messages_per_rpc | sent_messages_per_rpc | distribution | server_method |` lines := strings.Split(defaultViewsSpec, "\n")[3:] lines = append(lines, strings.Split(extraViewsSpec, "\n")[3:]...) 
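// Parse the remaining markdown rows and compare them against the server views
// declared by this package, mirroring TestSpecClientViews.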
type viewDef struct { name string measureSuffix string aggregation string tags string } viewDefs := make([]viewDef, 0, len(lines)) for _, line := range lines { cols := colSep.Split(line, -1)[1:] if len(cols) < 4 { t.Fatalf("Invalid config line %#v", cols) } viewDefs = append(viewDefs, viewDef{cols[0], cols[1], cols[2], cols[3]}) } views := DefaultServerViews views = append(views, ServerReceivedMessagesPerRPCView, ServerSentMessagesPerRPCView) if got, want := len(views), len(viewDefs); got != want { t.Fatalf("len(gotMeasures) = %d; want %d", got, want) } for i, v := range views { defn := viewDefs[i] if got, want := v.Name, defn.name; got != want { t.Errorf("Name = %q; want %q", got, want) } if got, want := v.Measure.Name(), "grpc.io/server/"+defn.measureSuffix; got != want { t.Errorf("%q: Measure.Name = %q; want %q", defn.name, got, want) } switch v.Aggregation.Type { case view.AggTypeDistribution: if got, want := "distribution", defn.aggregation; got != want { t.Errorf("%q: Description = %q; want %q", defn.name, got, want) } case view.AggTypeCount: if got, want := "count", defn.aggregation; got != want { t.Errorf("%q: Description = %q; want %q", defn.name, got, want) } default: t.Errorf("Invalid aggregation type") } wantTags := strings.Split(defn.tags, ", ") if got, want := len(v.TagKeys), len(wantTags); got != want { t.Errorf("len(TagKeys) = %d; want %d", got, want) } for j := range wantTags { if got, want := v.TagKeys[j].Name(), "grpc_"+wantTags[j]; got != want { t.Errorf("TagKeys[%d].Name() = %q; want %q", j, got, want) } } } } opencensus-go-0.24.0/plugin/ocgrpc/server_stats_handler.go000066400000000000000000000035371433102037600237140ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package ocgrpc import ( "time" "context" "go.opencensus.io/tag" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/stats" ) // statsTagRPC gets the metadata from gRPC context, extracts the encoded tags from // it and creates a new tag.Map and puts them into the returned context. func (h *ServerHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { startTime := time.Now() if info == nil { if grpclog.V(2) { grpclog.Infof("opencensus: TagRPC called with nil info.") } return ctx } d := &rpcData{ startTime: startTime, method: info.FullMethodName, } propagated := h.extractPropagatedTags(ctx) ctx = tag.NewContext(ctx, propagated) ctx, _ = tag.New(ctx, tag.Upsert(KeyServerMethod, methodName(info.FullMethodName))) return context.WithValue(ctx, rpcDataKey, d) } // extractPropagatedTags creates a new tag map containing the tags extracted from the // gRPC metadata. 
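// It returns nil when no tags were propagated or when decoding them fails.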
func (h *ServerHandler) extractPropagatedTags(ctx context.Context) *tag.Map { buf := stats.Tags(ctx) if buf == nil { return nil } propagated, err := tag.Decode(buf) if err != nil { if grpclog.V(2) { grpclog.Warningf("opencensus: Failed to decode tags from gRPC metadata failed to decode: %v", err) } return nil } return propagated } opencensus-go-0.24.0/plugin/ocgrpc/server_stats_handler_test.go000066400000000000000000000247131433102037600247520ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package ocgrpc import ( "context" "reflect" "testing" "google.golang.org/grpc/codes" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "go.opencensus.io/metric/metricdata" "go.opencensus.io/stats/view" "go.opencensus.io/tag" "go.opencensus.io/trace" ) func TestServerDefaultCollections(t *testing.T) { k1 := tag.MustNewKey("k1") k2 := tag.MustNewKey("k2") type tagPair struct { k tag.Key v string } type wantData struct { v func() *view.View rows []*view.Row } type rpc struct { tags []tagPair tagInfo *stats.RPCTagInfo inPayloads []*stats.InPayload outPayloads []*stats.OutPayload end *stats.End } type testCase struct { label string rpcs []*rpc wants []*wantData } tcs := []testCase{ { "1", []*rpc{ { []tagPair{{k1, "v1"}}, &stats.RPCTagInfo{FullMethodName: "/package.service/method"}, []*stats.InPayload{ {Length: 10}, }, []*stats.OutPayload{ {Length: 10}, }, &stats.End{Error: nil}, }, }, []*wantData{ { func() *view.View { return ServerReceivedMessagesPerRPCView }, []*view.Row{ { Tags: []tag.Tag{ {Key: KeyServerMethod, Value: "package.service/method"}, }, Data: newDistributionData([]int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 1, 1, 1, 0), }, }, }, { func() *view.View { return ServerSentMessagesPerRPCView }, []*view.Row{ { Tags: []tag.Tag{ {Key: KeyServerMethod, Value: "package.service/method"}, }, Data: newDistributionData([]int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 1, 1, 1, 0), }, }, }, { func() *view.View { return ServerReceivedBytesPerRPCView }, []*view.Row{ { Tags: []tag.Tag{ {Key: KeyServerMethod, Value: "package.service/method"}, }, Data: newDistributionData([]int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 10, 10, 10, 0), }, }, }, { func() *view.View { return ServerSentBytesPerRPCView }, []*view.Row{ { Tags: []tag.Tag{ {Key: KeyServerMethod, Value: "package.service/method"}, }, Data: newDistributionData([]int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 10, 10, 10, 0), }, }, }, }, }, { "2", []*rpc{ { []tagPair{{k1, "v1"}}, &stats.RPCTagInfo{FullMethodName: "/package.service/method"}, []*stats.InPayload{ {Length: 10}, }, []*stats.OutPayload{ {Length: 10}, {Length: 10}, {Length: 10}, }, &stats.End{Error: nil}, }, { []tagPair{{k1, "v11"}}, &stats.RPCTagInfo{FullMethodName: "/package.service/method"}, []*stats.InPayload{ {Length: 10}, {Length: 10}, }, []*stats.OutPayload{ {Length: 10}, {Length: 10}, }, &stats.End{Error: status.Error(codes.Canceled, "canceled")}, }, }, []*wantData{ { 
func() *view.View { return ServerReceivedMessagesPerRPCView }, []*view.Row{ { Tags: []tag.Tag{ {Key: KeyServerMethod, Value: "package.service/method"}, }, Data: newDistributionData([]int64{0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 1, 2, 1.5, 0.5), }, }, }, { func() *view.View { return ServerSentMessagesPerRPCView }, []*view.Row{ { Tags: []tag.Tag{ {Key: KeyServerMethod, Value: "package.service/method"}, }, Data: newDistributionData([]int64{0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 2, 3, 2.5, 0.5), }, }, }, }, }, { "3", []*rpc{ { []tagPair{{k1, "v1"}}, &stats.RPCTagInfo{FullMethodName: "/package.service/method"}, []*stats.InPayload{ {Length: 1}, }, []*stats.OutPayload{ {Length: 1}, {Length: 1024}, {Length: 65536}, }, &stats.End{Error: nil}, }, { []tagPair{{k1, "v1"}, {k2, "v2"}}, &stats.RPCTagInfo{FullMethodName: "/package.service/method"}, []*stats.InPayload{ {Length: 1024}, }, []*stats.OutPayload{ {Length: 4096}, {Length: 16384}, }, &stats.End{Error: status.Error(codes.Aborted, "aborted")}, }, { []tagPair{{k1, "v11"}, {k2, "v22"}}, &stats.RPCTagInfo{FullMethodName: "/package.service/method"}, []*stats.InPayload{ {Length: 2048}, {Length: 16384}, }, []*stats.OutPayload{ {Length: 2048}, {Length: 4096}, {Length: 16384}, }, &stats.End{Error: status.Error(codes.Canceled, "canceled")}, }, }, []*wantData{ { func() *view.View { return ServerReceivedMessagesPerRPCView }, []*view.Row{ { Tags: []tag.Tag{ {Key: KeyServerMethod, Value: "package.service/method"}, }, Data: newDistributionData([]int64{0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 1, 2, 1.333333333, 0.333333333*2), }, }, }, { func() *view.View { return ServerSentMessagesPerRPCView }, []*view.Row{ { Tags: []tag.Tag{ {Key: KeyServerMethod, Value: "package.service/method"}, }, Data: newDistributionData([]int64{0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 2, 3, 2.666666666, 0.333333333*2), }, }, }, { func() *view.View { return ServerReceivedBytesPerRPCView }, []*view.Row{ { Tags: []tag.Tag{ {Key: KeyServerMethod, Value: "package.service/method"}, }, Data: newDistributionData([]int64{1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 1, 18432, 6485.6666667, 2.1459558466666667e+08), }, }, }, { func() *view.View { return ServerSentBytesPerRPCView }, []*view.Row{ { Tags: []tag.Tag{ {Key: KeyServerMethod, Value: "package.service/method"}, }, Data: newDistributionData([]int64{0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 20480, 66561, 36523, 1.355519318e+09), }, }, }, }, }, } views := append(DefaultServerViews[:], ServerReceivedMessagesPerRPCView, ServerSentMessagesPerRPCView) for _, tc := range tcs { if err := view.Register(views...); err != nil { t.Fatal(err) } h := &ServerHandler{} h.StartOptions.Sampler = trace.NeverSample() for _, rpc := range tc.rpcs { mods := []tag.Mutator{} for _, t := range rpc.tags { mods = append(mods, tag.Upsert(t.k, t.v)) } ctx, err := tag.New(context.Background(), mods...) 
if err != nil { t.Errorf("%q: NewMap = %v", tc.label, err) } encoded := tag.Encode(tag.FromContext(ctx)) ctx = stats.SetTags(context.Background(), encoded) ctx = h.TagRPC(ctx, rpc.tagInfo) for _, in := range rpc.inPayloads { h.HandleRPC(ctx, in) } for _, out := range rpc.outPayloads { h.HandleRPC(ctx, out) } h.HandleRPC(ctx, rpc.end) } for _, wantData := range tc.wants { gotRows, err := view.RetrieveData(wantData.v().Name) if err != nil { t.Errorf("%q: RetrieveData (%q) = %v", tc.label, wantData.v().Name, err) continue } for i := range gotRows { view.ClearStart(gotRows[i].Data) } for _, gotRow := range gotRows { if !containsRow(wantData.rows, gotRow) { t.Errorf("%q: unwanted row for view %q: %v", tc.label, wantData.v().Name, gotRow) break } } for _, wantRow := range wantData.rows { if !containsRow(gotRows, wantRow) { t.Errorf("%q: missing row for view %q: %v", tc.label, wantData.v().Name, wantRow) break } } } // Unregister views to cleanup. view.Unregister(views...) } } func newDistributionData(countPerBucket []int64, count int64, min, max, mean, sumOfSquaredDev float64) *view.DistributionData { return &view.DistributionData{ Count: count, Min: min, Max: max, Mean: mean, SumOfSquaredDev: sumOfSquaredDev, CountPerBucket: countPerBucket, } } func TestServerRecordExemplar(t *testing.T) { key := tag.MustNewKey("test_key") tagInfo := &stats.RPCTagInfo{FullMethodName: "/package.service/method"} out := &stats.OutPayload{Length: 2000} end := &stats.End{Error: nil} if err := view.Register(ServerSentBytesPerRPCView); err != nil { t.Error(err) } h := &ServerHandler{} h.StartOptions.Sampler = trace.AlwaysSample() ctx, err := tag.New(context.Background(), tag.Upsert(key, "test_val")) if err != nil { t.Error(err) } encoded := tag.Encode(tag.FromContext(ctx)) ctx = stats.SetTags(context.Background(), encoded) ctx = h.TagRPC(ctx, tagInfo) out.Client = false h.HandleRPC(ctx, out) end.Client = false h.HandleRPC(ctx, end) span := trace.FromContext(ctx) if span == nil { t.Fatal("expected non-nil span, got nil") } if !span.IsRecordingEvents() { t.Errorf("span should be sampled") } attachments := map[string]interface{}{metricdata.AttachmentKeySpanContext: span.SpanContext()} wantExemplar := &metricdata.Exemplar{Value: 2000, Attachments: attachments} rows, err := view.RetrieveData(ServerSentBytesPerRPCView.Name) if err != nil { t.Fatal("Error RetrieveData ", err) } if len(rows) == 0 { t.Fatal("No data was recorded.") } data := rows[0].Data dis, ok := data.(*view.DistributionData) if !ok { t.Fatal("want DistributionData, got ", data) } // Only recorded value is 2000, which falls into the second bucket (1024, 2048]. wantBuckets := []int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} if !reflect.DeepEqual(dis.CountPerBucket, wantBuckets) { t.Errorf("want buckets %v, got %v", wantBuckets, dis.CountPerBucket) } for i, e := range dis.ExemplarsPerBucket { // Only the second bucket should have an exemplar. if i == 1 { if diff := cmpExemplar(e, wantExemplar); diff != "" { t.Fatalf("Unexpected Exemplar -got +want: %s", diff) } } else if e != nil { t.Errorf("want nil exemplar, got %v", e) } } // Unregister views to cleanup. view.Unregister(ServerSentBytesPerRPCView) } opencensus-go-0.24.0/plugin/ocgrpc/stats_common.go000066400000000000000000000164261433102037600222020ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package ocgrpc import ( "context" "strconv" "strings" "sync/atomic" "time" "go.opencensus.io/metric/metricdata" ocstats "go.opencensus.io/stats" "go.opencensus.io/stats/view" "go.opencensus.io/tag" "go.opencensus.io/trace" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) type grpcInstrumentationKey string // rpcData holds the instrumentation RPC data that is needed between the start // and end of a call. It holds the info that this package needs to keep track // of between the various GRPC events. type rpcData struct { // sentCount, sentBytes, recvCount and recvBytes have to be the first words // in order to be 64-bit aligned on 32-bit architectures. sentCount, sentBytes, recvCount, recvBytes int64 // access atomically // startTime represents the time at which TagRPC was invoked at the // beginning of an RPC. It is an approximation of the time when the // application code invoked GRPC code. startTime time.Time method string } // The following variables define the default hard-coded auxiliary data used by // both the default GRPC client and GRPC server metrics. var ( DefaultBytesDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) DefaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) DefaultMessageCountDistribution = view.Distribution(1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536) ) // Server tags are applied to the context used to process each RPC, as well as // the measures at the end of each RPC. var ( KeyServerMethod = tag.MustNewKey("grpc_server_method") KeyServerStatus = tag.MustNewKey("grpc_server_status") ) // Client tags are applied to measures at the end of each RPC. var ( KeyClientMethod = tag.MustNewKey("grpc_client_method") KeyClientStatus = tag.MustNewKey("grpc_client_status") ) var ( rpcDataKey = grpcInstrumentationKey("opencensus-rpcData") ) func methodName(fullname string) string { return strings.TrimLeft(fullname, "/") } // statsHandleRPC processes the RPC events.
func statsHandleRPC(ctx context.Context, s stats.RPCStats) { switch st := s.(type) { case *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer: // do nothing for client case *stats.Begin: handleRPCBegin(ctx, st) case *stats.OutPayload: handleRPCOutPayload(ctx, st) case *stats.InPayload: handleRPCInPayload(ctx, st) case *stats.End: handleRPCEnd(ctx, st) default: grpclog.Infof("unexpected stats: %T", st) } } func handleRPCBegin(ctx context.Context, s *stats.Begin) { d, ok := ctx.Value(rpcDataKey).(*rpcData) if !ok { if grpclog.V(2) { grpclog.Infoln("Failed to retrieve *rpcData from context.") } } if s.IsClient() { ocstats.RecordWithOptions(ctx, ocstats.WithTags(tag.Upsert(KeyClientMethod, methodName(d.method))), ocstats.WithMeasurements(ClientStartedRPCs.M(1))) } else { ocstats.RecordWithOptions(ctx, ocstats.WithTags(tag.Upsert(KeyClientMethod, methodName(d.method))), ocstats.WithMeasurements(ServerStartedRPCs.M(1))) } } func handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) { d, ok := ctx.Value(rpcDataKey).(*rpcData) if !ok { if grpclog.V(2) { grpclog.Infoln("Failed to retrieve *rpcData from context.") } return } atomic.AddInt64(&d.sentBytes, int64(s.Length)) atomic.AddInt64(&d.sentCount, 1) } func handleRPCInPayload(ctx context.Context, s *stats.InPayload) { d, ok := ctx.Value(rpcDataKey).(*rpcData) if !ok { if grpclog.V(2) { grpclog.Infoln("Failed to retrieve *rpcData from context.") } return } atomic.AddInt64(&d.recvBytes, int64(s.Length)) atomic.AddInt64(&d.recvCount, 1) } func handleRPCEnd(ctx context.Context, s *stats.End) { d, ok := ctx.Value(rpcDataKey).(*rpcData) if !ok { if grpclog.V(2) { grpclog.Infoln("Failed to retrieve *rpcData from context.") } return } elapsedTime := time.Since(d.startTime) var st string if s.Error != nil { s, ok := status.FromError(s.Error) if ok { st = statusCodeToString(s) } } else { st = "OK" } latencyMillis := float64(elapsedTime) / float64(time.Millisecond) attachments := getSpanCtxAttachment(ctx) if s.Client { ocstats.RecordWithOptions(ctx, ocstats.WithTags( tag.Upsert(KeyClientMethod, methodName(d.method)), tag.Upsert(KeyClientStatus, st)), ocstats.WithAttachments(attachments), ocstats.WithMeasurements( ClientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), ClientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), ClientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), ClientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), ClientRoundtripLatency.M(latencyMillis))) } else { ocstats.RecordWithOptions(ctx, ocstats.WithTags( tag.Upsert(KeyServerStatus, st), ), ocstats.WithAttachments(attachments), ocstats.WithMeasurements( ServerSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), ServerSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), ServerReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), ServerReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), ServerLatency.M(latencyMillis))) } } func statusCodeToString(s *status.Status) string { // see https://github.com/grpc/grpc/blob/master/doc/statuscodes.md switch c := s.Code(); c { case codes.OK: return "OK" case codes.Canceled: return "CANCELLED" case codes.Unknown: return "UNKNOWN" case codes.InvalidArgument: return "INVALID_ARGUMENT" case codes.DeadlineExceeded: return "DEADLINE_EXCEEDED" case codes.NotFound: return "NOT_FOUND" case codes.AlreadyExists: return "ALREADY_EXISTS" case codes.PermissionDenied: return "PERMISSION_DENIED" case codes.ResourceExhausted: return "RESOURCE_EXHAUSTED" case codes.FailedPrecondition: return "FAILED_PRECONDITION" 
case codes.Aborted: return "ABORTED" case codes.OutOfRange: return "OUT_OF_RANGE" case codes.Unimplemented: return "UNIMPLEMENTED" case codes.Internal: return "INTERNAL" case codes.Unavailable: return "UNAVAILABLE" case codes.DataLoss: return "DATA_LOSS" case codes.Unauthenticated: return "UNAUTHENTICATED" default: return "CODE_" + strconv.FormatInt(int64(c), 10) } } func getSpanCtxAttachment(ctx context.Context) metricdata.Attachments { attachments := map[string]interface{}{} span := trace.FromContext(ctx) if span == nil { return attachments } spanCtx := span.SpanContext() if spanCtx.IsSampled() { attachments[metricdata.AttachmentKeySpanContext] = spanCtx } return attachments } opencensus-go-0.24.0/plugin/ocgrpc/trace_common.go000066400000000000000000000075261433102037600221430ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ocgrpc import ( "context" "strings" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "go.opencensus.io/trace" "go.opencensus.io/trace/propagation" ) const traceContextKey = "grpc-trace-bin" // TagRPC creates a new trace span for the client side of the RPC. // // It returns ctx with the new trace span added and a serialization of the // SpanContext added to the outgoing gRPC metadata. func (c *ClientHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { name := strings.TrimPrefix(rti.FullMethodName, "/") name = strings.Replace(name, "/", ".", -1) ctx, span := trace.StartSpan(ctx, name, trace.WithSampler(c.StartOptions.Sampler), trace.WithSpanKind(trace.SpanKindClient)) // span is ended by traceHandleRPC traceContextBinary := propagation.Binary(span.SpanContext()) return metadata.AppendToOutgoingContext(ctx, traceContextKey, string(traceContextBinary)) } // TagRPC creates a new trace span for the server side of the RPC. // // It checks the incoming gRPC metadata in ctx for a SpanContext, and if // it finds one, uses that SpanContext as the parent context of the new span. // // It returns ctx, with the new trace span added. func (s *ServerHandler) traceTagRPC(ctx context.Context, rti *stats.RPCTagInfo) context.Context { md, _ := metadata.FromIncomingContext(ctx) name := strings.TrimPrefix(rti.FullMethodName, "/") name = strings.Replace(name, "/", ".", -1) traceContext := md[traceContextKey] var ( parent trace.SpanContext haveParent bool ) if len(traceContext) > 0 { // Metadata with keys ending in -bin are actually binary. 
They are base64 // encoded before being put on the wire, see: // https://github.com/grpc/grpc-go/blob/08d6261/Documentation/grpc-metadata.md#storing-binary-data-in-metadata traceContextBinary := []byte(traceContext[0]) parent, haveParent = propagation.FromBinary(traceContextBinary) if haveParent && !s.IsPublicEndpoint { ctx, _ := trace.StartSpanWithRemoteParent(ctx, name, parent, trace.WithSpanKind(trace.SpanKindServer), trace.WithSampler(s.StartOptions.Sampler), ) return ctx } } ctx, span := trace.StartSpan(ctx, name, trace.WithSpanKind(trace.SpanKindServer), trace.WithSampler(s.StartOptions.Sampler)) if haveParent { span.AddLink(trace.Link{TraceID: parent.TraceID, SpanID: parent.SpanID, Type: trace.LinkTypeChild}) } return ctx } func traceHandleRPC(ctx context.Context, rs stats.RPCStats) { span := trace.FromContext(ctx) // TODO: compressed and uncompressed sizes are not populated in every message. switch rs := rs.(type) { case *stats.Begin: span.AddAttributes( trace.BoolAttribute("Client", rs.Client), trace.BoolAttribute("FailFast", rs.FailFast)) case *stats.InPayload: span.AddMessageReceiveEvent(0 /* TODO: messageID */, int64(rs.Length), int64(rs.WireLength)) case *stats.OutPayload: span.AddMessageSendEvent(0, int64(rs.Length), int64(rs.WireLength)) case *stats.End: if rs.Error != nil { s, ok := status.FromError(rs.Error) if ok { span.SetStatus(trace.Status{Code: int32(s.Code()), Message: s.Message()}) } else { span.SetStatus(trace.Status{Code: int32(codes.Internal), Message: rs.Error.Error()}) } } span.End() } } opencensus-go-0.24.0/plugin/ocgrpc/trace_common_test.go000066400000000000000000000023751433102037600231770ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ocgrpc import ( "context" "testing" "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" "go.opencensus.io/trace" ) func TestClientHandler_traceTagRPC(t *testing.T) { ch := &ClientHandler{} ch.StartOptions.Sampler = trace.AlwaysSample() rti := &stats.RPCTagInfo{ FullMethodName: "xxx", } ctx := context.Background() ctx = ch.traceTagRPC(ctx, rti) span := trace.FromContext(ctx) if span == nil { t.Fatal("expected span, got nil") } if !span.IsRecordingEvents() { t.Errorf("span should be sampled") } md, ok := metadata.FromOutgoingContext(ctx) if !ok || len(md) == 0 || len(md[traceContextKey]) == 0 { t.Fatal("no metadata") } } opencensus-go-0.24.0/plugin/ocgrpc/trace_test.go000066400000000000000000000130511433102037600216200ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
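// Illustrative sketch (not part of the archived sources): how the ocgrpc
// handlers above are typically wired into a gRPC client and server. The
// address "localhost:9000" and the use of grpc.WithInsecure are placeholder
// assumptions to keep the example short.
package main

import (
	"log"
	"net"

	"go.opencensus.io/plugin/ocgrpc"
	"go.opencensus.io/stats/view"
	"google.golang.org/grpc"
)

func main() {
	// Register the default views so the measures recorded by statsHandleRPC
	// are aggregated and become available to exporters.
	if err := view.Register(ocgrpc.DefaultClientViews...); err != nil {
		log.Fatalf("register client views: %v", err)
	}
	if err := view.Register(ocgrpc.DefaultServerViews...); err != nil {
		log.Fatalf("register server views: %v", err)
	}

	// Server side: ServerHandler tags each RPC (TagRPC) and records stats and
	// trace events for it (HandleRPC), as exercised by the tests above.
	ln, err := net.Listen("tcp", "localhost:9000")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	srv := grpc.NewServer(grpc.StatsHandler(&ocgrpc.ServerHandler{}))
	go func() { _ = srv.Serve(ln) }()
	defer srv.Stop()

	// Client side: ClientHandler records the client measures and propagates
	// the span context through the grpc-trace-bin metadata key handled in
	// trace_common.go.
	conn, err := grpc.Dial("localhost:9000",
		grpc.WithInsecure(),
		grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
	_ = conn // invoke generated stubs against this connection
}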
// See the License for the specific language governing permissions and // limitations under the License. package ocgrpc_test import ( "context" "io" "testing" "time" "go.opencensus.io/internal/testpb" "go.opencensus.io/trace" ) type testExporter struct { ch chan *trace.SpanData } func (t *testExporter) ExportSpan(s *trace.SpanData) { go func() { t.ch <- s }() } func TestStreaming(t *testing.T) { trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) te := testExporter{make(chan *trace.SpanData)} trace.RegisterExporter(&te) defer trace.UnregisterExporter(&te) client, cleanup := testpb.NewTestClient(t) stream, err := client.Multiple(context.Background()) if err != nil { t.Fatalf("Call failed: %v", err) } err = stream.Send(&testpb.FooRequest{}) if err != nil { t.Fatalf("Couldn't send streaming request: %v", err) } stream.CloseSend() for { _, err := stream.Recv() if err == io.EOF { break } if err != nil { t.Errorf("stream.Recv() = %v; want no errors", err) } } cleanup() s1 := <-te.ch s2 := <-te.ch checkSpanData(t, s1, s2, "testpb.Foo.Multiple", true) select { case <-te.ch: t.Fatal("received extra exported spans") case <-time.After(time.Second / 10): } } func TestStreamingFail(t *testing.T) { trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) te := testExporter{make(chan *trace.SpanData)} trace.RegisterExporter(&te) defer trace.UnregisterExporter(&te) client, cleanup := testpb.NewTestClient(t) stream, err := client.Multiple(context.Background()) if err != nil { t.Fatalf("Call failed: %v", err) } err = stream.Send(&testpb.FooRequest{Fail: true}) if err != nil { t.Fatalf("Couldn't send streaming request: %v", err) } stream.CloseSend() for { _, err := stream.Recv() if err == nil || err == io.EOF { t.Errorf("stream.Recv() = %v; want errors", err) } else { break } } s1 := <-te.ch s2 := <-te.ch checkSpanData(t, s1, s2, "testpb.Foo.Multiple", false) cleanup() select { case <-te.ch: t.Fatal("received extra exported spans") case <-time.After(time.Second / 10): } } func TestSingle(t *testing.T) { trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) te := testExporter{make(chan *trace.SpanData)} trace.RegisterExporter(&te) defer trace.UnregisterExporter(&te) client, cleanup := testpb.NewTestClient(t) _, err := client.Single(context.Background(), &testpb.FooRequest{}) if err != nil { t.Fatalf("Couldn't send request: %v", err) } s1 := <-te.ch s2 := <-te.ch checkSpanData(t, s1, s2, "testpb.Foo.Single", true) cleanup() select { case <-te.ch: t.Fatal("received extra exported spans") case <-time.After(time.Second / 10): } } func TestServerSpanDuration(t *testing.T) { client, cleanup := testpb.NewTestClient(t) defer cleanup() te := testExporter{make(chan *trace.SpanData, 100)} trace.RegisterExporter(&te) defer trace.UnregisterExporter(&te) trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) ctx := context.Background() const sleep = 100 * time.Millisecond client.Single(ctx, &testpb.FooRequest{SleepNanos: int64(sleep)}) loop: for { select { case span := <-te.ch: if span.SpanKind != trace.SpanKindServer { continue loop } if got, want := span.EndTime.Sub(span.StartTime), sleep; got < want { t.Errorf("span duration = %dns; want at least %dns", got, want) } break loop default: t.Fatal("no more spans") } } } func TestSingleFail(t *testing.T) { trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) te := testExporter{make(chan *trace.SpanData)} trace.RegisterExporter(&te) defer trace.UnregisterExporter(&te) client, cleanup := 
testpb.NewTestClient(t) _, err := client.Single(context.Background(), &testpb.FooRequest{Fail: true}) if err == nil { t.Fatalf("Got nil error from request, want non-nil") } s1 := <-te.ch s2 := <-te.ch checkSpanData(t, s1, s2, "testpb.Foo.Single", false) cleanup() select { case <-te.ch: t.Fatal("received extra exported spans") case <-time.After(time.Second / 10): } } func checkSpanData(t *testing.T, s1, s2 *trace.SpanData, methodName string, success bool) { t.Helper() if s1.SpanKind == trace.SpanKindServer { s1, s2 = s2, s1 } if got, want := s1.Name, methodName; got != want { t.Errorf("Got name %q want %q", got, want) } if got, want := s2.Name, methodName; got != want { t.Errorf("Got name %q want %q", got, want) } if got, want := s2.SpanContext.TraceID, s1.SpanContext.TraceID; got != want { t.Errorf("Got trace IDs %s and %s, want them equal", got, want) } if got, want := s2.ParentSpanID, s1.SpanContext.SpanID; got != want { t.Errorf("Got ParentSpanID %s, want %s", got, want) } if got := (s1.Status.Code == 0); got != success { t.Errorf("Got success=%t want %t", got, success) } if got := (s2.Status.Code == 0); got != success { t.Errorf("Got success=%t want %t", got, success) } if s1.HasRemoteParent { t.Errorf("Got HasRemoteParent=%t, want false", s1.HasRemoteParent) } if !s2.HasRemoteParent { t.Errorf("Got HasRemoteParent=%t, want true", s2.HasRemoteParent) } } opencensus-go-0.24.0/plugin/ochttp/000077500000000000000000000000001433102037600171605ustar00rootroot00000000000000opencensus-go-0.24.0/plugin/ochttp/client.go000066400000000000000000000071251433102037600207720ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ochttp import ( "net/http" "net/http/httptrace" "go.opencensus.io/trace" "go.opencensus.io/trace/propagation" ) // Transport is an http.RoundTripper that instruments all outgoing requests with // OpenCensus stats and tracing. // // The zero value is intended to be a useful default, but for // now it's recommended that you explicitly set Propagation, since the default // for this may change. type Transport struct { // Base may be set to wrap another http.RoundTripper that does the actual // requests. By default http.DefaultTransport is used. // // If base HTTP roundtripper implements CancelRequest, // the returned round tripper will be cancelable. Base http.RoundTripper // Propagation defines how traces are propagated. If unspecified, a default // (currently B3 format) will be used. Propagation propagation.HTTPFormat // StartOptions are applied to the span started by this Transport around each // request. // // StartOptions.SpanKind will always be set to trace.SpanKindClient // for spans started by this transport. StartOptions trace.StartOptions // GetStartOptions allows to set start options per request. If set, // StartOptions is going to be ignored. 
GetStartOptions func(*http.Request) trace.StartOptions // NameFromRequest holds the function to use for generating the span name // from the information found in the outgoing HTTP Request. By default the // name equals the URL Path. FormatSpanName func(*http.Request) string // NewClientTrace may be set to a function allowing the current *trace.Span // to be annotated with HTTP request event information emitted by the // httptrace package. NewClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace // TODO: Implement tag propagation for HTTP. } // RoundTrip implements http.RoundTripper, delegating to Base and recording stats and traces for the request. func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { rt := t.base() if isHealthEndpoint(req.URL.Path) { return rt.RoundTrip(req) } // TODO: remove excessive nesting of http.RoundTrippers here. format := t.Propagation if format == nil { format = defaultFormat } spanNameFormatter := t.FormatSpanName if spanNameFormatter == nil { spanNameFormatter = spanNameFromURL } startOpts := t.StartOptions if t.GetStartOptions != nil { startOpts = t.GetStartOptions(req) } rt = &traceTransport{ base: rt, format: format, startOptions: trace.StartOptions{ Sampler: startOpts.Sampler, SpanKind: trace.SpanKindClient, }, formatSpanName: spanNameFormatter, newClientTrace: t.NewClientTrace, } rt = statsTransport{base: rt} return rt.RoundTrip(req) } func (t *Transport) base() http.RoundTripper { if t.Base != nil { return t.Base } return http.DefaultTransport } // CancelRequest cancels an in-flight request by closing its connection. func (t *Transport) CancelRequest(req *http.Request) { type canceler interface { CancelRequest(*http.Request) } if cr, ok := t.base().(canceler); ok { cr.CancelRequest(req) } } opencensus-go-0.24.0/plugin/ochttp/client_stats.go000066400000000000000000000071471433102037600222140ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ochttp import ( "context" "io" "net/http" "strconv" "sync" "time" "go.opencensus.io/stats" "go.opencensus.io/tag" ) // statsTransport is an http.RoundTripper that collects stats for the outgoing requests. type statsTransport struct { base http.RoundTripper } // RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request. func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) { ctx, _ := tag.New(req.Context(), tag.Upsert(KeyClientHost, req.Host), tag.Upsert(Host, req.Host), tag.Upsert(KeyClientPath, req.URL.Path), tag.Upsert(Path, req.URL.Path), tag.Upsert(KeyClientMethod, req.Method), tag.Upsert(Method, req.Method)) req = req.WithContext(ctx) track := &tracker{ start: time.Now(), ctx: ctx, } if req.Body == nil { // TODO: Handle cases where ContentLength is not set. track.reqSize = -1 } else if req.ContentLength > 0 { track.reqSize = req.ContentLength } stats.Record(ctx, ClientRequestCount.M(1)) // Perform request. 
resp, err := t.base.RoundTrip(req) if err != nil { track.statusCode = http.StatusInternalServerError track.end() } else { track.statusCode = resp.StatusCode if req.Method != "HEAD" { track.respContentLength = resp.ContentLength } if resp.Body == nil { track.end() } else { track.body = resp.Body resp.Body = wrappedBody(track, resp.Body) } } return resp, err } // CancelRequest cancels an in-flight request by closing its connection. func (t statsTransport) CancelRequest(req *http.Request) { type canceler interface { CancelRequest(*http.Request) } if cr, ok := t.base.(canceler); ok { cr.CancelRequest(req) } } type tracker struct { ctx context.Context respSize int64 respContentLength int64 reqSize int64 start time.Time body io.ReadCloser statusCode int endOnce sync.Once } var _ io.ReadCloser = (*tracker)(nil) func (t *tracker) end() { t.endOnce.Do(func() { latencyMs := float64(time.Since(t.start)) / float64(time.Millisecond) respSize := t.respSize if t.respSize == 0 && t.respContentLength > 0 { respSize = t.respContentLength } m := []stats.Measurement{ ClientSentBytes.M(t.reqSize), ClientReceivedBytes.M(respSize), ClientRoundtripLatency.M(latencyMs), ClientLatency.M(latencyMs), ClientResponseBytes.M(t.respSize), } if t.reqSize >= 0 { m = append(m, ClientRequestBytes.M(t.reqSize)) } stats.RecordWithTags(t.ctx, []tag.Mutator{ tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)), tag.Upsert(KeyClientStatus, strconv.Itoa(t.statusCode)), }, m...) }) } func (t *tracker) Read(b []byte) (int, error) { n, err := t.body.Read(b) t.respSize += int64(n) switch err { case nil: return n, nil case io.EOF: t.end() } return n, err } func (t *tracker) Close() error { // Invoking endSpan on Close will help catch the cases // in which a read returned a non-nil error, we set the // span status but didn't end the span. t.end() return t.body.Close() } opencensus-go-0.24.0/plugin/ochttp/client_test.go000066400000000000000000000167631433102037600220410ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
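// Illustrative sketch (not part of the archived sources): using the Transport
// from client.go so every outgoing request is traced and recorded by the
// statsTransport/tracker defined in client_stats.go. The target URL and the
// AlwaysSample sampler are placeholder choices for the example.
package main

import (
	"io/ioutil"
	"log"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/trace"
)

func main() {
	// Register the current client views so the recorded measures are aggregated.
	if err := view.Register(
		ochttp.ClientSentBytesDistribution,
		ochttp.ClientReceivedBytesDistribution,
		ochttp.ClientRoundtripLatencyDistribution,
		ochttp.ClientCompletedCount,
	); err != nil {
		log.Fatal(err)
	}

	client := &http.Client{
		Transport: &ochttp.Transport{
			StartOptions: trace.StartOptions{Sampler: trace.AlwaysSample()},
		},
	}

	resp, err := client.Get("https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	// Reading and closing the body lets the tracker observe the response size
	// and finish the recording (see tracker.Read and tracker.Close above).
	if _, err := ioutil.ReadAll(resp.Body); err != nil {
		log.Fatal(err)
	}
}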
package ochttp_test import ( "fmt" "io/ioutil" "net/http" "net/http/httptest" "strings" "sync" "testing" "go.opencensus.io/plugin/ochttp" "go.opencensus.io/stats/view" "go.opencensus.io/trace" ) const reqCount = 5 func TestClientNew(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { resp.Write([]byte("Hello, world!")) })) defer server.Close() if err := view.Register( ochttp.ClientSentBytesDistribution, ochttp.ClientReceivedBytesDistribution, ochttp.ClientRoundtripLatencyDistribution, ochttp.ClientCompletedCount, ); err != nil { t.Fatalf("Failed to register ochttp.DefaultClientViews error: %v", err) } views := []string{ "opencensus.io/http/client/sent_bytes", "opencensus.io/http/client/received_bytes", "opencensus.io/http/client/roundtrip_latency", "opencensus.io/http/client/completed_count", } for _, name := range views { v := view.Find(name) if v == nil { t.Errorf("view not found %q", name) continue } } var wg sync.WaitGroup var tr ochttp.Transport errs := make(chan error, reqCount) wg.Add(reqCount) for i := 0; i < reqCount; i++ { go func() { defer wg.Done() req, err := http.NewRequest("POST", server.URL, strings.NewReader("req-body")) if err != nil { errs <- fmt.Errorf("error creating request: %v", err) } resp, err := tr.RoundTrip(req) if err != nil { errs <- fmt.Errorf("response error: %v", err) } if err := resp.Body.Close(); err != nil { errs <- fmt.Errorf("error closing response body: %v", err) } if got, want := resp.StatusCode, 200; got != want { errs <- fmt.Errorf("resp.StatusCode=%d; wantCount %d", got, want) } }() } go func() { wg.Wait() close(errs) }() for err := range errs { if err != nil { t.Fatal(err) } } for _, viewName := range views { v := view.Find(viewName) if v == nil { t.Errorf("view not found %q", viewName) continue } rows, err := view.RetrieveData(v.Name) if err != nil { t.Error(err) continue } if got, want := len(rows), 1; got != want { t.Errorf("len(%q) = %d; want %d", viewName, got, want) continue } data := rows[0].Data var count int64 switch data := data.(type) { case *view.CountData: count = data.Value case *view.DistributionData: count = data.Count default: t.Errorf("Unknown data type: %v", data) continue } if got := count; got != reqCount { t.Fatalf("%s = %d; want %d", viewName, got, reqCount) } } } func TestClientOld(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { resp.Write([]byte("Hello, world!")) })) defer server.Close() if err := view.Register(ochttp.DefaultClientViews...); err != nil { t.Fatalf("Failed to register ochttp.DefaultClientViews error: %v", err) } views := []string{ "opencensus.io/http/client/request_count", "opencensus.io/http/client/latency", "opencensus.io/http/client/request_bytes", "opencensus.io/http/client/response_bytes", } for _, name := range views { v := view.Find(name) if v == nil { t.Errorf("view not found %q", name) continue } } var wg sync.WaitGroup var tr ochttp.Transport errs := make(chan error, reqCount) wg.Add(reqCount) for i := 0; i < reqCount; i++ { go func() { defer wg.Done() req, err := http.NewRequest("POST", server.URL, strings.NewReader("req-body")) if err != nil { errs <- fmt.Errorf("error creating request: %v", err) } resp, err := tr.RoundTrip(req) if err != nil { errs <- fmt.Errorf("response error: %v", err) } if err := resp.Body.Close(); err != nil { errs <- fmt.Errorf("error closing response body: %v", err) } if got, want := resp.StatusCode, 200; got != want { errs <- 
fmt.Errorf("resp.StatusCode=%d; wantCount %d", got, want) } }() } go func() { wg.Wait() close(errs) }() for err := range errs { if err != nil { t.Fatal(err) } } for _, viewName := range views { v := view.Find(viewName) if v == nil { t.Errorf("view not found %q", viewName) continue } rows, err := view.RetrieveData(v.Name) if err != nil { t.Error(err) continue } if got, want := len(rows), 1; got != want { t.Errorf("len(%q) = %d; want %d", viewName, got, want) continue } data := rows[0].Data var count int64 switch data := data.(type) { case *view.CountData: count = data.Value case *view.DistributionData: count = data.Count default: t.Errorf("Unknown data type: %v", data) continue } if got := count; got != reqCount { t.Fatalf("%s = %d; want %d", viewName, got, reqCount) } } } var noTrace = trace.StartOptions{Sampler: trace.NeverSample()} func BenchmarkTransportNoTrace(b *testing.B) { benchmarkClientServer(b, &ochttp.Transport{StartOptions: noTrace}) } func BenchmarkTransport(b *testing.B) { benchmarkClientServer(b, &ochttp.Transport{}) } func benchmarkClientServer(b *testing.B, transport *ochttp.Transport) { b.ReportAllocs() ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { fmt.Fprintf(rw, "Hello world.\n") })) defer ts.Close() transport.StartOptions.Sampler = trace.AlwaysSample() var client http.Client client.Transport = transport b.ResetTimer() for i := 0; i < b.N; i++ { res, err := client.Get(ts.URL) if err != nil { b.Fatalf("Get: %v", err) } all, err := ioutil.ReadAll(res.Body) res.Body.Close() if err != nil { b.Fatal("ReadAll:", err) } body := string(all) if body != "Hello world.\n" { b.Fatal("Got body:", body) } } } func BenchmarkTransportParallel64NoTrace(b *testing.B) { benchmarkClientServerParallel(b, 64, &ochttp.Transport{StartOptions: noTrace}) } func BenchmarkTransportParallel64(b *testing.B) { benchmarkClientServerParallel(b, 64, &ochttp.Transport{}) } func benchmarkClientServerParallel(b *testing.B, parallelism int, transport *ochttp.Transport) { b.ReportAllocs() ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { fmt.Fprintf(rw, "Hello world.\n") })) defer ts.Close() var c http.Client transport.Base = &http.Transport{ MaxIdleConns: parallelism, MaxIdleConnsPerHost: parallelism, } transport.StartOptions.Sampler = trace.AlwaysSample() c.Transport = transport b.ResetTimer() // TODO(ramonza): replace with b.RunParallel (it didn't work when I tried) var wg sync.WaitGroup wg.Add(parallelism) for i := 0; i < parallelism; i++ { iterations := b.N / parallelism if i == 0 { iterations += b.N % parallelism } go func() { defer wg.Done() for j := 0; j < iterations; j++ { res, err := c.Get(ts.URL) if err != nil { b.Logf("Get: %v", err) return } all, err := ioutil.ReadAll(res.Body) res.Body.Close() if err != nil { b.Logf("ReadAll: %v", err) return } body := string(all) if body != "Hello world.\n" { panic("Got body: " + body) } } }() } wg.Wait() } opencensus-go-0.24.0/plugin/ochttp/doc.go000066400000000000000000000014751433102037600202630ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package ochttp provides OpenCensus instrumentation for net/http package. // // For server instrumentation, see Handler. For client-side instrumentation, // see Transport. package ochttp // import "go.opencensus.io/plugin/ochttp" opencensus-go-0.24.0/plugin/ochttp/example_test.go000066400000000000000000000040361433102037600222040ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ochttp_test import ( "log" "net/http" "go.opencensus.io/plugin/ochttp" "go.opencensus.io/plugin/ochttp/propagation/b3" "go.opencensus.io/stats/view" "go.opencensus.io/tag" ) func ExampleTransport() { // import ( // "go.opencensus.io/plugin/ochttp" // "go.opencensus.io/stats/view" // ) if err := view.Register( // Register a few default views. ochttp.ClientSentBytesDistribution, ochttp.ClientReceivedBytesDistribution, ochttp.ClientRoundtripLatencyDistribution, // Register a custom view. &view.View{ Name: "httpclient_latency_by_path", TagKeys: []tag.Key{ochttp.KeyClientPath}, Measure: ochttp.ClientRoundtripLatency, Aggregation: ochttp.DefaultLatencyDistribution, }, ); err != nil { log.Fatal(err) } client := &http.Client{ Transport: &ochttp.Transport{}, } // Use client to perform requests. _ = client } var usersHandler http.Handler func ExampleHandler() { // import "go.opencensus.io/plugin/ochttp" http.Handle("/users", ochttp.WithRouteTag(usersHandler, "/users")) // If no handler is specified, the default mux is used. log.Fatal(http.ListenAndServe("localhost:8080", &ochttp.Handler{})) } func ExampleHandler_mux() { // import "go.opencensus.io/plugin/ochttp" mux := http.NewServeMux() mux.Handle("/users", ochttp.WithRouteTag(usersHandler, "/users")) log.Fatal(http.ListenAndServe("localhost:8080", &ochttp.Handler{ Handler: mux, Propagation: &b3.HTTPFormat{}, })) } opencensus-go-0.24.0/plugin/ochttp/propagation/000077500000000000000000000000001433102037600215035ustar00rootroot00000000000000opencensus-go-0.24.0/plugin/ochttp/propagation/b3/000077500000000000000000000000001433102037600220075ustar00rootroot00000000000000opencensus-go-0.24.0/plugin/ochttp/propagation/b3/b3.go000066400000000000000000000071431433102037600226470ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package b3 contains a propagation.HTTPFormat implementation // for B3 propagation. See https://github.com/openzipkin/b3-propagation // for more details. package b3 // import "go.opencensus.io/plugin/ochttp/propagation/b3" import ( "encoding/hex" "net/http" "go.opencensus.io/trace" "go.opencensus.io/trace/propagation" ) // B3 headers that OpenCensus understands. const ( TraceIDHeader = "X-B3-TraceId" SpanIDHeader = "X-B3-SpanId" SampledHeader = "X-B3-Sampled" ) // HTTPFormat implements propagation.HTTPFormat to propagate // traces in HTTP headers in B3 propagation format. // HTTPFormat skips the X-B3-ParentId and X-B3-Flags headers // because there are additional fields not represented in the // OpenCensus span context. Spans created from the incoming // header will be the direct children of the client-side span. // Similarly, receiver of the outgoing spans should use client-side // span created by OpenCensus as the parent. type HTTPFormat struct{} var _ propagation.HTTPFormat = (*HTTPFormat)(nil) // SpanContextFromRequest extracts a B3 span context from incoming requests. func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { tid, ok := ParseTraceID(req.Header.Get(TraceIDHeader)) if !ok { return trace.SpanContext{}, false } sid, ok := ParseSpanID(req.Header.Get(SpanIDHeader)) if !ok { return trace.SpanContext{}, false } sampled, _ := ParseSampled(req.Header.Get(SampledHeader)) return trace.SpanContext{ TraceID: tid, SpanID: sid, TraceOptions: sampled, }, true } // ParseTraceID parses the value of the X-B3-TraceId header. func ParseTraceID(tid string) (trace.TraceID, bool) { if tid == "" { return trace.TraceID{}, false } b, err := hex.DecodeString(tid) if err != nil || len(b) > 16 { return trace.TraceID{}, false } var traceID trace.TraceID if len(b) <= 8 { // The lower 64-bits. start := 8 + (8 - len(b)) copy(traceID[start:], b) } else { start := 16 - len(b) copy(traceID[start:], b) } return traceID, true } // ParseSpanID parses the value of the X-B3-SpanId or X-B3-ParentSpanId headers. func ParseSpanID(sid string) (spanID trace.SpanID, ok bool) { if sid == "" { return trace.SpanID{}, false } b, err := hex.DecodeString(sid) if err != nil || len(b) > 8 { return trace.SpanID{}, false } start := 8 - len(b) copy(spanID[start:], b) return spanID, true } // ParseSampled parses the value of the X-B3-Sampled header. func ParseSampled(sampled string) (trace.TraceOptions, bool) { switch sampled { case "true", "1": return trace.TraceOptions(1), true default: return trace.TraceOptions(0), false } } // SpanContextToRequest modifies the given request to include B3 headers. 
func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { req.Header.Set(TraceIDHeader, hex.EncodeToString(sc.TraceID[:])) req.Header.Set(SpanIDHeader, hex.EncodeToString(sc.SpanID[:])) var sampled string if sc.IsSampled() { sampled = "1" } else { sampled = "0" } req.Header.Set(SampledHeader, sampled) } opencensus-go-0.24.0/plugin/ochttp/propagation/b3/b3_test.go000066400000000000000000000167151433102037600237130ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package b3 import ( "net/http" "reflect" "testing" "go.opencensus.io/trace" ) func TestHTTPFormat_FromRequest(t *testing.T) { tests := []struct { name string makeReq func() *http.Request wantSc trace.SpanContext wantOk bool }{ { name: "128-bit trace ID + 64-bit span ID; sampled=1", makeReq: func() *http.Request { req, _ := http.NewRequest("GET", "http://example.com", nil) req.Header.Set(TraceIDHeader, "463ac35c9f6413ad48485a3953bb6124") req.Header.Set(SpanIDHeader, "0020000000000001") req.Header.Set(SampledHeader, "1") return req }, wantSc: trace.SpanContext{ TraceID: trace.TraceID{70, 58, 195, 92, 159, 100, 19, 173, 72, 72, 90, 57, 83, 187, 97, 36}, SpanID: trace.SpanID{0, 32, 0, 0, 0, 0, 0, 1}, TraceOptions: trace.TraceOptions(1), }, wantOk: true, }, { name: "short trace ID + short span ID; sampled=1", makeReq: func() *http.Request { req, _ := http.NewRequest("GET", "http://example.com", nil) req.Header.Set(TraceIDHeader, "000102") req.Header.Set(SpanIDHeader, "000102") req.Header.Set(SampledHeader, "1") return req }, wantSc: trace.SpanContext{ TraceID: trace.TraceID{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2}, SpanID: trace.SpanID{0, 0, 0, 0, 0, 0, 1, 2}, TraceOptions: trace.TraceOptions(1), }, wantOk: true, }, { name: "64-bit trace ID + 64-bit span ID; sampled=0", makeReq: func() *http.Request { req, _ := http.NewRequest("GET", "http://example.com", nil) req.Header.Set(TraceIDHeader, "0020000000000001") req.Header.Set(SpanIDHeader, "0020000000000001") req.Header.Set(SampledHeader, "0") return req }, wantSc: trace.SpanContext{ TraceID: trace.TraceID{0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 1}, SpanID: trace.SpanID{0, 32, 0, 0, 0, 0, 0, 1}, TraceOptions: trace.TraceOptions(0), }, wantOk: true, }, { name: "128-bit trace ID + 64-bit span ID; no sampling header", makeReq: func() *http.Request { req, _ := http.NewRequest("GET", "http://example.com", nil) req.Header.Set(TraceIDHeader, "463ac35c9f6413ad48485a3953bb6124") req.Header.Set(SpanIDHeader, "0020000000000001") return req }, wantSc: trace.SpanContext{ TraceID: trace.TraceID{70, 58, 195, 92, 159, 100, 19, 173, 72, 72, 90, 57, 83, 187, 97, 36}, SpanID: trace.SpanID{0, 32, 0, 0, 0, 0, 0, 1}, TraceOptions: trace.TraceOptions(0), }, wantOk: true, }, { name: "invalid trace ID + 64-bit span ID; no sampling header", makeReq: func() *http.Request { req, _ := http.NewRequest("GET", "http://example.com", nil) req.Header.Set(TraceIDHeader, "") req.Header.Set(SpanIDHeader, "0020000000000001") 
return req }, wantSc: trace.SpanContext{}, wantOk: false, }, { name: "invalid >128-bit trace ID + 64-bit span ID; no sampling header", makeReq: func() *http.Request { req, _ := http.NewRequest("GET", "http://example.com", nil) req.Header.Set(TraceIDHeader, "0020000000000001002000000000000111") req.Header.Set(SpanIDHeader, "0020000000000001") return req }, wantSc: trace.SpanContext{}, wantOk: false, }, { name: "128-bit trace ID; invalid span ID; no sampling header", makeReq: func() *http.Request { req, _ := http.NewRequest("GET", "http://example.com", nil) req.Header.Set(TraceIDHeader, "463ac35c9f6413ad48485a3953bb6124") req.Header.Set(SpanIDHeader, "") return req }, wantSc: trace.SpanContext{}, wantOk: false, }, { name: "128-bit trace ID; invalid >64 bit span ID; no sampling header", makeReq: func() *http.Request { req, _ := http.NewRequest("GET", "http://example.com", nil) req.Header.Set(TraceIDHeader, "463ac35c9f6413ad48485a3953bb6124") req.Header.Set(SpanIDHeader, "002000000000000111") return req }, wantSc: trace.SpanContext{}, wantOk: false, }, { name: "128-bit trace ID + 64-bit span ID; sampled=true", makeReq: func() *http.Request { req, _ := http.NewRequest("GET", "http://example.com", nil) req.Header.Set(TraceIDHeader, "463ac35c9f6413ad48485a3953bb6124") req.Header.Set(SpanIDHeader, "0020000000000001") req.Header.Set(SampledHeader, "true") return req }, wantSc: trace.SpanContext{ TraceID: trace.TraceID{70, 58, 195, 92, 159, 100, 19, 173, 72, 72, 90, 57, 83, 187, 97, 36}, SpanID: trace.SpanID{0, 32, 0, 0, 0, 0, 0, 1}, TraceOptions: trace.TraceOptions(1), }, wantOk: true, }, { name: "128-bit trace ID + 64-bit span ID; sampled=false", makeReq: func() *http.Request { req, _ := http.NewRequest("GET", "http://example.com", nil) req.Header.Set(TraceIDHeader, "463ac35c9f6413ad48485a3953bb6124") req.Header.Set(SpanIDHeader, "0020000000000001") req.Header.Set(SampledHeader, "false") return req }, wantSc: trace.SpanContext{ TraceID: trace.TraceID{70, 58, 195, 92, 159, 100, 19, 173, 72, 72, 90, 57, 83, 187, 97, 36}, SpanID: trace.SpanID{0, 32, 0, 0, 0, 0, 0, 1}, TraceOptions: trace.TraceOptions(0), }, wantOk: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { f := &HTTPFormat{} sc, ok := f.SpanContextFromRequest(tt.makeReq()) if ok != tt.wantOk { t.Errorf("HTTPFormat.SpanContextFromRequest() got ok = %v, want %v", ok, tt.wantOk) } if !reflect.DeepEqual(sc, tt.wantSc) { t.Errorf("HTTPFormat.SpanContextFromRequest() got span context = %v, want %v", sc, tt.wantSc) } }) } } func TestHTTPFormat_ToRequest(t *testing.T) { tests := []struct { name string sc trace.SpanContext wantHeaders map[string]string }{ { name: "valid traceID, header ID, sampled=1", sc: trace.SpanContext{ TraceID: trace.TraceID{70, 58, 195, 92, 159, 100, 19, 173, 72, 72, 90, 57, 83, 187, 97, 36}, SpanID: trace.SpanID{0, 32, 0, 0, 0, 0, 0, 1}, TraceOptions: trace.TraceOptions(1), }, wantHeaders: map[string]string{ "X-B3-TraceId": "463ac35c9f6413ad48485a3953bb6124", "X-B3-SpanId": "0020000000000001", "X-B3-Sampled": "1", }, }, { name: "valid traceID, header ID, sampled=0", sc: trace.SpanContext{ TraceID: trace.TraceID{70, 58, 195, 92, 159, 100, 19, 173, 72, 72, 90, 57, 83, 187, 97, 36}, SpanID: trace.SpanID{0, 32, 0, 0, 0, 0, 0, 1}, TraceOptions: trace.TraceOptions(0), }, wantHeaders: map[string]string{ "X-B3-TraceId": "463ac35c9f6413ad48485a3953bb6124", "X-B3-SpanId": "0020000000000001", "X-B3-Sampled": "0", }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { f := &HTTPFormat{} req, _ := 
http.NewRequest("GET", "http://example.com", nil) f.SpanContextToRequest(tt.sc, req) for k, v := range tt.wantHeaders { if got, want := req.Header.Get(k), v; got != want { t.Errorf("req.Header.Get(%q) = %q; want %q", k, got, want) } } }) } } opencensus-go-0.24.0/plugin/ochttp/propagation/tracecontext/000077500000000000000000000000001433102037600242065ustar00rootroot00000000000000opencensus-go-0.24.0/plugin/ochttp/propagation/tracecontext/propagation.go000066400000000000000000000134641433102037600270700ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package tracecontext contains HTTP propagator for TraceContext standard. // See https://github.com/w3c/distributed-tracing for more information. package tracecontext // import "go.opencensus.io/plugin/ochttp/propagation/tracecontext" import ( "encoding/hex" "fmt" "net/http" "net/textproto" "regexp" "strings" "go.opencensus.io/trace" "go.opencensus.io/trace/propagation" "go.opencensus.io/trace/tracestate" ) const ( supportedVersion = 0 maxVersion = 254 maxTracestateLen = 512 traceparentHeader = "traceparent" tracestateHeader = "tracestate" trimOWSRegexFmt = `^[\x09\x20]*(.*[^\x20\x09])[\x09\x20]*$` ) var trimOWSRegExp = regexp.MustCompile(trimOWSRegexFmt) var _ propagation.HTTPFormat = (*HTTPFormat)(nil) // HTTPFormat implements the TraceContext trace propagation format. type HTTPFormat struct{} // SpanContextFromRequest extracts a span context from incoming requests. func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { tp, _ := getRequestHeader(req, traceparentHeader, false) ts, _ := getRequestHeader(req, tracestateHeader, true) return f.SpanContextFromHeaders(tp, ts) } // SpanContextFromHeaders extracts a span context from provided header values. func (f *HTTPFormat) SpanContextFromHeaders(tp string, ts string) (sc trace.SpanContext, ok bool) { if tp == "" { return trace.SpanContext{}, false } sections := strings.Split(tp, "-") if len(sections) < 4 { return trace.SpanContext{}, false } if len(sections[0]) != 2 { return trace.SpanContext{}, false } ver, err := hex.DecodeString(sections[0]) if err != nil { return trace.SpanContext{}, false } version := int(ver[0]) if version > maxVersion { return trace.SpanContext{}, false } if version == 0 && len(sections) != 4 { return trace.SpanContext{}, false } if len(sections[1]) != 32 { return trace.SpanContext{}, false } tid, err := hex.DecodeString(sections[1]) if err != nil { return trace.SpanContext{}, false } copy(sc.TraceID[:], tid) if len(sections[2]) != 16 { return trace.SpanContext{}, false } sid, err := hex.DecodeString(sections[2]) if err != nil { return trace.SpanContext{}, false } copy(sc.SpanID[:], sid) opts, err := hex.DecodeString(sections[3]) if err != nil || len(opts) < 1 { return trace.SpanContext{}, false } sc.TraceOptions = trace.TraceOptions(opts[0]) // Don't allow all zero trace or span ID. 
if sc.TraceID == [16]byte{} || sc.SpanID == [8]byte{} { return trace.SpanContext{}, false } sc.Tracestate = tracestateFromHeader(ts) return sc, true } // getRequestHeader returns a combined header field according to RFC7230 section 3.2.2. // If commaSeparated is true, multiple header fields with the same field name will be // combined using ",". // If no header was found using the given name, "ok" will be false. // If more than one header was found using the given name and commaSeparated is false, // "ok" will be false. func getRequestHeader(req *http.Request, name string, commaSeparated bool) (hdr string, ok bool) { v := req.Header[textproto.CanonicalMIMEHeaderKey(name)] switch len(v) { case 0: return "", false case 1: return v[0], true default: return strings.Join(v, ","), commaSeparated } } // TODO(rghetia): return an empty Tracestate when parsing tracestate header encounters an error. // Revisit to return additional boolean value to indicate parsing error when following issues // are resolved. // https://github.com/w3c/distributed-tracing/issues/172 // https://github.com/w3c/distributed-tracing/issues/175 func tracestateFromHeader(ts string) *tracestate.Tracestate { if ts == "" { return nil } var entries []tracestate.Entry pairs := strings.Split(ts, ",") hdrLenWithoutOWS := len(pairs) - 1 // Number of commas for _, pair := range pairs { matches := trimOWSRegExp.FindStringSubmatch(pair) if matches == nil { return nil } pair = matches[1] hdrLenWithoutOWS += len(pair) if hdrLenWithoutOWS > maxTracestateLen { return nil } kv := strings.Split(pair, "=") if len(kv) != 2 { return nil } entries = append(entries, tracestate.Entry{Key: kv[0], Value: kv[1]}) } tsParsed, err := tracestate.New(nil, entries...) if err != nil { return nil } return tsParsed } func tracestateToHeader(sc trace.SpanContext) string { var pairs = make([]string, 0, len(sc.Tracestate.Entries())) if sc.Tracestate != nil { for _, entry := range sc.Tracestate.Entries() { pairs = append(pairs, strings.Join([]string{entry.Key, entry.Value}, "=")) } h := strings.Join(pairs, ",") if h != "" && len(h) <= maxTracestateLen { return h } } return "" } // SpanContextToHeaders serializes the SpanContext to traceparent and tracestate headers. func (f *HTTPFormat) SpanContextToHeaders(sc trace.SpanContext) (tp string, ts string) { tp = fmt.Sprintf("%x-%x-%x-%x", []byte{supportedVersion}, sc.TraceID[:], sc.SpanID[:], []byte{byte(sc.TraceOptions)}) ts = tracestateToHeader(sc) return } // SpanContextToRequest modifies the given request to include traceparent and tracestate headers. func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { tp, ts := f.SpanContextToHeaders(sc) req.Header.Set(traceparentHeader, tp) if ts != "" { req.Header.Set(tracestateHeader, ts) } } opencensus-go-0.24.0/plugin/ochttp/propagation/tracecontext/propagation_test.go000066400000000000000000000207711433102037600301260ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License.
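// Illustrative sketch (not part of the archived sources): the round trip
// implemented by propagation.go above, serializing a SpanContext into the W3C
// traceparent/tracestate headers of an outgoing request and parsing it back.
// The trace and span IDs reuse the constants from the tests that follow.
package main

import (
	"fmt"
	"net/http"

	"go.opencensus.io/plugin/ochttp/propagation/tracecontext"
	"go.opencensus.io/trace"
)

func main() {
	f := &tracecontext.HTTPFormat{}

	sc := trace.SpanContext{
		TraceID:      trace.TraceID{75, 249, 47, 53, 119, 179, 77, 166, 163, 206, 146, 157, 14, 14, 71, 54},
		SpanID:       trace.SpanID{0, 240, 103, 170, 11, 169, 2, 183},
		TraceOptions: trace.TraceOptions(1), // sampled
	}

	req, _ := http.NewRequest("GET", "http://example.com", nil)
	f.SpanContextToRequest(sc, req)
	// Prints: 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01
	fmt.Println(req.Header.Get("traceparent"))

	if parsed, ok := f.SpanContextFromRequest(req); ok {
		fmt.Println(parsed.TraceID, parsed.SpanID, parsed.TraceOptions)
	}
}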
package tracecontext import ( "fmt" "net/http" "reflect" "strings" "testing" "go.opencensus.io/trace" "go.opencensus.io/trace/tracestate" ) var ( tpHeader = "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01" traceID = trace.TraceID{75, 249, 47, 53, 119, 179, 77, 166, 163, 206, 146, 157, 14, 14, 71, 54} spanID = trace.SpanID{0, 240, 103, 170, 11, 169, 2, 183} traceOpt = trace.TraceOptions(1) oversizeValue = strings.Repeat("a", maxTracestateLen/2) oversizeEntry1 = tracestate.Entry{Key: "foo", Value: oversizeValue} oversizeEntry2 = tracestate.Entry{Key: "hello", Value: oversizeValue} entry1 = tracestate.Entry{Key: "foo", Value: "bar"} entry2 = tracestate.Entry{Key: "hello", Value: "world example"} oversizeTs, _ = tracestate.New(nil, oversizeEntry1, oversizeEntry2) defaultTs, _ = tracestate.New(nil, nil...) nonDefaultTs, _ = tracestate.New(nil, entry1, entry2) ) func TestHTTPFormat_FromRequest(t *testing.T) { tests := []struct { name string header string wantSc trace.SpanContext wantOk bool }{ { name: "future version", header: "02-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01", wantSc: trace.SpanContext{ TraceID: trace.TraceID{75, 249, 47, 53, 119, 179, 77, 166, 163, 206, 146, 157, 14, 14, 71, 54}, SpanID: trace.SpanID{0, 240, 103, 170, 11, 169, 2, 183}, TraceOptions: trace.TraceOptions(1), }, wantOk: true, }, { name: "zero trace ID and span ID", header: "00-00000000000000000000000000000000-0000000000000000-01", wantSc: trace.SpanContext{}, wantOk: false, }, { name: "valid header", header: "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01", wantSc: trace.SpanContext{ TraceID: trace.TraceID{75, 249, 47, 53, 119, 179, 77, 166, 163, 206, 146, 157, 14, 14, 71, 54}, SpanID: trace.SpanID{0, 240, 103, 170, 11, 169, 2, 183}, TraceOptions: trace.TraceOptions(1), }, wantOk: true, }, { name: "missing options", header: "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7", wantSc: trace.SpanContext{}, wantOk: false, }, { name: "empty options", header: "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-", wantSc: trace.SpanContext{}, wantOk: false, }, } f := &HTTPFormat{} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { req, _ := http.NewRequest("GET", "http://example.com", nil) req.Header.Set("traceparent", tt.header) gotSc, gotOk := f.SpanContextFromRequest(req) if !reflect.DeepEqual(gotSc, tt.wantSc) { t.Errorf("HTTPFormat.FromRequest() gotSc = %v, want %v", gotSc, tt.wantSc) } if gotOk != tt.wantOk { t.Errorf("HTTPFormat.FromRequest() gotOk = %v, want %v", gotOk, tt.wantOk) } gotSc, gotOk = f.SpanContextFromHeaders(tt.header, "") if !reflect.DeepEqual(gotSc, tt.wantSc) { t.Errorf("HTTPFormat.SpanContextFromHeaders() gotTs = %v, want %v", gotSc.Tracestate, tt.wantSc.Tracestate) } if gotOk != tt.wantOk { t.Errorf("HTTPFormat.SpanContextFromHeaders() gotOk = %v, want %v", gotOk, tt.wantOk) } }) } } func TestHTTPFormat_ToRequest(t *testing.T) { tests := []struct { sc trace.SpanContext wantHeader string }{ { sc: trace.SpanContext{ TraceID: trace.TraceID{75, 249, 47, 53, 119, 179, 77, 166, 163, 206, 146, 157, 14, 14, 71, 54}, SpanID: trace.SpanID{0, 240, 103, 170, 11, 169, 2, 183}, TraceOptions: trace.TraceOptions(1), }, wantHeader: "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01", }, } for _, tt := range tests { t.Run(tt.wantHeader, func(t *testing.T) { f := &HTTPFormat{} req, _ := http.NewRequest("GET", "http://example.com", nil) f.SpanContextToRequest(tt.sc, req) h := req.Header.Get("traceparent") if got, want := h, tt.wantHeader; got != want { 
t.Errorf("HTTPFormat.ToRequest() header = %v, want %v", got, want) } gotTp, _ := f.SpanContextToHeaders(tt.sc) if gotTp != tt.wantHeader { t.Errorf("HTTPFormat.SpanContextToHeaders() tracestate header = %v, want %v", gotTp, tt.wantHeader) } }) } } func TestHTTPFormatTracestate_FromRequest(t *testing.T) { scWithNonDefaultTracestate := trace.SpanContext{ TraceID: traceID, SpanID: spanID, TraceOptions: traceOpt, Tracestate: nonDefaultTs, } scWithDefaultTracestate := trace.SpanContext{ TraceID: traceID, SpanID: spanID, TraceOptions: traceOpt, Tracestate: defaultTs, } tests := []struct { name string tpHeader string tsHeader string wantSc trace.SpanContext wantOk bool }{ { name: "tracestate invalid entries delimiter", tpHeader: tpHeader, tsHeader: "foo=bar;hello=world", wantSc: scWithDefaultTracestate, wantOk: true, }, { name: "tracestate invalid key-value delimiter", tpHeader: tpHeader, tsHeader: "foo=bar,hello-world", wantSc: scWithDefaultTracestate, wantOk: true, }, { name: "tracestate invalid value character", tpHeader: tpHeader, tsHeader: "foo=bar,hello=world example \u00a0 ", wantSc: scWithDefaultTracestate, wantOk: true, }, { name: "tracestate blank key-value", tpHeader: tpHeader, tsHeader: "foo=bar, ", wantSc: scWithDefaultTracestate, wantOk: true, }, { name: "tracestate oversize header", tpHeader: tpHeader, tsHeader: fmt.Sprintf("foo=%s,hello=%s", oversizeValue, oversizeValue), wantSc: scWithDefaultTracestate, wantOk: true, }, { name: "tracestate valid", tpHeader: tpHeader, tsHeader: "foo=bar , hello=world example", wantSc: scWithNonDefaultTracestate, wantOk: true, }, } f := &HTTPFormat{} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { req, _ := http.NewRequest("GET", "http://example.com", nil) req.Header.Set("traceparent", tt.tpHeader) req.Header.Set("tracestate", tt.tsHeader) gotSc, gotOk := f.SpanContextFromRequest(req) if !reflect.DeepEqual(gotSc, tt.wantSc) { t.Errorf("HTTPFormat.FromRequest() gotTs = %v, want %v", gotSc.Tracestate, tt.wantSc.Tracestate) } if gotOk != tt.wantOk { t.Errorf("HTTPFormat.FromRequest() gotOk = %v, want %v", gotOk, tt.wantOk) } gotSc, gotOk = f.SpanContextFromHeaders(tt.tpHeader, tt.tsHeader) if !reflect.DeepEqual(gotSc, tt.wantSc) { t.Errorf("HTTPFormat.SpanContextFromHeaders() gotTs = %v, want %v", gotSc.Tracestate, tt.wantSc.Tracestate) } if gotOk != tt.wantOk { t.Errorf("HTTPFormat.SpanContextFromHeaders() gotOk = %v, want %v", gotOk, tt.wantOk) } }) } } func TestHTTPFormatTracestate_ToRequest(t *testing.T) { tests := []struct { name string sc trace.SpanContext wantHeader string }{ { name: "valid span context with default tracestate", sc: trace.SpanContext{ TraceID: traceID, SpanID: spanID, TraceOptions: traceOpt, }, wantHeader: "", }, { name: "valid span context with non default tracestate", sc: trace.SpanContext{ TraceID: traceID, SpanID: spanID, TraceOptions: traceOpt, Tracestate: nonDefaultTs, }, wantHeader: "foo=bar,hello=world example", }, { name: "valid span context with oversize tracestate", sc: trace.SpanContext{ TraceID: traceID, SpanID: spanID, TraceOptions: traceOpt, Tracestate: oversizeTs, }, wantHeader: "", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { f := &HTTPFormat{} req, _ := http.NewRequest("GET", "http://example.com", nil) f.SpanContextToRequest(tt.sc, req) h := req.Header.Get("tracestate") if got, want := h, tt.wantHeader; got != want { t.Errorf("HTTPFormat.ToRequest() tracestate header = %v, want %v", got, want) } _, gotTs := f.SpanContextToHeaders(tt.sc) if gotTs != tt.wantHeader { 
t.Errorf("HTTPFormat.SpanContextToHeaders() tracestate header = %v, want %v", gotTs, tt.wantHeader) } }) } } opencensus-go-0.24.0/plugin/ochttp/propagation_test.go000066400000000000000000000042071433102037600230740ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ochttp import ( "context" "fmt" "io/ioutil" "net/http" "net/http/httptest" "testing" "go.opencensus.io/plugin/ochttp/propagation/b3" "go.opencensus.io/plugin/ochttp/propagation/tracecontext" "go.opencensus.io/trace" "go.opencensus.io/trace/propagation" ) func TestRoundTripAllFormats(t *testing.T) { // TODO: test combinations of different formats for chains of calls formats := []propagation.HTTPFormat{ &b3.HTTPFormat{}, &tracecontext.HTTPFormat{}, } ctx := context.Background() ctx, span := trace.StartSpan(ctx, "test", trace.WithSampler(trace.AlwaysSample())) sc := span.SpanContext() wantStr := fmt.Sprintf("trace_id=%x, span_id=%x, options=%d", sc.TraceID, sc.SpanID, sc.TraceOptions) defer span.End() for _, format := range formats { srv := httptest.NewServer(http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { sc, ok := format.SpanContextFromRequest(req) if !ok { resp.WriteHeader(http.StatusBadRequest) } fmt.Fprintf(resp, "trace_id=%x, span_id=%x, options=%d", sc.TraceID, sc.SpanID, sc.TraceOptions) })) req, err := http.NewRequest("GET", srv.URL, nil) if err != nil { t.Fatal(err) } format.SpanContextToRequest(span.SpanContext(), req) resp, err := http.DefaultClient.Do(req) if err != nil { t.Fatal(err) } if resp.StatusCode != 200 { t.Fatal(resp.Status) } body, err := ioutil.ReadAll(resp.Body) if err != nil { t.Fatal(err) } resp.Body.Close() if got, want := string(body), wantStr; got != want { t.Errorf("%s; want %s", got, want) } srv.Close() } } opencensus-go-0.24.0/plugin/ochttp/route.go000066400000000000000000000040531433102037600206470ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ochttp import ( "context" "net/http" "go.opencensus.io/tag" ) // SetRoute sets the http_server_route tag to the given value. // It's useful when an HTTP framework does not support the http.Handler interface // and using WithRouteTag is not an option, but provides a way to hook into the request flow. 
func SetRoute(ctx context.Context, route string) { if a, ok := ctx.Value(addedTagsKey{}).(*addedTags); ok { a.t = append(a.t, tag.Upsert(KeyServerRoute, route)) } } // WithRouteTag returns an http.Handler that records stats with the // http_server_route tag set to the given value. func WithRouteTag(handler http.Handler, route string) http.Handler { return taggedHandlerFunc(func(w http.ResponseWriter, r *http.Request) []tag.Mutator { addRoute := []tag.Mutator{tag.Upsert(KeyServerRoute, route)} ctx, _ := tag.New(r.Context(), addRoute...) r = r.WithContext(ctx) handler.ServeHTTP(w, r) return addRoute }) } // taggedHandlerFunc is a http.Handler that returns tags describing the // processing of the request. These tags will be recorded along with the // measures in this package at the end of the request. type taggedHandlerFunc func(w http.ResponseWriter, r *http.Request) []tag.Mutator func (h taggedHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) { tags := h(w, r) if a, ok := r.Context().Value(addedTagsKey{}).(*addedTags); ok { a.t = append(a.t, tags...) } } type addedTagsKey struct{} type addedTags struct { t []tag.Mutator } opencensus-go-0.24.0/plugin/ochttp/route_test.go000066400000000000000000000065061433102037600217130ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package ochttp_test import ( "net/http" "net/http/httptest" "testing" "github.com/google/go-cmp/cmp" "go.opencensus.io/plugin/ochttp" "go.opencensus.io/stats/view" "go.opencensus.io/tag" ) func TestWithRouteTag(t *testing.T) { v := &view.View{ Name: "request_total", Measure: ochttp.ServerLatency, Aggregation: view.Count(), TagKeys: []tag.Key{ochttp.KeyServerRoute}, } view.Register(v) var e testStatsExporter view.RegisterExporter(&e) defer view.UnregisterExporter(&e) mux := http.NewServeMux() handler := ochttp.WithRouteTag(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(204) }), "/a/") mux.Handle("/a/", handler) plugin := ochttp.Handler{Handler: mux} req, _ := http.NewRequest("GET", "/a/b/c", nil) rr := httptest.NewRecorder() plugin.ServeHTTP(rr, req) if got, want := rr.Code, 204; got != want { t.Fatalf("Unexpected response, got %d; want %d", got, want) } view.Unregister(v) // trigger exporting got := e.rowsForView("request_total") for i := range got { view.ClearStart(got[i].Data) } want := []*view.Row{ {Data: &view.CountData{Value: 1}, Tags: []tag.Tag{{Key: ochttp.KeyServerRoute, Value: "/a/"}}}, } if diff := cmp.Diff(got, want); diff != "" { t.Errorf("Unexpected view data exported, -got, +want: %s", diff) } } func TestSetRoute(t *testing.T) { v := &view.View{ Name: "request_total", Measure: ochttp.ServerLatency, Aggregation: view.Count(), TagKeys: []tag.Key{ochttp.KeyServerRoute}, } view.Register(v) var e testStatsExporter view.RegisterExporter(&e) defer view.UnregisterExporter(&e) mux := http.NewServeMux() handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ochttp.SetRoute(r.Context(), "/a/") w.WriteHeader(204) }) mux.Handle("/a/", handler) plugin := ochttp.Handler{Handler: mux} req, _ := http.NewRequest("GET", "/a/b/c", nil) rr := httptest.NewRecorder() plugin.ServeHTTP(rr, req) if got, want := rr.Code, 204; got != want { t.Fatalf("Unexpected response, got %d; want %d", got, want) } view.Unregister(v) // trigger exporting got := e.rowsForView("request_total") for i := range got { view.ClearStart(got[i].Data) } want := []*view.Row{ {Data: &view.CountData{Value: 1}, Tags: []tag.Tag{{Key: ochttp.KeyServerRoute, Value: "/a/"}}}, } if diff := cmp.Diff(got, want); diff != "" { t.Errorf("Unexpected view data exported, -got, +want: %s", diff) } } type testStatsExporter struct { vd []*view.Data } func (t *testStatsExporter) ExportView(d *view.Data) { t.vd = append(t.vd, d) } func (t *testStatsExporter) rowsForView(name string) []*view.Row { var rows []*view.Row for _, d := range t.vd { if d.View.Name == name { rows = append(rows, d.Rows...) } } return rows } opencensus-go-0.24.0/plugin/ochttp/server.go000066400000000000000000000267651433102037600210350ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package ochttp import ( "context" "io" "net/http" "strconv" "sync" "time" "go.opencensus.io/stats" "go.opencensus.io/tag" "go.opencensus.io/trace" "go.opencensus.io/trace/propagation" ) // Handler is an http.Handler wrapper to instrument your HTTP server with // OpenCensus. It supports both stats and tracing. // // # Tracing // // This handler is aware of the incoming request's span, reading it from request // headers as configured using the Propagation field. // The extracted span can be accessed from the incoming request's // context. // // span := trace.FromContext(r.Context()) // // The server span will be automatically ended at the end of ServeHTTP. type Handler struct { // Propagation defines how traces are propagated. If unspecified, // B3 propagation will be used. Propagation propagation.HTTPFormat // Handler is the handler used to handle the incoming request. Handler http.Handler // StartOptions are applied to the span started by this Handler around each // request. // // StartOptions.SpanKind will always be set to trace.SpanKindServer // for spans started by this transport. StartOptions trace.StartOptions // GetStartOptions allows to set start options per request. If set, // StartOptions is going to be ignored. GetStartOptions func(*http.Request) trace.StartOptions // IsPublicEndpoint should be set to true for publicly accessible HTTP(S) // servers. If true, any trace metadata set on the incoming request will // be added as a linked trace instead of being added as a parent of the // current trace. IsPublicEndpoint bool // FormatSpanName holds the function to use for generating the span name // from the information found in the incoming HTTP Request. By default the // name equals the URL Path. FormatSpanName func(*http.Request) string // IsHealthEndpoint holds the function to use for determining if the // incoming HTTP request should be considered a health check. This is in // addition to the private isHealthEndpoint func which may also indicate // tracing should be skipped. IsHealthEndpoint func(*http.Request) bool } func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { var tags addedTags r, traceEnd := h.startTrace(w, r) defer traceEnd() w, statsEnd := h.startStats(w, r) defer statsEnd(&tags) handler := h.Handler if handler == nil { handler = http.DefaultServeMux } r = r.WithContext(context.WithValue(r.Context(), addedTagsKey{}, &tags)) handler.ServeHTTP(w, r) } func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) { if h.IsHealthEndpoint != nil && h.IsHealthEndpoint(r) || isHealthEndpoint(r.URL.Path) { return r, func() {} } var name string if h.FormatSpanName == nil { name = spanNameFromURL(r) } else { name = h.FormatSpanName(r) } ctx := r.Context() startOpts := h.StartOptions if h.GetStartOptions != nil { startOpts = h.GetStartOptions(r) } var span *trace.Span sc, ok := h.extractSpanContext(r) if ok && !h.IsPublicEndpoint { ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc, trace.WithSampler(startOpts.Sampler), trace.WithSpanKind(trace.SpanKindServer)) } else { ctx, span = trace.StartSpan(ctx, name, trace.WithSampler(startOpts.Sampler), trace.WithSpanKind(trace.SpanKindServer), ) if ok { span.AddLink(trace.Link{ TraceID: sc.TraceID, SpanID: sc.SpanID, Type: trace.LinkTypeParent, Attributes: nil, }) } } span.AddAttributes(requestAttrs(r)...) if r.Body == nil { // TODO: Handle cases where ContentLength is not set. 
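// Nothing to record here: with no body (or, in the branch below, an unknown
// ContentLength) no message-receive event is added to the span.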
} else if r.ContentLength > 0 { span.AddMessageReceiveEvent(0, /* TODO: messageID */ r.ContentLength, -1) } return r.WithContext(ctx), span.End } func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) { if h.Propagation == nil { return defaultFormat.SpanContextFromRequest(r) } return h.Propagation.SpanContextFromRequest(r) } func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func(tags *addedTags)) { ctx, _ := tag.New(r.Context(), tag.Upsert(Host, r.Host), tag.Upsert(Path, r.URL.Path), tag.Upsert(Method, r.Method)) track := &trackingResponseWriter{ start: time.Now(), ctx: ctx, writer: w, } if r.Body == nil { // TODO: Handle cases where ContentLength is not set. track.reqSize = -1 } else if r.ContentLength > 0 { track.reqSize = r.ContentLength } stats.Record(ctx, ServerRequestCount.M(1)) return track.wrappedResponseWriter(), track.end } type trackingResponseWriter struct { ctx context.Context reqSize int64 respSize int64 start time.Time statusCode int statusLine string endOnce sync.Once writer http.ResponseWriter } // Compile time assertion for ResponseWriter interface var _ http.ResponseWriter = (*trackingResponseWriter)(nil) func (t *trackingResponseWriter) end(tags *addedTags) { t.endOnce.Do(func() { if t.statusCode == 0 { t.statusCode = 200 } span := trace.FromContext(t.ctx) span.SetStatus(TraceStatus(t.statusCode, t.statusLine)) span.AddAttributes(trace.Int64Attribute(StatusCodeAttribute, int64(t.statusCode))) m := []stats.Measurement{ ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)), ServerResponseBytes.M(t.respSize), } if t.reqSize >= 0 { m = append(m, ServerRequestBytes.M(t.reqSize)) } allTags := make([]tag.Mutator, len(tags.t)+1) allTags[0] = tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)) copy(allTags[1:], tags.t) stats.RecordWithTags(t.ctx, allTags, m...) }) } func (t *trackingResponseWriter) Header() http.Header { return t.writer.Header() } func (t *trackingResponseWriter) Write(data []byte) (int, error) { n, err := t.writer.Write(data) t.respSize += int64(n) // Add message event for request bytes sent. span := trace.FromContext(t.ctx) span.AddMessageSendEvent(0 /* TODO: messageID */, int64(n), -1) return n, err } func (t *trackingResponseWriter) WriteHeader(statusCode int) { t.writer.WriteHeader(statusCode) t.statusCode = statusCode t.statusLine = http.StatusText(t.statusCode) } // wrappedResponseWriter returns a wrapped version of the original // // ResponseWriter and only implements the same combination of additional // // interfaces as the original. // This implementation is based on https://github.com/felixge/httpsnoop. 
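//
// Preserving exactly the optional interfaces (http.Hijacker, http.CloseNotifier,
// http.Pusher, http.Flusher, io.ReaderFrom) that the underlying writer supports
// keeps type assertions in downstream handlers behaving as if the writer were
// not wrapped at all.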
func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter { var ( hj, i0 = t.writer.(http.Hijacker) cn, i1 = t.writer.(http.CloseNotifier) pu, i2 = t.writer.(http.Pusher) fl, i3 = t.writer.(http.Flusher) rf, i4 = t.writer.(io.ReaderFrom) ) switch { case !i0 && !i1 && !i2 && !i3 && !i4: return struct { http.ResponseWriter }{t} case !i0 && !i1 && !i2 && !i3 && i4: return struct { http.ResponseWriter io.ReaderFrom }{t, rf} case !i0 && !i1 && !i2 && i3 && !i4: return struct { http.ResponseWriter http.Flusher }{t, fl} case !i0 && !i1 && !i2 && i3 && i4: return struct { http.ResponseWriter http.Flusher io.ReaderFrom }{t, fl, rf} case !i0 && !i1 && i2 && !i3 && !i4: return struct { http.ResponseWriter http.Pusher }{t, pu} case !i0 && !i1 && i2 && !i3 && i4: return struct { http.ResponseWriter http.Pusher io.ReaderFrom }{t, pu, rf} case !i0 && !i1 && i2 && i3 && !i4: return struct { http.ResponseWriter http.Pusher http.Flusher }{t, pu, fl} case !i0 && !i1 && i2 && i3 && i4: return struct { http.ResponseWriter http.Pusher http.Flusher io.ReaderFrom }{t, pu, fl, rf} case !i0 && i1 && !i2 && !i3 && !i4: return struct { http.ResponseWriter http.CloseNotifier }{t, cn} case !i0 && i1 && !i2 && !i3 && i4: return struct { http.ResponseWriter http.CloseNotifier io.ReaderFrom }{t, cn, rf} case !i0 && i1 && !i2 && i3 && !i4: return struct { http.ResponseWriter http.CloseNotifier http.Flusher }{t, cn, fl} case !i0 && i1 && !i2 && i3 && i4: return struct { http.ResponseWriter http.CloseNotifier http.Flusher io.ReaderFrom }{t, cn, fl, rf} case !i0 && i1 && i2 && !i3 && !i4: return struct { http.ResponseWriter http.CloseNotifier http.Pusher }{t, cn, pu} case !i0 && i1 && i2 && !i3 && i4: return struct { http.ResponseWriter http.CloseNotifier http.Pusher io.ReaderFrom }{t, cn, pu, rf} case !i0 && i1 && i2 && i3 && !i4: return struct { http.ResponseWriter http.CloseNotifier http.Pusher http.Flusher }{t, cn, pu, fl} case !i0 && i1 && i2 && i3 && i4: return struct { http.ResponseWriter http.CloseNotifier http.Pusher http.Flusher io.ReaderFrom }{t, cn, pu, fl, rf} case i0 && !i1 && !i2 && !i3 && !i4: return struct { http.ResponseWriter http.Hijacker }{t, hj} case i0 && !i1 && !i2 && !i3 && i4: return struct { http.ResponseWriter http.Hijacker io.ReaderFrom }{t, hj, rf} case i0 && !i1 && !i2 && i3 && !i4: return struct { http.ResponseWriter http.Hijacker http.Flusher }{t, hj, fl} case i0 && !i1 && !i2 && i3 && i4: return struct { http.ResponseWriter http.Hijacker http.Flusher io.ReaderFrom }{t, hj, fl, rf} case i0 && !i1 && i2 && !i3 && !i4: return struct { http.ResponseWriter http.Hijacker http.Pusher }{t, hj, pu} case i0 && !i1 && i2 && !i3 && i4: return struct { http.ResponseWriter http.Hijacker http.Pusher io.ReaderFrom }{t, hj, pu, rf} case i0 && !i1 && i2 && i3 && !i4: return struct { http.ResponseWriter http.Hijacker http.Pusher http.Flusher }{t, hj, pu, fl} case i0 && !i1 && i2 && i3 && i4: return struct { http.ResponseWriter http.Hijacker http.Pusher http.Flusher io.ReaderFrom }{t, hj, pu, fl, rf} case i0 && i1 && !i2 && !i3 && !i4: return struct { http.ResponseWriter http.Hijacker http.CloseNotifier }{t, hj, cn} case i0 && i1 && !i2 && !i3 && i4: return struct { http.ResponseWriter http.Hijacker http.CloseNotifier io.ReaderFrom }{t, hj, cn, rf} case i0 && i1 && !i2 && i3 && !i4: return struct { http.ResponseWriter http.Hijacker http.CloseNotifier http.Flusher }{t, hj, cn, fl} case i0 && i1 && !i2 && i3 && i4: return struct { http.ResponseWriter http.Hijacker http.CloseNotifier http.Flusher 
io.ReaderFrom }{t, hj, cn, fl, rf} case i0 && i1 && i2 && !i3 && !i4: return struct { http.ResponseWriter http.Hijacker http.CloseNotifier http.Pusher }{t, hj, cn, pu} case i0 && i1 && i2 && !i3 && i4: return struct { http.ResponseWriter http.Hijacker http.CloseNotifier http.Pusher io.ReaderFrom }{t, hj, cn, pu, rf} case i0 && i1 && i2 && i3 && !i4: return struct { http.ResponseWriter http.Hijacker http.CloseNotifier http.Pusher http.Flusher }{t, hj, cn, pu, fl} case i0 && i1 && i2 && i3 && i4: return struct { http.ResponseWriter http.Hijacker http.CloseNotifier http.Pusher http.Flusher io.ReaderFrom }{t, hj, cn, pu, fl, rf} default: return struct { http.ResponseWriter }{t} } } opencensus-go-0.24.0/plugin/ochttp/server_test.go000066400000000000000000000406651433102037600220670ustar00rootroot00000000000000package ochttp import ( "bufio" "bytes" "context" "crypto/tls" "fmt" "io" "io/ioutil" "net" "net/http" "net/http/httptest" "strings" "sync" "testing" "time" "golang.org/x/net/http2" "go.opencensus.io/stats/view" "go.opencensus.io/trace" ) func httpHandler(statusCode, respSize int) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(statusCode) body := make([]byte, respSize) w.Write(body) }) } func updateMean(mean float64, sample, count int) float64 { if count == 1 { return float64(sample) } return mean + (float64(sample)-mean)/float64(count) } func TestHandlerStatsCollection(t *testing.T) { if err := view.Register(DefaultServerViews...); err != nil { t.Fatalf("Failed to register ochttp.DefaultServerViews error: %v", err) } views := []string{ "opencensus.io/http/server/request_count", "opencensus.io/http/server/latency", "opencensus.io/http/server/request_bytes", "opencensus.io/http/server/response_bytes", } // TODO: test latency measurements? tests := []struct { name, method, target string count, statusCode, reqSize, respSize int }{ {"get 200", "GET", "http://opencensus.io/request/one", 10, 200, 512, 512}, {"post 503", "POST", "http://opencensus.io/request/two", 5, 503, 1024, 16384}, {"no body 302", "GET", "http://opencensus.io/request/three", 2, 302, 0, 0}, } totalCount, meanReqSize, meanRespSize := 0, 0.0, 0.0 for _, test := range tests { t.Run(test.name, func(t *testing.T) { body := bytes.NewBuffer(make([]byte, test.reqSize)) r := httptest.NewRequest(test.method, test.target, body) w := httptest.NewRecorder() mux := http.NewServeMux() mux.Handle("/request/", httpHandler(test.statusCode, test.respSize)) h := &Handler{ Handler: mux, StartOptions: trace.StartOptions{ Sampler: trace.NeverSample(), }, } for i := 0; i < test.count; i++ { h.ServeHTTP(w, r) totalCount++ // Distributions do not track sum directly, we must // mimic their behaviour to avoid rounding failures. 
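// updateMean applies the running-mean recurrence
// mean_n = mean_{n-1} + (x_n - mean_{n-1})/n for each recorded sample.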
meanReqSize = updateMean(meanReqSize, test.reqSize, totalCount) meanRespSize = updateMean(meanRespSize, test.respSize, totalCount) } }) } for _, viewName := range views { v := view.Find(viewName) if v == nil { t.Errorf("view not found %q", viewName) continue } rows, err := view.RetrieveData(viewName) if err != nil { t.Error(err) continue } if got, want := len(rows), 1; got != want { t.Errorf("len(%q) = %d; want %d", viewName, got, want) continue } data := rows[0].Data var count int var sum float64 switch data := data.(type) { case *view.CountData: count = int(data.Value) case *view.DistributionData: count = int(data.Count) sum = data.Sum() default: t.Errorf("Unknown data type: %v", data) continue } if got, want := count, totalCount; got != want { t.Fatalf("%s = %d; want %d", viewName, got, want) } // We can only check sum for distribution views. switch viewName { case "opencensus.io/http/server/request_bytes": if got, want := sum, meanReqSize*float64(totalCount); got != want { t.Fatalf("%s = %g; want %g", viewName, got, want) } case "opencensus.io/http/server/response_bytes": if got, want := sum, meanRespSize*float64(totalCount); got != want { t.Fatalf("%s = %g; want %g", viewName, got, want) } } } } type testResponseWriterHijacker struct { httptest.ResponseRecorder } func (trw *testResponseWriterHijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) { return nil, nil, nil } func TestUnitTestHandlerProxiesHijack(t *testing.T) { tests := []struct { w http.ResponseWriter hasHijack bool }{ {httptest.NewRecorder(), false}, {nil, false}, {new(testResponseWriterHijacker), true}, } for i, tt := range tests { tw := &trackingResponseWriter{writer: tt.w} w := tw.wrappedResponseWriter() _, ttHijacker := w.(http.Hijacker) if want, have := tt.hasHijack, ttHijacker; want != have { t.Errorf("#%d Hijack got %t, want %t", i, have, want) } } } // Integration test with net/http to ensure that our Handler proxies to its // response the call to (http.Hijack).Hijacker() and that that successfully // passes with HTTP/1.1 connections. 
See Issue #642 func TestHandlerProxiesHijack_HTTP1(t *testing.T) { cst := httptest.NewServer(&Handler{ Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { var writeMsg func(string) defer func() { err := recover() writeMsg(fmt.Sprintf("Proto=%s\npanic=%v", r.Proto, err != nil)) }() conn, _, _ := w.(http.Hijacker).Hijack() writeMsg = func(msg string) { fmt.Fprintf(conn, "%s 200\nContentLength: %d", r.Proto, len(msg)) fmt.Fprintf(conn, "\r\n\r\n%s", msg) conn.Close() } }), }) defer cst.Close() testCases := []struct { name string tr *http.Transport want string }{ { name: "http1-transport", tr: new(http.Transport), want: "Proto=HTTP/1.1\npanic=false", }, { name: "http2-transport", tr: func() *http.Transport { tr := new(http.Transport) http2.ConfigureTransport(tr) return tr }(), want: "Proto=HTTP/1.1\npanic=false", }, } for _, tc := range testCases { c := &http.Client{Transport: &Transport{Base: tc.tr}} res, err := c.Get(cst.URL) if err != nil { t.Errorf("(%s) unexpected error %v", tc.name, err) continue } blob, _ := ioutil.ReadAll(res.Body) res.Body.Close() if g, w := string(blob), tc.want; g != w { t.Errorf("(%s) got = %q; want = %q", tc.name, g, w) } } } // Integration test with net/http, x/net/http2 to ensure that our Handler proxies // to its response the call to (http.Hijack).Hijacker() and that that crashes // since http.Hijacker and HTTP/2.0 connections are incompatible, but the // detection is only at runtime and ensure that we can stream and flush to the // connection even after invoking Hijack(). See Issue #642. func TestHandlerProxiesHijack_HTTP2(t *testing.T) { cst := httptest.NewUnstartedServer(&Handler{ Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if _, ok := w.(http.Hijacker); ok { conn, _, err := w.(http.Hijacker).Hijack() if conn != nil { data := fmt.Sprintf("Surprisingly got the Hijacker() Proto: %s", r.Proto) fmt.Fprintf(conn, "%s 200\nContent-Length:%d\r\n\r\n%s", r.Proto, len(data), data) conn.Close() return } switch { case err == nil: fmt.Fprintf(w, "Unexpectedly did not encounter an error!") default: fmt.Fprintf(w, "Unexpected error: %v", err) case strings.Contains(err.(error).Error(), "Hijack"): // Confirmed HTTP/2.0, let's stream to it for i := 0; i < 5; i++ { fmt.Fprintf(w, "%d\n", i) w.(http.Flusher).Flush() } } } else { // Confirmed HTTP/2.0, let's stream to it for i := 0; i < 5; i++ { fmt.Fprintf(w, "%d\n", i) w.(http.Flusher).Flush() } } }), }) cst.TLS = &tls.Config{NextProtos: []string{"h2"}} cst.StartTLS() defer cst.Close() if wantPrefix := "https://"; !strings.HasPrefix(cst.URL, wantPrefix) { t.Fatalf("URL got = %q wantPrefix = %q", cst.URL, wantPrefix) } tr := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}} http2.ConfigureTransport(tr) c := &http.Client{Transport: tr} res, err := c.Get(cst.URL) if err != nil { t.Fatalf("Unexpected error %v", err) } blob, _ := ioutil.ReadAll(res.Body) res.Body.Close() if g, w := string(blob), "0\n1\n2\n3\n4\n"; g != w { t.Errorf("got = %q; want = %q", g, w) } } func TestEnsureTrackingResponseWriterSetsStatusCode(t *testing.T) { // Ensure that the trackingResponseWriter always sets the spanStatus on ending the span. // Because we can only examine the Status after exporting, this test roundtrips a // couple of requests and then later examines the exported spans. // See Issue #700. 
exporter := &spanExporter{cur: make(chan *trace.SpanData, 1)} trace.RegisterExporter(exporter) defer trace.UnregisterExporter(exporter) tests := []struct { res *http.Response want trace.Status }{ {res: &http.Response{StatusCode: 200}, want: trace.Status{Code: trace.StatusCodeOK, Message: `OK`}}, {res: &http.Response{StatusCode: 500}, want: trace.Status{Code: trace.StatusCodeUnknown, Message: `UNKNOWN`}}, {res: &http.Response{StatusCode: 403}, want: trace.Status{Code: trace.StatusCodePermissionDenied, Message: `PERMISSION_DENIED`}}, {res: &http.Response{StatusCode: 401}, want: trace.Status{Code: trace.StatusCodeUnauthenticated, Message: `UNAUTHENTICATED`}}, {res: &http.Response{StatusCode: 429}, want: trace.Status{Code: trace.StatusCodeResourceExhausted, Message: `RESOURCE_EXHAUSTED`}}, } for _, tt := range tests { t.Run(tt.want.Message, func(t *testing.T) { ctx := context.Background() prc, pwc := io.Pipe() go func() { pwc.Write([]byte("Foo")) pwc.Close() }() inRes := tt.res inRes.Body = prc tr := &traceTransport{ base: &testResponseTransport{res: inRes}, formatSpanName: spanNameFromURL, startOptions: trace.StartOptions{ Sampler: trace.AlwaysSample(), }, } req, err := http.NewRequest("POST", "https://example.org", bytes.NewReader([]byte("testing"))) if err != nil { t.Fatalf("NewRequest error: %v", err) } req = req.WithContext(ctx) res, err := tr.RoundTrip(req) if err != nil { t.Fatalf("RoundTrip error: %v", err) } _, _ = ioutil.ReadAll(res.Body) res.Body.Close() cur := <-exporter.cur if got, want := cur.Status, tt.want; got != want { t.Fatalf("SpanData:\ngot = (%#v)\nwant = (%#v)", got, want) } }) } } type spanExporter struct { sync.Mutex cur chan *trace.SpanData } var _ trace.Exporter = (*spanExporter)(nil) func (se *spanExporter) ExportSpan(sd *trace.SpanData) { se.Lock() se.cur <- sd se.Unlock() } type testResponseTransport struct { res *http.Response } var _ http.RoundTripper = (*testResponseTransport)(nil) func (rb *testResponseTransport) RoundTrip(*http.Request) (*http.Response, error) { return rb.res, nil } func TestHandlerImplementsHTTPPusher(t *testing.T) { cst := setupAndStartServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { pusher, ok := w.(http.Pusher) if !ok { w.Write([]byte("false")) return } err := pusher.Push("/static.css", &http.PushOptions{ Method: "GET", Header: http.Header{"Accept-Encoding": r.Header["Accept-Encoding"]}, }) if err != nil && false { // TODO: (@odeke-em) consult with Go stdlib for why trying // to configure even an HTTP/2 server and HTTP/2 transport // still return http.ErrNotSupported even without using ochttp.Handler. 
http.Error(w, err.Error(), http.StatusBadRequest) return } w.Write([]byte("true")) }), asHTTP2) defer cst.Close() tests := []struct { rt http.RoundTripper wantBody string }{ { rt: h1Transport(), wantBody: "false", }, { rt: h2Transport(), wantBody: "true", }, { rt: &Transport{Base: h1Transport()}, wantBody: "false", }, { rt: &Transport{Base: h2Transport()}, wantBody: "true", }, } for i, tt := range tests { c := &http.Client{Transport: &Transport{Base: tt.rt}} res, err := c.Get(cst.URL) if err != nil { t.Errorf("#%d: Unexpected error %v", i, err) continue } body, _ := ioutil.ReadAll(res.Body) _ = res.Body.Close() if g, w := string(body), tt.wantBody; g != w { t.Errorf("#%d: got = %q; want = %q", i, g, w) } } } const ( isNil = "isNil" hang = "hang" ended = "ended" nonNotifier = "nonNotifier" asHTTP1 = false asHTTP2 = true ) func setupAndStartServer(hf func(http.ResponseWriter, *http.Request), isHTTP2 bool) *httptest.Server { cst := httptest.NewUnstartedServer(&Handler{ Handler: http.HandlerFunc(hf), }) if isHTTP2 { http2.ConfigureServer(cst.Config, new(http2.Server)) cst.TLS = cst.Config.TLSConfig cst.StartTLS() } else { cst.Start() } return cst } func insecureTLS() *tls.Config { return &tls.Config{InsecureSkipVerify: true} } func h1Transport() *http.Transport { return &http.Transport{TLSClientConfig: insecureTLS()} } func h2Transport() *http.Transport { tr := &http.Transport{TLSClientConfig: insecureTLS()} http2.ConfigureTransport(tr) return tr } type concurrentBuffer struct { sync.RWMutex bw *bytes.Buffer } func (cw *concurrentBuffer) Write(b []byte) (int, error) { cw.Lock() defer cw.Unlock() return cw.bw.Write(b) } func (cw *concurrentBuffer) String() string { cw.Lock() defer cw.Unlock() return cw.bw.String() } func handleCloseNotify(outLog io.Writer) http.HandlerFunc { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { cn, ok := w.(http.CloseNotifier) if !ok { fmt.Fprintln(outLog, nonNotifier) return } ch := cn.CloseNotify() if ch == nil { fmt.Fprintln(outLog, isNil) return } <-ch fmt.Fprintln(outLog, ended) }) } func TestHandlerImplementsHTTPCloseNotify(t *testing.T) { http1Log := &concurrentBuffer{bw: new(bytes.Buffer)} http1Server := setupAndStartServer(handleCloseNotify(http1Log), asHTTP1) http2Log := &concurrentBuffer{bw: new(bytes.Buffer)} http2Server := setupAndStartServer(handleCloseNotify(http2Log), asHTTP2) defer http1Server.Close() defer http2Server.Close() tests := []struct { url string want string }{ {url: http1Server.URL, want: nonNotifier}, {url: http2Server.URL, want: ended}, } transports := []struct { name string rt http.RoundTripper }{ {name: "http2+ochttp", rt: &Transport{Base: h2Transport()}}, {name: "http1+ochttp", rt: &Transport{Base: h1Transport()}}, {name: "http1-ochttp", rt: h1Transport()}, {name: "http2-ochttp", rt: h2Transport()}, } // Each transport invokes one of two server types, either HTTP/1 or HTTP/2 for _, trc := range transports { // Try out all the transport combinations for i, tt := range tests { req, err := http.NewRequest("GET", tt.url, nil) if err != nil { t.Errorf("#%d: Unexpected error making request: %v", i, err) continue } // Using a timeout to ensure that the request is cancelled and the server // if its handler implements CloseNotify will see this as the client leaving. 
ctx, cancel := context.WithTimeout(context.Background(), 80*time.Millisecond) defer cancel() req = req.WithContext(ctx) client := &http.Client{Transport: trc.rt} res, err := client.Do(req) if err != nil && !strings.Contains(err.Error(), "context deadline exceeded") { t.Errorf("#%d: %sClient Unexpected error %v", i, trc.name, err) continue } if res != nil && res.Body != nil { io.CopyN(ioutil.Discard, res.Body, 5) _ = res.Body.Close() } } } // Wait for a couple of milliseconds for the GoAway frames to be properly propagated <-time.After(200 * time.Millisecond) wantHTTP1Log := strings.Repeat("ended\n", len(transports)) wantHTTP2Log := strings.Repeat("ended\n", len(transports)) if g, w := http1Log.String(), wantHTTP1Log; g != w { t.Errorf("HTTP1Log got\n\t%q\nwant\n\t%q", g, w) } if g, w := http2Log.String(), wantHTTP2Log; g != w { t.Errorf("HTTP2Log got\n\t%q\nwant\n\t%q", g, w) } } func testHealthEndpointSkipArray(r *http.Request) bool { for _, toSkip := range []string{"/health", "/metrics"} { if r.URL.Path == toSkip { return true } } return false } func TestIgnoreHealthEndpoints(t *testing.T) { var spans int client := &http.Client{} tests := []struct { path string healthEndpointFunc func(*http.Request) bool }{ {"/healthz", nil}, {"/_ah/health", nil}, {"/healthz", testHealthEndpointSkipArray}, {"/_ah/health", testHealthEndpointSkipArray}, {"/health", testHealthEndpointSkipArray}, {"/metrics", testHealthEndpointSkipArray}, } for _, tt := range tests { t.Run(tt.path, func(t *testing.T) { ts := httptest.NewServer(&Handler{ Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { span := trace.FromContext(r.Context()) if span != nil { spans++ } fmt.Fprint(w, "ok") }), StartOptions: trace.StartOptions{ Sampler: trace.AlwaysSample(), }, IsHealthEndpoint: tt.healthEndpointFunc, }) defer ts.Close() resp, err := client.Get(ts.URL + tt.path) if err != nil { t.Fatalf("Cannot GET %q: %v", tt.path, err) } b, err := ioutil.ReadAll(resp.Body) if err != nil { t.Fatalf("Cannot read body for %q: %v", tt.path, err) } if got, want := string(b), "ok"; got != want { t.Fatalf("Body for %q = %q; want %q", tt.path, got, want) } resp.Body.Close() }) } if spans > 0 { t.Errorf("Got %v spans; want no spans", spans) } } opencensus-go-0.24.0/plugin/ochttp/span_annotating_client_trace.go000066400000000000000000000120001433102037600253770ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ochttp import ( "crypto/tls" "net/http" "net/http/httptrace" "strings" "go.opencensus.io/trace" ) type spanAnnotator struct { sp *trace.Span } // TODO: Remove NewSpanAnnotator at the next release. // NewSpanAnnotator returns a httptrace.ClientTrace which annotates // all emitted httptrace events on the provided Span. 
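//
// Either constructor is normally wired into an ochttp.Transport via its
// NewClientTrace field, e.g. (a sketch):
//
//	tr := &ochttp.Transport{NewClientTrace: ochttp.NewSpanAnnotatingClientTrace}
//	client := &http.Client{Transport: tr}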
// Deprecated: Use NewSpanAnnotatingClientTrace instead func NewSpanAnnotator(r *http.Request, s *trace.Span) *httptrace.ClientTrace { return NewSpanAnnotatingClientTrace(r, s) } // NewSpanAnnotatingClientTrace returns a httptrace.ClientTrace which annotates // all emitted httptrace events on the provided Span. func NewSpanAnnotatingClientTrace(_ *http.Request, s *trace.Span) *httptrace.ClientTrace { sa := spanAnnotator{sp: s} return &httptrace.ClientTrace{ GetConn: sa.getConn, GotConn: sa.gotConn, PutIdleConn: sa.putIdleConn, GotFirstResponseByte: sa.gotFirstResponseByte, Got100Continue: sa.got100Continue, DNSStart: sa.dnsStart, DNSDone: sa.dnsDone, ConnectStart: sa.connectStart, ConnectDone: sa.connectDone, TLSHandshakeStart: sa.tlsHandshakeStart, TLSHandshakeDone: sa.tlsHandshakeDone, WroteHeaders: sa.wroteHeaders, Wait100Continue: sa.wait100Continue, WroteRequest: sa.wroteRequest, } } func (s spanAnnotator) getConn(hostPort string) { attrs := []trace.Attribute{ trace.StringAttribute("httptrace.get_connection.host_port", hostPort), } s.sp.Annotate(attrs, "GetConn") } func (s spanAnnotator) gotConn(info httptrace.GotConnInfo) { attrs := []trace.Attribute{ trace.BoolAttribute("httptrace.got_connection.reused", info.Reused), trace.BoolAttribute("httptrace.got_connection.was_idle", info.WasIdle), } if info.WasIdle { attrs = append(attrs, trace.StringAttribute("httptrace.got_connection.idle_time", info.IdleTime.String())) } s.sp.Annotate(attrs, "GotConn") } // PutIdleConn implements a httptrace.ClientTrace hook func (s spanAnnotator) putIdleConn(err error) { var attrs []trace.Attribute if err != nil { attrs = append(attrs, trace.StringAttribute("httptrace.put_idle_connection.error", err.Error())) } s.sp.Annotate(attrs, "PutIdleConn") } func (s spanAnnotator) gotFirstResponseByte() { s.sp.Annotate(nil, "GotFirstResponseByte") } func (s spanAnnotator) got100Continue() { s.sp.Annotate(nil, "Got100Continue") } func (s spanAnnotator) dnsStart(info httptrace.DNSStartInfo) { attrs := []trace.Attribute{ trace.StringAttribute("httptrace.dns_start.host", info.Host), } s.sp.Annotate(attrs, "DNSStart") } func (s spanAnnotator) dnsDone(info httptrace.DNSDoneInfo) { var addrs []string for _, addr := range info.Addrs { addrs = append(addrs, addr.String()) } attrs := []trace.Attribute{ trace.StringAttribute("httptrace.dns_done.addrs", strings.Join(addrs, " , ")), } if info.Err != nil { attrs = append(attrs, trace.StringAttribute("httptrace.dns_done.error", info.Err.Error())) } s.sp.Annotate(attrs, "DNSDone") } func (s spanAnnotator) connectStart(network, addr string) { attrs := []trace.Attribute{ trace.StringAttribute("httptrace.connect_start.network", network), trace.StringAttribute("httptrace.connect_start.addr", addr), } s.sp.Annotate(attrs, "ConnectStart") } func (s spanAnnotator) connectDone(network, addr string, err error) { attrs := []trace.Attribute{ trace.StringAttribute("httptrace.connect_done.network", network), trace.StringAttribute("httptrace.connect_done.addr", addr), } if err != nil { attrs = append(attrs, trace.StringAttribute("httptrace.connect_done.error", err.Error())) } s.sp.Annotate(attrs, "ConnectDone") } func (s spanAnnotator) tlsHandshakeStart() { s.sp.Annotate(nil, "TLSHandshakeStart") } func (s spanAnnotator) tlsHandshakeDone(_ tls.ConnectionState, err error) { var attrs []trace.Attribute if err != nil { attrs = append(attrs, trace.StringAttribute("httptrace.tls_handshake_done.error", err.Error())) } s.sp.Annotate(attrs, "TLSHandshakeDone") } func (s spanAnnotator) wroteHeaders() { 
s.sp.Annotate(nil, "WroteHeaders") } func (s spanAnnotator) wait100Continue() { s.sp.Annotate(nil, "Wait100Continue") } func (s spanAnnotator) wroteRequest(info httptrace.WroteRequestInfo) { var attrs []trace.Attribute if info.Err != nil { attrs = append(attrs, trace.StringAttribute("httptrace.wrote_request.error", info.Err.Error())) } s.sp.Annotate(attrs, "WroteRequest") } opencensus-go-0.24.0/plugin/ochttp/span_annotating_client_trace_test.go000066400000000000000000000051631433102037600264520ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ochttp_test import ( "errors" "net/http" "net/http/httptest" "strings" "sync" "testing" "go.opencensus.io/plugin/ochttp" "go.opencensus.io/trace" ) func TestSpanAnnotatingClientTrace(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { resp.Write([]byte("Hello, world!")) })) defer server.Close() recorder := &testExporter{} trace.RegisterExporter(recorder) tr := ochttp.Transport{ NewClientTrace: ochttp.NewSpanAnnotatingClientTrace, StartOptions: trace.StartOptions{ Sampler: trace.AlwaysSample(), }, } req, err := http.NewRequest("POST", server.URL, strings.NewReader("req-body")) if err != nil { t.Errorf("error creating request: %v", err) } resp, err := tr.RoundTrip(req) if err != nil { t.Errorf("response error: %v", err) } if err := resp.Body.Close(); err != nil { t.Errorf("error closing response body: %v", err) } if got, want := resp.StatusCode, 200; got != want { t.Errorf("resp.StatusCode=%d; want=%d", got, want) } if got, want := len(recorder.spans), 1; got != want { t.Fatalf("span count=%d; want=%d", got, want) } var annotations []string for _, annotation := range recorder.spans[0].Annotations { annotations = append(annotations, annotation.Message) } required := []string{ "GetConn", "GotConn", "GotFirstResponseByte", "ConnectStart", "ConnectDone", "WroteHeaders", "WroteRequest", } if errs := requiredAnnotations(required, annotations); len(errs) > 0 { for _, err := range errs { t.Error(err) } } } type testExporter struct { mu sync.Mutex spans []*trace.SpanData } func (t *testExporter) ExportSpan(s *trace.SpanData) { t.mu.Lock() t.spans = append(t.spans, s) t.mu.Unlock() } func requiredAnnotations(required []string, list []string) []error { var errs []error for _, item := range required { var found bool for _, v := range list { if v == item { found = true } } if !found { errs = append(errs, errors.New("missing expected annotation: "+item)) } } return errs } opencensus-go-0.24.0/plugin/ochttp/stats.go000066400000000000000000000254011433102037600206470ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ochttp import ( "go.opencensus.io/stats" "go.opencensus.io/stats/view" "go.opencensus.io/tag" ) // Deprecated: client HTTP measures. var ( // Deprecated: Use a Count aggregation over one of the other client measures to achieve the same effect. ClientRequestCount = stats.Int64( "opencensus.io/http/client/request_count", "Number of HTTP requests started", stats.UnitDimensionless) // Deprecated: Use ClientSentBytes. ClientRequestBytes = stats.Int64( "opencensus.io/http/client/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes) // Deprecated: Use ClientReceivedBytes. ClientResponseBytes = stats.Int64( "opencensus.io/http/client/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes) // Deprecated: Use ClientRoundtripLatency. ClientLatency = stats.Float64( "opencensus.io/http/client/latency", "End-to-end latency", stats.UnitMilliseconds) ) // The following client HTTP measures are supported for use in custom views. var ( ClientSentBytes = stats.Int64( "opencensus.io/http/client/sent_bytes", "Total bytes sent in request body (not including headers)", stats.UnitBytes, ) ClientReceivedBytes = stats.Int64( "opencensus.io/http/client/received_bytes", "Total bytes received in response bodies (not including headers but including error responses with bodies)", stats.UnitBytes, ) ClientRoundtripLatency = stats.Float64( "opencensus.io/http/client/roundtrip_latency", "Time between first byte of request headers sent to last byte of response received, or terminal error", stats.UnitMilliseconds, ) ) // The following server HTTP measures are supported for use in custom views: var ( ServerRequestCount = stats.Int64( "opencensus.io/http/server/request_count", "Number of HTTP requests started", stats.UnitDimensionless) ServerRequestBytes = stats.Int64( "opencensus.io/http/server/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes) ServerResponseBytes = stats.Int64( "opencensus.io/http/server/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes) ServerLatency = stats.Float64( "opencensus.io/http/server/latency", "End-to-end latency", stats.UnitMilliseconds) ) // The following tags are applied to stats recorded by this package. Host, Path // and Method are applied to all measures. StatusCode is not applied to // ClientRequestCount or ServerRequestCount, since it is recorded before the status is known. var ( // Host is the value of the HTTP Host header. // // The value of this tag can be controlled by the HTTP client, so you need // to watch out for potentially generating high-cardinality labels in your // metrics backend if you use this tag in views. Host = tag.MustNewKey("http.host") // StatusCode is the numeric HTTP response status code, // or "error" if a transport error occurred and no status code was read. StatusCode = tag.MustNewKey("http.status") // Path is the URL path (not including query string) in the request. 
// // The value of this tag can be controlled by the HTTP client, so you need // to watch out for potentially generating high-cardinality labels in your // metrics backend if you use this tag in views. Path = tag.MustNewKey("http.path") // Method is the HTTP method of the request, capitalized (GET, POST, etc.). Method = tag.MustNewKey("http.method") // KeyServerRoute is a low cardinality string representing the logical // handler of the request. This is usually the pattern registered on the a // ServeMux (or similar string). KeyServerRoute = tag.MustNewKey("http_server_route") ) // Client tag keys. var ( // KeyClientMethod is the HTTP method, capitalized (i.e. GET, POST, PUT, DELETE, etc.). KeyClientMethod = tag.MustNewKey("http_client_method") // KeyClientPath is the URL path (not including query string). KeyClientPath = tag.MustNewKey("http_client_path") // KeyClientStatus is the HTTP status code as an integer (e.g. 200, 404, 500.), or "error" if no response status line was received. KeyClientStatus = tag.MustNewKey("http_client_status") // KeyClientHost is the value of the request Host header. KeyClientHost = tag.MustNewKey("http_client_host") ) // Default distributions used by views in this package. var ( DefaultSizeDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) DefaultLatencyDistribution = view.Distribution(1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) ) // Package ochttp provides some convenience views for client measures. // You still need to register these views for data to actually be collected. var ( ClientSentBytesDistribution = &view.View{ Name: "opencensus.io/http/client/sent_bytes", Measure: ClientSentBytes, Aggregation: DefaultSizeDistribution, Description: "Total bytes sent in request body (not including headers), by HTTP method and response status", TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, } ClientReceivedBytesDistribution = &view.View{ Name: "opencensus.io/http/client/received_bytes", Measure: ClientReceivedBytes, Aggregation: DefaultSizeDistribution, Description: "Total bytes received in response bodies (not including headers but including error responses with bodies), by HTTP method and response status", TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, } ClientRoundtripLatencyDistribution = &view.View{ Name: "opencensus.io/http/client/roundtrip_latency", Measure: ClientRoundtripLatency, Aggregation: DefaultLatencyDistribution, Description: "End-to-end latency, by HTTP method and response status", TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, } ClientCompletedCount = &view.View{ Name: "opencensus.io/http/client/completed_count", Measure: ClientRoundtripLatency, Aggregation: view.Count(), Description: "Count of completed requests, by HTTP method and response status", TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, } ) // Deprecated: Old client Views. var ( // Deprecated: No direct replacement, but see ClientCompletedCount. ClientRequestCountView = &view.View{ Name: "opencensus.io/http/client/request_count", Description: "Count of HTTP requests started", Measure: ClientRequestCount, Aggregation: view.Count(), } // Deprecated: Use ClientSentBytesDistribution. 
ClientRequestBytesView = &view.View{ Name: "opencensus.io/http/client/request_bytes", Description: "Size distribution of HTTP request body", Measure: ClientSentBytes, Aggregation: DefaultSizeDistribution, } // Deprecated: Use ClientReceivedBytesDistribution instead. ClientResponseBytesView = &view.View{ Name: "opencensus.io/http/client/response_bytes", Description: "Size distribution of HTTP response body", Measure: ClientReceivedBytes, Aggregation: DefaultSizeDistribution, } // Deprecated: Use ClientRoundtripLatencyDistribution instead. ClientLatencyView = &view.View{ Name: "opencensus.io/http/client/latency", Description: "Latency distribution of HTTP requests", Measure: ClientRoundtripLatency, Aggregation: DefaultLatencyDistribution, } // Deprecated: Use ClientCompletedCount instead. ClientRequestCountByMethod = &view.View{ Name: "opencensus.io/http/client/request_count_by_method", Description: "Client request count by HTTP method", TagKeys: []tag.Key{Method}, Measure: ClientSentBytes, Aggregation: view.Count(), } // Deprecated: Use ClientCompletedCount instead. ClientResponseCountByStatusCode = &view.View{ Name: "opencensus.io/http/client/response_count_by_status_code", Description: "Client response count by status code", TagKeys: []tag.Key{StatusCode}, Measure: ClientRoundtripLatency, Aggregation: view.Count(), } ) // Package ochttp provides some convenience views for server measures. // You still need to register these views for data to actually be collected. var ( ServerRequestCountView = &view.View{ Name: "opencensus.io/http/server/request_count", Description: "Count of HTTP requests started", Measure: ServerRequestCount, Aggregation: view.Count(), } ServerRequestBytesView = &view.View{ Name: "opencensus.io/http/server/request_bytes", Description: "Size distribution of HTTP request body", Measure: ServerRequestBytes, Aggregation: DefaultSizeDistribution, } ServerResponseBytesView = &view.View{ Name: "opencensus.io/http/server/response_bytes", Description: "Size distribution of HTTP response body", Measure: ServerResponseBytes, Aggregation: DefaultSizeDistribution, } ServerLatencyView = &view.View{ Name: "opencensus.io/http/server/latency", Description: "Latency distribution of HTTP requests", Measure: ServerLatency, Aggregation: DefaultLatencyDistribution, } ServerRequestCountByMethod = &view.View{ Name: "opencensus.io/http/server/request_count_by_method", Description: "Server request count by HTTP method", TagKeys: []tag.Key{Method}, Measure: ServerRequestCount, Aggregation: view.Count(), } ServerResponseCountByStatusCode = &view.View{ Name: "opencensus.io/http/server/response_count_by_status_code", Description: "Server response count by status code", TagKeys: []tag.Key{StatusCode}, Measure: ServerLatency, Aggregation: view.Count(), } ) // DefaultClientViews are the default client views provided by this package. // Deprecated: No replacement. Register the views you would like individually. var DefaultClientViews = []*view.View{ ClientRequestCountView, ClientRequestBytesView, ClientResponseBytesView, ClientLatencyView, ClientRequestCountByMethod, ClientResponseCountByStatusCode, } // DefaultServerViews are the default server views provided by this package. // Deprecated: No replacement. Register the views you would like individually. 
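//
// For example, a sketch of registering only the views you need:
//
//	if err := view.Register(ochttp.ServerRequestCountView, ochttp.ServerLatencyView); err != nil {
//		// handle the registration error
//	}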
var DefaultServerViews = []*view.View{ ServerRequestCountView, ServerRequestBytesView, ServerResponseBytesView, ServerLatencyView, ServerRequestCountByMethod, ServerResponseCountByStatusCode, } opencensus-go-0.24.0/plugin/ochttp/stats_test.go000066400000000000000000000045261433102037600217130ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ochttp import ( "reflect" "strings" "testing" "go.opencensus.io/stats" "go.opencensus.io/stats/view" "go.opencensus.io/tag" ) func TestClientViews(t *testing.T) { for _, v := range []*view.View{ ClientSentBytesDistribution, ClientReceivedBytesDistribution, ClientRoundtripLatencyDistribution, ClientCompletedCount, } { if v.Measure == nil { t.Fatalf("nil measure: %v", v) } if m := v.Measure.Name(); !strings.HasPrefix(m, "opencensus.io/http/client/") { t.Errorf("Unexpected measure name prefix: %v", v) } if v.Name == "" { t.Errorf("Empty name: %v", v) } if !strings.HasPrefix(v.Name, "opencensus.io/http/client/") { t.Errorf("Unexpected prefix: %s", v.Name) } if v.Description == "" { t.Errorf("Empty description: %s", v.Name) } if !reflect.DeepEqual(v.TagKeys, []tag.Key{KeyClientMethod, KeyClientStatus}) { t.Errorf("Unexpected tags for client view %s: %v", v.Name, v.TagKeys) } if strings.HasSuffix(v.Description, ".") { t.Errorf("View description should not end with a period: %s", v.Name) } } } func TestClientTagKeys(t *testing.T) { for _, k := range []tag.Key{ KeyClientStatus, KeyClientMethod, KeyClientHost, KeyClientPath, } { if !strings.HasPrefix(k.Name(), "http_client_") { t.Errorf("Unexpected prefix: %s", k.Name()) } } } func TestClientMeasures(t *testing.T) { for _, m := range []stats.Measure{ ClientSentBytes, ClientReceivedBytes, ClientRoundtripLatency, } { if !strings.HasPrefix(m.Name(), "opencensus.io/http/client/") { t.Errorf("Unexpected prefix: %v", m) } if strings.HasSuffix(m.Description(), ".") { t.Errorf("View description should not end with a period: %s", m.Name()) } if len(m.Unit()) == 0 { t.Errorf("No unit: %s", m.Name()) } } } opencensus-go-0.24.0/plugin/ochttp/testdata/000077500000000000000000000000001433102037600207715ustar00rootroot00000000000000opencensus-go-0.24.0/plugin/ochttp/testdata/download-test-cases.sh000077500000000000000000000004131433102037600252060ustar00rootroot00000000000000# This script downloads latest test cases from specs # TODO: change the link to when test cases are merged to specs repo curl https://raw.githubusercontent.com/census-instrumentation/opencensus-specs/master/trace/http-out-test-cases.json -o http-out-test-cases.jsonopencensus-go-0.24.0/plugin/ochttp/testdata/http-out-test-cases.json000066400000000000000000000171371433102037600255320ustar00rootroot00000000000000[ { "name": "Successful GET call to https://example.com", "method": "GET", "url": "https://example.com/", "spanName": "/", "spanStatus": "OK", "spanKind": "Client", "spanAttributes": { "http.path": "/", "http.method": "GET", "http.host": "example.com", "http.status_code": "200", 
"http.url": "https://example.com/" } }, { "name": "Successfully POST call to https://example.com", "method": "POST", "url": "https://example.com/", "spanName": "/", "spanStatus": "OK", "spanKind": "Client", "spanAttributes": { "http.path": "/", "http.method": "POST", "http.host": "example.com", "http.status_code": "200", "http.url": "https://example.com/" } }, { "name": "Name is populated as a path", "method": "GET", "url": "http://{host}:{port}/path/to/resource/", "responseCode": 200, "spanName": "/path/to/resource/", "spanStatus": "OK", "spanKind": "Client", "spanAttributes": { "http.path": "/path/to/resource/", "http.method": "GET", "http.host": "{host}:{port}", "http.status_code": "200", "http.url": "http://{host}:{port}/path/to/resource/" } }, { "name": "Call that cannot resolve DNS will be reported as error span", "method": "GET", "url": "https://sdlfaldfjalkdfjlkajdflkajlsdjf.sdlkjafsdjfalfadslkf.com/", "spanName": "/", "spanStatus": "UNKNOWN", "spanKind": "Client", "spanAttributes": { "http.path": "/", "http.method": "GET", "http.host": "sdlfaldfjalkdfjlkajdflkajlsdjf.sdlkjafsdjfalfadslkf.com", "http.url": "https://sdlfaldfjalkdfjlkajdflkajlsdjf.sdlkjafsdjfalfadslkf.com/" } }, { "name": "Response code: 199. This test case is not possible to implement on some platforms as they don't allow to return this status code. Keeping this test case for visibility, but it actually simply a fallback into 200 test case", "method": "GET", "url": "http://{host}:{port}/", "responseCode": 200, "spanName": "/", "spanStatus": "OK", "spanKind": "Client", "spanAttributes": { "http.path": "/", "http.method": "GET", "http.host": "{host}:{port}", "http.status_code": "200", "http.url": "http://{host}:{port}/" } }, { "name": "Response code: 200", "method": "GET", "url": "http://{host}:{port}/", "responseCode": 200, "spanName": "/", "spanStatus": "OK", "spanKind": "Client", "spanAttributes": { "http.path": "/", "http.method": "GET", "http.host": "{host}:{port}", "http.status_code": "200", "http.url": "http://{host}:{port}/" } }, { "name": "Response code: 399", "method": "GET", "url": "http://{host}:{port}/", "responseCode": 399, "spanName": "/", "spanStatus": "OK", "spanKind": "Client", "spanAttributes": { "http.path": "/", "http.method": "GET", "http.host": "{host}:{port}", "http.status_code": "399", "http.url": "http://{host}:{port}/" } }, { "name": "Response code: 400", "method": "GET", "url": "http://{host}:{port}/", "responseCode": 400, "spanName": "/", "spanStatus": "INVALID_ARGUMENT", "spanKind": "Client", "spanAttributes": { "http.path": "/", "http.method": "GET", "http.host": "{host}:{port}", "http.status_code": "400", "http.url": "http://{host}:{port}/" } }, { "name": "Response code: 401", "method": "GET", "url": "http://{host}:{port}/", "responseCode": 401, "spanName": "/", "spanStatus": "UNAUTHENTICATED", "spanKind": "Client", "spanAttributes": { "http.path": "/", "http.method": "GET", "http.host": "{host}:{port}", "http.status_code": "401", "http.url": "http://{host}:{port}/" } }, { "name": "Response code: 403", "method": "GET", "url": "http://{host}:{port}/", "responseCode": 403, "spanName": "/", "spanStatus": "PERMISSION_DENIED", "spanKind": "Client", "spanAttributes": { "http.path": "/", "http.method": "GET", "http.host": "{host}:{port}", "http.status_code": "403", "http.url": "http://{host}:{port}/" } }, { "name": "Response code: 404", "method": "GET", "url": "http://{host}:{port}/", "responseCode": 404, "spanName": "/", "spanStatus": "NOT_FOUND", "spanKind": "Client", "spanAttributes": { 
"http.path": "/", "http.method": "GET", "http.host": "{host}:{port}", "http.status_code": "404", "http.url": "http://{host}:{port}/" } }, { "name": "Response code: 429", "method": "GET", "url": "http://{host}:{port}/", "responseCode": 429, "spanName": "/", "spanStatus": "RESOURCE_EXHAUSTED", "spanKind": "Client", "spanAttributes": { "http.path": "/", "http.method": "GET", "http.host": "{host}:{port}", "http.status_code": "429", "http.url": "http://{host}:{port}/" } }, { "name": "Response code: 501", "method": "GET", "url": "http://{host}:{port}/", "responseCode": 501, "spanName": "/", "spanStatus": "UNIMPLEMENTED", "spanKind": "Client", "spanAttributes": { "http.path": "/", "http.method": "GET", "http.host": "{host}:{port}", "http.status_code": "501", "http.url": "http://{host}:{port}/" } }, { "name": "Response code: 503", "method": "GET", "url": "http://{host}:{port}/", "responseCode": 503, "spanName": "/", "spanStatus": "UNAVAILABLE", "spanKind": "Client", "spanAttributes": { "http.path": "/", "http.method": "GET", "http.host": "{host}:{port}", "http.status_code": "503", "http.url": "http://{host}:{port}/" } }, { "name": "Response code: 504", "method": "GET", "url": "http://{host}:{port}/", "responseCode": 504, "spanName": "/", "spanStatus": "DEADLINE_EXCEEDED", "spanKind": "Client", "spanAttributes": { "http.path": "/", "http.method": "GET", "http.host": "{host}:{port}", "http.status_code": "504", "http.url": "http://{host}:{port}/" } }, { "name": "Response code: 600", "method": "GET", "url": "http://{host}:{port}/", "responseCode": 600, "spanName": "/", "spanStatus": "UNKNOWN", "spanKind": "Client", "spanAttributes": { "http.path": "/", "http.method": "GET", "http.host": "{host}:{port}", "http.status_code": "600", "http.url": "http://{host}:{port}/" } }, { "name": "User agent attribute populated", "method": "GET", "url": "http://{host}:{port}/", "headers": { "User-Agent": "test-user-agent" }, "responseCode": 200, "spanName": "/", "spanStatus": "OK", "spanKind": "Client", "spanAttributes": { "http.path": "/", "http.method": "GET", "http.host": "{host}:{port}", "http.status_code": "200", "http.user_agent": "test-user-agent", "http.url": "http://{host}:{port}/" } } ]opencensus-go-0.24.0/plugin/ochttp/trace.go000066400000000000000000000166501433102037600206150ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ochttp import ( "io" "net/http" "net/http/httptrace" "go.opencensus.io/plugin/ochttp/propagation/b3" "go.opencensus.io/trace" "go.opencensus.io/trace/propagation" ) // TODO(jbd): Add godoc examples. var defaultFormat propagation.HTTPFormat = &b3.HTTPFormat{} // Attributes recorded on the span for the requests. // Only trace exporters will need them. 
const ( HostAttribute = "http.host" MethodAttribute = "http.method" PathAttribute = "http.path" URLAttribute = "http.url" UserAgentAttribute = "http.user_agent" StatusCodeAttribute = "http.status_code" ) type traceTransport struct { base http.RoundTripper startOptions trace.StartOptions format propagation.HTTPFormat formatSpanName func(*http.Request) string newClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace } // TODO(jbd): Add message events for request and response size. // RoundTrip creates a trace.Span and inserts it into the outgoing request's headers. // The created span can follow a parent span, if a parent is presented in // the request's context. func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { name := t.formatSpanName(req) // TODO(jbd): Discuss whether we want to prefix // outgoing requests with Sent. ctx, span := trace.StartSpan(req.Context(), name, trace.WithSampler(t.startOptions.Sampler), trace.WithSpanKind(trace.SpanKindClient)) if t.newClientTrace != nil { req = req.WithContext(httptrace.WithClientTrace(ctx, t.newClientTrace(req, span))) } else { req = req.WithContext(ctx) } if t.format != nil { // SpanContextToRequest will modify its Request argument, which is // contrary to the contract for http.RoundTripper, so we need to // pass it a copy of the Request. // However, the Request struct itself was already copied by // the WithContext calls above and so we just need to copy the header. header := make(http.Header) for k, v := range req.Header { header[k] = v } req.Header = header t.format.SpanContextToRequest(span.SpanContext(), req) } span.AddAttributes(requestAttrs(req)...) resp, err := t.base.RoundTrip(req) if err != nil { span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) span.End() return resp, err } span.AddAttributes(responseAttrs(resp)...) span.SetStatus(TraceStatus(resp.StatusCode, resp.Status)) // span.End() will be invoked after // a read from resp.Body returns io.EOF or when // resp.Body.Close() is invoked. bt := &bodyTracker{rc: resp.Body, span: span} resp.Body = wrappedBody(bt, resp.Body) return resp, err } // bodyTracker wraps a response.Body and invokes // trace.EndSpan on encountering io.EOF on reading // the body of the original response. type bodyTracker struct { rc io.ReadCloser span *trace.Span } var _ io.ReadCloser = (*bodyTracker)(nil) func (bt *bodyTracker) Read(b []byte) (int, error) { n, err := bt.rc.Read(b) switch err { case nil: return n, nil case io.EOF: bt.span.End() default: // For all other errors, set the span status bt.span.SetStatus(trace.Status{ // Code 2 is the error code for Internal server error. Code: 2, Message: err.Error(), }) } return n, err } func (bt *bodyTracker) Close() error { // Invoking endSpan on Close will help catch the cases // in which a read returned a non-nil error, we set the // span status but didn't end the span. bt.span.End() return bt.rc.Close() } // CancelRequest cancels an in-flight request by closing its connection. 
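// It exists to satisfy the optional canceler interface that net/http used for
// cancellation before request contexts, and simply forwards to the base
// RoundTripper when that transport supports it; callers on modern Go will
// usually prefer cancelling via the request's context instead.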
func (t *traceTransport) CancelRequest(req *http.Request) { type canceler interface { CancelRequest(*http.Request) } if cr, ok := t.base.(canceler); ok { cr.CancelRequest(req) } } func spanNameFromURL(req *http.Request) string { return req.URL.Path } func requestAttrs(r *http.Request) []trace.Attribute { userAgent := r.UserAgent() attrs := make([]trace.Attribute, 0, 5) attrs = append(attrs, trace.StringAttribute(PathAttribute, r.URL.Path), trace.StringAttribute(URLAttribute, r.URL.String()), trace.StringAttribute(HostAttribute, r.Host), trace.StringAttribute(MethodAttribute, r.Method), ) if userAgent != "" { attrs = append(attrs, trace.StringAttribute(UserAgentAttribute, userAgent)) } return attrs } func responseAttrs(resp *http.Response) []trace.Attribute { return []trace.Attribute{ trace.Int64Attribute(StatusCodeAttribute, int64(resp.StatusCode)), } } // TraceStatus is a utility to convert the HTTP status code to a trace.Status that // represents the outcome as closely as possible. func TraceStatus(httpStatusCode int, statusLine string) trace.Status { var code int32 if httpStatusCode < 200 || httpStatusCode >= 400 { code = trace.StatusCodeUnknown } switch httpStatusCode { case 499: code = trace.StatusCodeCancelled case http.StatusBadRequest: code = trace.StatusCodeInvalidArgument case http.StatusUnprocessableEntity: code = trace.StatusCodeInvalidArgument case http.StatusGatewayTimeout: code = trace.StatusCodeDeadlineExceeded case http.StatusNotFound: code = trace.StatusCodeNotFound case http.StatusForbidden: code = trace.StatusCodePermissionDenied case http.StatusUnauthorized: // 401 is actually unauthenticated. code = trace.StatusCodeUnauthenticated case http.StatusTooManyRequests: code = trace.StatusCodeResourceExhausted case http.StatusNotImplemented: code = trace.StatusCodeUnimplemented case http.StatusServiceUnavailable: code = trace.StatusCodeUnavailable case http.StatusOK: code = trace.StatusCodeOK case http.StatusConflict: code = trace.StatusCodeAlreadyExists } return trace.Status{Code: code, Message: codeToStr[code]} } var codeToStr = map[int32]string{ trace.StatusCodeOK: `OK`, trace.StatusCodeCancelled: `CANCELLED`, trace.StatusCodeUnknown: `UNKNOWN`, trace.StatusCodeInvalidArgument: `INVALID_ARGUMENT`, trace.StatusCodeDeadlineExceeded: `DEADLINE_EXCEEDED`, trace.StatusCodeNotFound: `NOT_FOUND`, trace.StatusCodeAlreadyExists: `ALREADY_EXISTS`, trace.StatusCodePermissionDenied: `PERMISSION_DENIED`, trace.StatusCodeResourceExhausted: `RESOURCE_EXHAUSTED`, trace.StatusCodeFailedPrecondition: `FAILED_PRECONDITION`, trace.StatusCodeAborted: `ABORTED`, trace.StatusCodeOutOfRange: `OUT_OF_RANGE`, trace.StatusCodeUnimplemented: `UNIMPLEMENTED`, trace.StatusCodeInternal: `INTERNAL`, trace.StatusCodeUnavailable: `UNAVAILABLE`, trace.StatusCodeDataLoss: `DATA_LOSS`, trace.StatusCodeUnauthenticated: `UNAUTHENTICATED`, } func isHealthEndpoint(path string) bool { // Health checking is pretty frequent and // traces collected for health endpoints // can be extremely noisy and expensive. // Disable canonical health checking endpoints // like /healthz and /_ah/health for now. if path == "/healthz" || path == "/_ah/health" { return true } return false } opencensus-go-0.24.0/plugin/ochttp/trace_test.go000066400000000000000000000452401433102037600216510ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ochttp import ( "bytes" "context" "encoding/hex" "encoding/json" "errors" "fmt" "io" "io/ioutil" "log" "net" "net/http" "net/http/httptest" "net/url" "reflect" "strings" "testing" "time" "go.opencensus.io/plugin/ochttp/propagation/b3" "go.opencensus.io/plugin/ochttp/propagation/tracecontext" "go.opencensus.io/trace" ) type testExporter struct { spans []*trace.SpanData } func (t *testExporter) ExportSpan(s *trace.SpanData) { t.spans = append(t.spans, s) } type testTransport struct { ch chan *http.Request } func (t *testTransport) RoundTrip(req *http.Request) (*http.Response, error) { t.ch <- req return nil, errors.New("noop") } type testPropagator struct{} func (t testPropagator) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { header := req.Header.Get("trace") buf, err := hex.DecodeString(header) if err != nil { log.Fatalf("Cannot decode trace header: %q", header) } r := bytes.NewReader(buf) r.Read(sc.TraceID[:]) r.Read(sc.SpanID[:]) opts, err := r.ReadByte() if err != nil { log.Fatalf("Cannot read trace options from trace header: %q", header) } sc.TraceOptions = trace.TraceOptions(opts) return sc, true } func (t testPropagator) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { var buf bytes.Buffer buf.Write(sc.TraceID[:]) buf.Write(sc.SpanID[:]) buf.WriteByte(byte(sc.TraceOptions)) req.Header.Set("trace", hex.EncodeToString(buf.Bytes())) } func TestTransport_RoundTrip_Race(t *testing.T) { // This tests that we don't modify the request in accordance with the // specification for http.RoundTripper. // We attempt to trigger a race by reading the request from a separate // goroutine. If the request is modified by Transport, this should trigger // the race detector. 
transport := &testTransport{ch: make(chan *http.Request, 1)} rt := &Transport{ Propagation: &testPropagator{}, Base: transport, } req, _ := http.NewRequest("GET", "http://foo.com", nil) go func() { fmt.Println(*req) }() rt.RoundTrip(req) _ = <-transport.ch } func TestTransport_RoundTrip(t *testing.T) { _, parent := trace.StartSpan(context.Background(), "parent") tests := []struct { name string parent *trace.Span }{ { name: "no parent", parent: nil, }, { name: "parent", parent: parent, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { transport := &testTransport{ch: make(chan *http.Request, 1)} rt := &Transport{ Propagation: &testPropagator{}, Base: transport, } req, _ := http.NewRequest("GET", "http://foo.com", nil) if tt.parent != nil { req = req.WithContext(trace.NewContext(req.Context(), tt.parent)) } rt.RoundTrip(req) req = <-transport.ch span := trace.FromContext(req.Context()) if header := req.Header.Get("trace"); header == "" { t.Fatalf("Trace header = empty; want valid trace header") } if span == nil { t.Fatalf("Got no spans in req context; want one") } if tt.parent != nil { if got, want := span.SpanContext().TraceID, tt.parent.SpanContext().TraceID; got != want { t.Errorf("span.SpanContext().TraceID=%v; want %v", got, want) } } }) } } func TestHandler(t *testing.T) { traceID := [16]byte{16, 84, 69, 170, 120, 67, 188, 139, 242, 6, 177, 32, 0, 16, 0, 0} tests := []struct { header string wantTraceID trace.TraceID wantTraceOptions trace.TraceOptions }{ { header: "105445aa7843bc8bf206b12000100000000000000000000000", wantTraceID: traceID, wantTraceOptions: trace.TraceOptions(0), }, { header: "105445aa7843bc8bf206b12000100000000000000000000001", wantTraceID: traceID, wantTraceOptions: trace.TraceOptions(1), }, } for _, tt := range tests { t.Run(tt.header, func(t *testing.T) { handler := &Handler{ Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { span := trace.FromContext(r.Context()) sc := span.SpanContext() if got, want := sc.TraceID, tt.wantTraceID; got != want { t.Errorf("TraceID = %q; want %q", got, want) } if got, want := sc.TraceOptions, tt.wantTraceOptions; got != want { t.Errorf("TraceOptions = %v; want %v", got, want) } }), StartOptions: trace.StartOptions{Sampler: trace.ProbabilitySampler(0.0)}, Propagation: &testPropagator{}, } req, _ := http.NewRequest("GET", "http://foo.com", nil) req.Header.Add("trace", tt.header) handler.ServeHTTP(nil, req) }) } } var _ http.RoundTripper = (*traceTransport)(nil) type collector []*trace.SpanData func (c *collector) ExportSpan(s *trace.SpanData) { *c = append(*c, s) } func TestEndToEnd(t *testing.T) { tc := []struct { name string handler *Handler transport *Transport wantSameTraceID bool wantLinks bool // expect a link between client and server span }{ { name: "internal default propagation", handler: &Handler{}, transport: &Transport{}, wantSameTraceID: true, }, { name: "external default propagation", handler: &Handler{IsPublicEndpoint: true}, transport: &Transport{}, wantSameTraceID: false, wantLinks: true, }, { name: "internal TraceContext propagation", handler: &Handler{Propagation: &tracecontext.HTTPFormat{}}, transport: &Transport{Propagation: &tracecontext.HTTPFormat{}}, wantSameTraceID: true, }, { name: "misconfigured propagation", handler: &Handler{IsPublicEndpoint: true, Propagation: &tracecontext.HTTPFormat{}}, transport: &Transport{Propagation: &b3.HTTPFormat{}}, wantSameTraceID: false, wantLinks: false, }, } for _, tt := range tc { t.Run(tt.name, func(t *testing.T) { var spans collector 
trace.RegisterExporter(&spans) defer trace.UnregisterExporter(&spans) // Start the server. serverDone := make(chan struct{}) serverReturn := make(chan time.Time) tt.handler.StartOptions.Sampler = trace.AlwaysSample() url := serveHTTP(tt.handler, serverDone, serverReturn, 200) ctx := context.Background() // Make the request. req, err := http.NewRequest( http.MethodPost, fmt.Sprintf("%s/example/url/path?qparam=val", url), strings.NewReader("expected-request-body")) if err != nil { t.Fatal(err) } req = req.WithContext(ctx) tt.transport.StartOptions.Sampler = trace.AlwaysSample() c := &http.Client{ Transport: tt.transport, } resp, err := c.Do(req) if err != nil { t.Fatal(err) } if resp.StatusCode != http.StatusOK { t.Fatalf("resp.StatusCode = %d", resp.StatusCode) } // Tell the server to return from request handling. serverReturn <- time.Now().Add(time.Millisecond) respBody, err := ioutil.ReadAll(resp.Body) if err != nil { t.Fatal(err) } if got, want := string(respBody), "expected-response"; got != want { t.Fatalf("respBody = %q; want %q", got, want) } resp.Body.Close() <-serverDone trace.UnregisterExporter(&spans) if got, want := len(spans), 2; got != want { t.Fatalf("len(spans) = %d; want %d", got, want) } var client, server *trace.SpanData for _, sp := range spans { switch sp.SpanKind { case trace.SpanKindClient: client = sp if got, want := client.Name, "/example/url/path"; got != want { t.Errorf("Span name: %q; want %q", got, want) } case trace.SpanKindServer: server = sp if got, want := server.Name, "/example/url/path"; got != want { t.Errorf("Span name: %q; want %q", got, want) } default: t.Fatalf("server or client span missing; kind = %v", sp.SpanKind) } } if tt.wantSameTraceID { if server.TraceID != client.TraceID { t.Errorf("TraceID does not match: server.TraceID=%q client.TraceID=%q", server.TraceID, client.TraceID) } if !server.HasRemoteParent { t.Errorf("server span should have remote parent") } if server.ParentSpanID != client.SpanID { t.Errorf("server span should have client span as parent") } } if !tt.wantSameTraceID { if server.TraceID == client.TraceID { t.Errorf("TraceID should not be trusted") } } if tt.wantLinks { if got, want := len(server.Links), 1; got != want { t.Errorf("len(server.Links) = %d; want %d", got, want) } else { link := server.Links[0] if got, want := link.Type, trace.LinkTypeParent; got != want { t.Errorf("link.Type = %v; want %v", got, want) } } } if server.StartTime.Before(client.StartTime) { t.Errorf("server span starts before client span") } if server.EndTime.After(client.EndTime) { t.Errorf("client span ends before server span") } }) } } func serveHTTP(handler *Handler, done chan struct{}, wait chan time.Time, statusCode int) string { handler.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(statusCode) w.(http.Flusher).Flush() // Simulate a slow-responding server. 
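// Note: the loop below guards against time.Sleep waking up early; it keeps
// sleeping until the requested return time has actually passed before the
// response body is written and the done channel is closed.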
sleepUntil := <-wait for time.Now().Before(sleepUntil) { time.Sleep(time.Until(sleepUntil)) } io.WriteString(w, "expected-response") close(done) }) server := httptest.NewServer(handler) go func() { <-done server.Close() }() return server.URL } func TestSpanNameFromURL(t *testing.T) { tests := []struct { u string want string }{ { u: "http://localhost:80/hello?q=a", want: "/hello", }, { u: "/a/b?q=c", want: "/a/b", }, } for _, tt := range tests { t.Run(tt.u, func(t *testing.T) { req, err := http.NewRequest("GET", tt.u, nil) if err != nil { t.Errorf("url issue = %v", err) } if got := spanNameFromURL(req); got != tt.want { t.Errorf("spanNameFromURL() = %v, want %v", got, tt.want) } }) } } func TestFormatSpanName(t *testing.T) { formatSpanName := func(r *http.Request) string { return r.Method + " " + r.URL.Path } handler := &Handler{ Handler: http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { resp.Write([]byte("Hello, world!")) }), FormatSpanName: formatSpanName, } server := httptest.NewServer(handler) defer server.Close() client := &http.Client{ Transport: &Transport{ FormatSpanName: formatSpanName, StartOptions: trace.StartOptions{ Sampler: trace.AlwaysSample(), }, }, } tests := []struct { u string want string }{ { u: "/hello?q=a", want: "GET /hello", }, { u: "/a/b?q=c", want: "GET /a/b", }, } for _, tt := range tests { t.Run(tt.u, func(t *testing.T) { var te testExporter trace.RegisterExporter(&te) res, err := client.Get(server.URL + tt.u) if err != nil { t.Fatalf("error creating request: %v", err) } res.Body.Close() trace.UnregisterExporter(&te) if want, got := 2, len(te.spans); want != got { t.Fatalf("got exported spans %#v, wanted two spans", te.spans) } if got := te.spans[0].Name; got != tt.want { t.Errorf("spanNameFromURL() = %v, want %v", got, tt.want) } if got := te.spans[1].Name; got != tt.want { t.Errorf("spanNameFromURL() = %v, want %v", got, tt.want) } }) } } func TestRequestAttributes(t *testing.T) { tests := []struct { name string makeReq func() *http.Request wantAttrs []trace.Attribute }{ { name: "GET example.com/hello", makeReq: func() *http.Request { req, _ := http.NewRequest("GET", "http://example.com:779/hello", nil) req.Header.Add("User-Agent", "ua") return req }, wantAttrs: []trace.Attribute{ trace.StringAttribute("http.path", "/hello"), trace.StringAttribute("http.url", "http://example.com:779/hello"), trace.StringAttribute("http.host", "example.com:779"), trace.StringAttribute("http.method", "GET"), trace.StringAttribute("http.user_agent", "ua"), }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { req := tt.makeReq() attrs := requestAttrs(req) if got, want := attrs, tt.wantAttrs; !reflect.DeepEqual(got, want) { t.Errorf("Request attributes = %#v; want %#v", got, want) } }) } } func TestResponseAttributes(t *testing.T) { tests := []struct { name string resp *http.Response wantAttrs []trace.Attribute }{ { name: "non-zero HTTP 200 response", resp: &http.Response{StatusCode: 200}, wantAttrs: []trace.Attribute{ trace.Int64Attribute("http.status_code", 200), }, }, { name: "zero HTTP 500 response", resp: &http.Response{StatusCode: 500}, wantAttrs: []trace.Attribute{ trace.Int64Attribute("http.status_code", 500), }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { attrs := responseAttrs(tt.resp) if got, want := attrs, tt.wantAttrs; !reflect.DeepEqual(got, want) { t.Errorf("Response attributes = %#v; want %#v", got, want) } }) } } type TestCase struct { Name string Method string URL string Headers map[string]string 
ResponseCode int SpanName string SpanStatus string SpanKind string SpanAttributes map[string]string } func TestAgainstSpecs(t *testing.T) { fmt.Println("start") dat, err := ioutil.ReadFile("testdata/http-out-test-cases.json") if err != nil { t.Fatalf("error reading file: %v", err) } tests := make([]TestCase, 0) err = json.Unmarshal(dat, &tests) if err != nil { t.Fatalf("error parsing json: %v", err) } trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) for _, tt := range tests { t.Run(tt.Name, func(t *testing.T) { var spans collector trace.RegisterExporter(&spans) defer trace.UnregisterExporter(&spans) handler := &Handler{} transport := &Transport{} serverDone := make(chan struct{}) serverReturn := make(chan time.Time) host := "" port := "" serverRequired := strings.Contains(tt.URL, "{") if serverRequired { // Start the server. localServerURL := serveHTTP(handler, serverDone, serverReturn, tt.ResponseCode) u, _ := url.Parse(localServerURL) host, port, _ = net.SplitHostPort(u.Host) tt.URL = strings.Replace(tt.URL, "{host}", host, 1) tt.URL = strings.Replace(tt.URL, "{port}", port, 1) } // Start a root Span in the client. ctx, _ := trace.StartSpan( context.Background(), "top-level") // Make the request. req, err := http.NewRequest( tt.Method, tt.URL, nil) for headerName, headerValue := range tt.Headers { req.Header.Add(headerName, headerValue) } if err != nil { t.Fatal(err) } req = req.WithContext(ctx) resp, err := transport.RoundTrip(req) if err != nil { // do not fail. We want to validate DNS issues //t.Fatal(err) } if serverRequired { // Tell the server to return from request handling. serverReturn <- time.Now().Add(time.Millisecond) } if resp != nil { // If it simply closes body without reading // synchronization problem may happen for spans slice. 
// Server span and client span will write themselves // at the same time ioutil.ReadAll(resp.Body) resp.Body.Close() if serverRequired { <-serverDone } } trace.UnregisterExporter(&spans) var client *trace.SpanData for _, sp := range spans { if sp.SpanKind == trace.SpanKindClient { client = sp } } if client.Name != tt.SpanName { t.Errorf("span names don't match: expected: %s, actual: %s", tt.SpanName, client.Name) } spanKindToStr := map[int]string{ trace.SpanKindClient: "Client", trace.SpanKindServer: "Server", } if !strings.EqualFold(codeToStr[client.Status.Code], tt.SpanStatus) { t.Errorf("span status don't match: expected: %s, actual: %d (%s)", tt.SpanStatus, client.Status.Code, codeToStr[client.Status.Code]) } if !strings.EqualFold(spanKindToStr[client.SpanKind], tt.SpanKind) { t.Errorf("span kind don't match: expected: %s, actual: %d (%s)", tt.SpanKind, client.SpanKind, spanKindToStr[client.SpanKind]) } normalizedActualAttributes := map[string]string{} for k, v := range client.Attributes { normalizedActualAttributes[k] = fmt.Sprintf("%v", v) } normalizedExpectedAttributes := map[string]string{} for k, v := range tt.SpanAttributes { normalizedValue := v normalizedValue = strings.Replace(normalizedValue, "{host}", host, 1) normalizedValue = strings.Replace(normalizedValue, "{port}", port, 1) normalizedExpectedAttributes[k] = normalizedValue } if got, want := normalizedActualAttributes, normalizedExpectedAttributes; !reflect.DeepEqual(got, want) { t.Errorf("Request attributes = %#v; want %#v", got, want) } }) } } func TestStatusUnitTest(t *testing.T) { tests := []struct { in int want trace.Status }{ {200, trace.Status{Code: trace.StatusCodeOK, Message: `OK`}}, {204, trace.Status{Code: trace.StatusCodeOK, Message: `OK`}}, {100, trace.Status{Code: trace.StatusCodeUnknown, Message: `UNKNOWN`}}, {500, trace.Status{Code: trace.StatusCodeUnknown, Message: `UNKNOWN`}}, {400, trace.Status{Code: trace.StatusCodeInvalidArgument, Message: `INVALID_ARGUMENT`}}, {422, trace.Status{Code: trace.StatusCodeInvalidArgument, Message: `INVALID_ARGUMENT`}}, {499, trace.Status{Code: trace.StatusCodeCancelled, Message: `CANCELLED`}}, {404, trace.Status{Code: trace.StatusCodeNotFound, Message: `NOT_FOUND`}}, {600, trace.Status{Code: trace.StatusCodeUnknown, Message: `UNKNOWN`}}, {401, trace.Status{Code: trace.StatusCodeUnauthenticated, Message: `UNAUTHENTICATED`}}, {403, trace.Status{Code: trace.StatusCodePermissionDenied, Message: `PERMISSION_DENIED`}}, {301, trace.Status{Code: trace.StatusCodeOK, Message: `OK`}}, {501, trace.Status{Code: trace.StatusCodeUnimplemented, Message: `UNIMPLEMENTED`}}, {409, trace.Status{Code: trace.StatusCodeAlreadyExists, Message: `ALREADY_EXISTS`}}, {429, trace.Status{Code: trace.StatusCodeResourceExhausted, Message: `RESOURCE_EXHAUSTED`}}, {503, trace.Status{Code: trace.StatusCodeUnavailable, Message: `UNAVAILABLE`}}, {504, trace.Status{Code: trace.StatusCodeDeadlineExceeded, Message: `DEADLINE_EXCEEDED`}}, } for _, tt := range tests { got, want := TraceStatus(tt.in, ""), tt.want if got != want { t.Errorf("status(%d) got = (%#v) want = (%#v)", tt.in, got, want) } } } opencensus-go-0.24.0/plugin/ochttp/wrapped_body.go000066400000000000000000000021201433102037600221610ustar00rootroot00000000000000// Copyright 2019, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package ochttp import ( "io" ) // wrappedBody returns a wrapped version of the original // Body and only implements the same combination of additional // interfaces as the original. func wrappedBody(wrapper io.ReadCloser, body io.ReadCloser) io.ReadCloser { var ( wr, i0 = body.(io.Writer) ) switch { case !i0: return struct { io.ReadCloser }{wrapper} case i0: return struct { io.ReadCloser io.Writer }{wrapper, wr} default: return struct { io.ReadCloser }{wrapper} } } opencensus-go-0.24.0/plugin/runmetrics/000077500000000000000000000000001433102037600200525ustar00rootroot00000000000000opencensus-go-0.24.0/plugin/runmetrics/doc.go000066400000000000000000000016061433102037600211510ustar00rootroot00000000000000// Copyright 2019, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package runmetrics contains support for runtime metrics. // // To enable collecting runtime metrics, just call Enable(): // // _ = runmetrics.Enable(runmetrics.RunMetricOptions{ // EnableCPU: true, // EnableMemory: true, // }) package runmetrics // import "go.opencensus.io/plugin/runmetrics" opencensus-go-0.24.0/plugin/runmetrics/example_test.go000066400000000000000000000043651433102037600231020ustar00rootroot00000000000000package runmetrics_test import ( "context" "fmt" "log" "sort" "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricexport" "go.opencensus.io/plugin/runmetrics" ) type printExporter struct { } func (l *printExporter) ExportMetrics(ctx context.Context, data []*metricdata.Metric) error { mapData := make(map[string]metricdata.Metric, 0) for _, v := range data { mapData[v.Descriptor.Name] = *v } mapKeys := make([]string, 0, len(mapData)) for key := range mapData { mapKeys = append(mapKeys, key) } sort.Strings(mapKeys) // for the sake of a simple example, we cannot use the real value here simpleVal := func(v interface{}) int { return 42 } for _, k := range mapKeys { v := mapData[k] fmt.Printf("%s %d\n", k, simpleVal(v.TimeSeries[0].Points[0].Value)) } return nil } func ExampleEnable() { // Enable collection of runtime metrics and supply options err := runmetrics.Enable(runmetrics.RunMetricOptions{ EnableCPU: true, EnableMemory: true, Prefix: "mayapp/", }) if err != nil { log.Fatal(err) } // Use your reader/exporter to extract values // This part is not specific to runtime metrics and is only here to make it a complete example.
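// The call below is a one-shot read: metricexport.Reader reads from the producers
// registered with the global metric producer manager (which includes the runmetrics
// producer enabled above) and hands the result to the supplied exporter.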
metricexport.NewReader().ReadAndExport(&printExporter{}) // output: // mayapp/process/cpu_cgo_calls 42 // mayapp/process/cpu_goroutines 42 // mayapp/process/gc_cpu_fraction 42 // mayapp/process/gc_sys 42 // mayapp/process/heap_alloc 42 // mayapp/process/heap_idle 42 // mayapp/process/heap_inuse 42 // mayapp/process/heap_objects 42 // mayapp/process/heap_release 42 // mayapp/process/last_gc_finished_timestamp 42 // mayapp/process/memory_alloc 42 // mayapp/process/memory_frees 42 // mayapp/process/memory_lookups 42 // mayapp/process/memory_malloc 42 // mayapp/process/next_gc_heap_size 42 // mayapp/process/num_forced_gc 42 // mayapp/process/num_gc 42 // mayapp/process/other_sys 42 // mayapp/process/pause_total 42 // mayapp/process/stack_inuse 42 // mayapp/process/stack_mcache_inuse 42 // mayapp/process/stack_mspan_inuse 42 // mayapp/process/sys_heap 42 // mayapp/process/sys_memory_alloc 42 // mayapp/process/sys_stack 42 // mayapp/process/sys_stack_mcache 42 // mayapp/process/sys_stack_mspan 42 // mayapp/process/total_memory_alloc 42 } opencensus-go-0.24.0/plugin/runmetrics/producer.go000066400000000000000000000503271433102037600222330ustar00rootroot00000000000000package runmetrics import ( "errors" "runtime" "sync" "time" "go.opencensus.io/metric" "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricproducer" ) type ( // producer produces runtime metrics. // // Enable collection of runtime metrics with Enable(). producer struct { options RunMetricOptions reg *metric.Registry deprecatedMemStats *deprecatedMemStats memStats *memStats cpuStats *cpuStats } // RunMetricOptions allows to configure runtime metrics. RunMetricOptions struct { EnableCPU bool // EnableCPU whether CPU metrics shall be recorded EnableMemory bool // EnableMemory whether memory metrics shall be recorded Prefix string // Prefix is a custom prefix for metric names UseDerivedCumulative bool // UseDerivedCumulative whether DerivedCumulative metrics should be used } deprecatedMemStats struct { memStats runtime.MemStats memAlloc *metric.Int64GaugeEntry memTotal *metric.Int64GaugeEntry memSys *metric.Int64GaugeEntry memLookups *metric.Int64GaugeEntry memMalloc *metric.Int64GaugeEntry memFrees *metric.Int64GaugeEntry heapAlloc *metric.Int64GaugeEntry heapSys *metric.Int64GaugeEntry heapIdle *metric.Int64GaugeEntry heapInuse *metric.Int64GaugeEntry heapObjects *metric.Int64GaugeEntry heapReleased *metric.Int64GaugeEntry stackInuse *metric.Int64GaugeEntry stackSys *metric.Int64GaugeEntry stackMSpanInuse *metric.Int64GaugeEntry stackMSpanSys *metric.Int64GaugeEntry stackMCacheInuse *metric.Int64GaugeEntry stackMCacheSys *metric.Int64GaugeEntry otherSys *metric.Int64GaugeEntry gcSys *metric.Int64GaugeEntry numGC *metric.Int64GaugeEntry numForcedGC *metric.Int64GaugeEntry nextGC *metric.Int64GaugeEntry lastGC *metric.Int64GaugeEntry pauseTotalNs *metric.Int64GaugeEntry gcCPUFraction *metric.Float64Entry } memStats struct { memStats runtime.MemStats memAlloc *metric.Int64GaugeEntry memTotal *metric.Int64DerivedCumulative memSys *metric.Int64GaugeEntry memLookups *metric.Int64DerivedCumulative memMalloc *metric.Int64DerivedCumulative memFrees *metric.Int64DerivedCumulative heapAlloc *metric.Int64GaugeEntry heapSys *metric.Int64GaugeEntry heapIdle *metric.Int64GaugeEntry heapInuse *metric.Int64GaugeEntry heapObjects *metric.Int64GaugeEntry heapReleased *metric.Int64DerivedCumulative stackInuse *metric.Int64GaugeEntry stackSys *metric.Int64GaugeEntry stackMSpanInuse *metric.Int64GaugeEntry stackMSpanSys 
*metric.Int64GaugeEntry stackMCacheInuse *metric.Int64GaugeEntry stackMCacheSys *metric.Int64GaugeEntry otherSys *metric.Int64GaugeEntry gcSys *metric.Int64GaugeEntry numGC *metric.Int64DerivedCumulative numForcedGC *metric.Int64DerivedCumulative nextGC *metric.Int64GaugeEntry lastGC *metric.Int64GaugeEntry pauseTotalNs *metric.Int64DerivedCumulative gcCPUFraction *metric.Float64Entry } cpuStats struct { numGoroutines *metric.Int64GaugeEntry numCgoCalls *metric.Int64GaugeEntry } ) var ( _ metricproducer.Producer = (*producer)(nil) enableMutex sync.Mutex enabledProducer *producer ) // Enable enables collection of runtime metrics. // // Supply RunMetricOptions to configure the behavior of metrics collection. // An error might be returned, if creating metrics gauges fails. // // Previous calls will be overwritten by subsequent ones. func Enable(options RunMetricOptions) error { producer := &producer{options: options, reg: metric.NewRegistry()} var err error if options.EnableMemory { switch options.UseDerivedCumulative { case true: producer.memStats, err = newMemStats(producer) if err != nil { return err } default: producer.deprecatedMemStats, err = newDeprecatedMemStats(producer) if err != nil { return err } } } if options.EnableCPU { producer.cpuStats, err = newCPUStats(producer) if err != nil { return err } } enableMutex.Lock() defer enableMutex.Unlock() metricproducer.GlobalManager().DeleteProducer(enabledProducer) metricproducer.GlobalManager().AddProducer(producer) enabledProducer = producer return nil } // Disable disables collection of runtime metrics. func Disable() { enableMutex.Lock() defer enableMutex.Unlock() metricproducer.GlobalManager().DeleteProducer(enabledProducer) enabledProducer = nil } // Read reads the current runtime metrics. func (p *producer) Read() []*metricdata.Metric { if p.memStats != nil { p.memStats.read() } if p.cpuStats != nil { p.cpuStats.read() } return p.reg.Read() } func newDeprecatedMemStats(producer *producer) (*deprecatedMemStats, error) { var err error memStats := &deprecatedMemStats{} // General memStats.memAlloc, err = producer.createInt64GaugeEntry("process/memory_alloc", "Number of bytes currently allocated in use", metricdata.UnitBytes) if err != nil { return nil, err } memStats.memTotal, err = producer.createInt64GaugeEntry("process/total_memory_alloc", "Number of allocations in total", metricdata.UnitBytes) if err != nil { return nil, err } memStats.memSys, err = producer.createInt64GaugeEntry("process/sys_memory_alloc", "Number of bytes given to the process to use in total", metricdata.UnitBytes) if err != nil { return nil, err } memStats.memLookups, err = producer.createInt64GaugeEntry("process/memory_lookups", "Cumulative number of pointer lookups performed by the runtime", metricdata.UnitDimensionless) if err != nil { return nil, err } memStats.memMalloc, err = producer.createInt64GaugeEntry("process/memory_malloc", "Cumulative count of heap objects allocated", metricdata.UnitDimensionless) if err != nil { return nil, err } memStats.memFrees, err = producer.createInt64GaugeEntry("process/memory_frees", "Cumulative count of heap objects freed", metricdata.UnitDimensionless) if err != nil { return nil, err } // Heap memStats.heapAlloc, err = producer.createInt64GaugeEntry("process/heap_alloc", "Process heap allocation", metricdata.UnitBytes) if err != nil { return nil, err } memStats.heapSys, err = producer.createInt64GaugeEntry("process/sys_heap", "Bytes of heap memory obtained from the OS", metricdata.UnitBytes) if err != nil { return nil, 
err } memStats.heapIdle, err = producer.createInt64GaugeEntry("process/heap_idle", "Bytes in idle (unused) spans", metricdata.UnitBytes) if err != nil { return nil, err } memStats.heapInuse, err = producer.createInt64GaugeEntry("process/heap_inuse", "Bytes in in-use spans", metricdata.UnitBytes) if err != nil { return nil, err } memStats.heapObjects, err = producer.createInt64GaugeEntry("process/heap_objects", "The number of objects allocated on the heap", metricdata.UnitDimensionless) if err != nil { return nil, err } memStats.heapReleased, err = producer.createInt64GaugeEntry("process/heap_release", "The cumulative number of objects released from the heap", metricdata.UnitBytes) if err != nil { return nil, err } // Stack memStats.stackInuse, err = producer.createInt64GaugeEntry("process/stack_inuse", "Bytes in stack spans", metricdata.UnitBytes) if err != nil { return nil, err } memStats.stackSys, err = producer.createInt64GaugeEntry("process/sys_stack", "The memory used by stack spans and OS thread stacks", metricdata.UnitBytes) if err != nil { return nil, err } memStats.stackMSpanInuse, err = producer.createInt64GaugeEntry("process/stack_mspan_inuse", "Bytes of allocated mspan structures", metricdata.UnitBytes) if err != nil { return nil, err } memStats.stackMSpanSys, err = producer.createInt64GaugeEntry("process/sys_stack_mspan", "Bytes of memory obtained from the OS for mspan structures", metricdata.UnitBytes) if err != nil { return nil, err } memStats.stackMCacheInuse, err = producer.createInt64GaugeEntry("process/stack_mcache_inuse", "Bytes of allocated mcache structures", metricdata.UnitBytes) if err != nil { return nil, err } memStats.stackMCacheSys, err = producer.createInt64GaugeEntry("process/sys_stack_mcache", "Bytes of memory obtained from the OS for mcache structures", metricdata.UnitBytes) if err != nil { return nil, err } // GC memStats.gcSys, err = producer.createInt64GaugeEntry("process/gc_sys", "Bytes of memory in garbage collection metadatas", metricdata.UnitBytes) if err != nil { return nil, err } memStats.otherSys, err = producer.createInt64GaugeEntry("process/other_sys", "Bytes of memory in miscellaneous off-heap runtime allocations", metricdata.UnitBytes) if err != nil { return nil, err } memStats.numGC, err = producer.createInt64GaugeEntry("process/num_gc", "Cumulative count of completed GC cycles", metricdata.UnitDimensionless) if err != nil { return nil, err } memStats.numForcedGC, err = producer.createInt64GaugeEntry("process/num_forced_gc", "Cumulative count of GC cycles forced by the application", metricdata.UnitDimensionless) if err != nil { return nil, err } memStats.nextGC, err = producer.createInt64GaugeEntry("process/next_gc_heap_size", "Target heap size of the next GC cycle in bytes", metricdata.UnitBytes) if err != nil { return nil, err } memStats.lastGC, err = producer.createInt64GaugeEntry("process/last_gc_finished_timestamp", "Time the last garbage collection finished, as milliseconds since 1970 (the UNIX epoch)", metricdata.UnitMilliseconds) if err != nil { return nil, err } memStats.pauseTotalNs, err = producer.createInt64GaugeEntry("process/pause_total", "Cumulative milliseconds spent in GC stop-the-world pauses", metricdata.UnitMilliseconds) if err != nil { return nil, err } memStats.gcCPUFraction, err = producer.createFloat64GaugeEntry("process/gc_cpu_fraction", "Fraction of this program's available CPU time used by the GC since the program started", metricdata.UnitDimensionless) if err != nil { return nil, err } return memStats, nil } func (m 
*deprecatedMemStats) read() { runtime.ReadMemStats(&m.memStats) m.memAlloc.Set(int64(m.memStats.Alloc)) m.memTotal.Set(int64(m.memStats.TotalAlloc)) m.memSys.Set(int64(m.memStats.Sys)) m.memLookups.Set(int64(m.memStats.Lookups)) m.memMalloc.Set(int64(m.memStats.Mallocs)) m.memFrees.Set(int64(m.memStats.Frees)) m.heapAlloc.Set(int64(m.memStats.HeapAlloc)) m.heapSys.Set(int64(m.memStats.HeapSys)) m.heapIdle.Set(int64(m.memStats.HeapIdle)) m.heapInuse.Set(int64(m.memStats.HeapInuse)) m.heapReleased.Set(int64(m.memStats.HeapReleased)) m.heapObjects.Set(int64(m.memStats.HeapObjects)) m.stackInuse.Set(int64(m.memStats.StackInuse)) m.stackSys.Set(int64(m.memStats.StackSys)) m.stackMSpanInuse.Set(int64(m.memStats.MSpanInuse)) m.stackMSpanSys.Set(int64(m.memStats.MSpanSys)) m.stackMCacheInuse.Set(int64(m.memStats.MCacheInuse)) m.stackMCacheSys.Set(int64(m.memStats.MCacheSys)) m.gcSys.Set(int64(m.memStats.GCSys)) m.otherSys.Set(int64(m.memStats.OtherSys)) m.numGC.Set(int64(m.memStats.NumGC)) m.numForcedGC.Set(int64(m.memStats.NumForcedGC)) m.nextGC.Set(int64(m.memStats.NextGC)) m.lastGC.Set(int64(m.memStats.LastGC) / int64(time.Millisecond)) m.pauseTotalNs.Set(int64(m.memStats.PauseTotalNs) / int64(time.Millisecond)) m.gcCPUFraction.Set(m.memStats.GCCPUFraction) } func newMemStats(producer *producer) (*memStats, error) { var err error memStats := &memStats{} // General memStats.memAlloc, err = producer.createInt64GaugeEntry("process/memory_alloc", "Number of bytes currently allocated in use", metricdata.UnitBytes) if err != nil { return nil, err } memStats.memTotal, err = producer.createInt64DerivedCumulative("process/total_memory_alloc", "Number of allocations in total", metricdata.UnitBytes) if err != nil { return nil, err } memStats.memSys, err = producer.createInt64GaugeEntry("process/sys_memory_alloc", "Number of bytes given to the process to use in total", metricdata.UnitBytes) if err != nil { return nil, err } memStats.memLookups, err = producer.createInt64DerivedCumulative("process/memory_lookups", "Cumulative number of pointer lookups performed by the runtime", metricdata.UnitDimensionless) if err != nil { return nil, err } memStats.memMalloc, err = producer.createInt64DerivedCumulative("process/memory_malloc", "Cumulative count of heap objects allocated", metricdata.UnitDimensionless) if err != nil { return nil, err } memStats.memFrees, err = producer.createInt64DerivedCumulative("process/memory_frees", "Cumulative count of heap objects freed", metricdata.UnitDimensionless) if err != nil { return nil, err } // Heap memStats.heapAlloc, err = producer.createInt64GaugeEntry("process/heap_alloc", "Process heap allocation", metricdata.UnitBytes) if err != nil { return nil, err } memStats.heapSys, err = producer.createInt64GaugeEntry("process/sys_heap", "Bytes of heap memory obtained from the OS", metricdata.UnitBytes) if err != nil { return nil, err } memStats.heapIdle, err = producer.createInt64GaugeEntry("process/heap_idle", "Bytes in idle (unused) spans", metricdata.UnitBytes) if err != nil { return nil, err } memStats.heapInuse, err = producer.createInt64GaugeEntry("process/heap_inuse", "Bytes in in-use spans", metricdata.UnitBytes) if err != nil { return nil, err } memStats.heapObjects, err = producer.createInt64GaugeEntry("process/heap_objects", "The number of objects allocated on the heap", metricdata.UnitDimensionless) if err != nil { return nil, err } memStats.heapReleased, err = producer.createInt64DerivedCumulative("process/heap_release", "The cumulative number of objects released 
from the heap", metricdata.UnitBytes) if err != nil { return nil, err } // Stack memStats.stackInuse, err = producer.createInt64GaugeEntry("process/stack_inuse", "Bytes in stack spans", metricdata.UnitBytes) if err != nil { return nil, err } memStats.stackSys, err = producer.createInt64GaugeEntry("process/sys_stack", "The memory used by stack spans and OS thread stacks", metricdata.UnitBytes) if err != nil { return nil, err } memStats.stackMSpanInuse, err = producer.createInt64GaugeEntry("process/stack_mspan_inuse", "Bytes of allocated mspan structures", metricdata.UnitBytes) if err != nil { return nil, err } memStats.stackMSpanSys, err = producer.createInt64GaugeEntry("process/sys_stack_mspan", "Bytes of memory obtained from the OS for mspan structures", metricdata.UnitBytes) if err != nil { return nil, err } memStats.stackMCacheInuse, err = producer.createInt64GaugeEntry("process/stack_mcache_inuse", "Bytes of allocated mcache structures", metricdata.UnitBytes) if err != nil { return nil, err } memStats.stackMCacheSys, err = producer.createInt64GaugeEntry("process/sys_stack_mcache", "Bytes of memory obtained from the OS for mcache structures", metricdata.UnitBytes) if err != nil { return nil, err } // GC memStats.gcSys, err = producer.createInt64GaugeEntry("process/gc_sys", "Bytes of memory in garbage collection metadatas", metricdata.UnitBytes) if err != nil { return nil, err } memStats.otherSys, err = producer.createInt64GaugeEntry("process/other_sys", "Bytes of memory in miscellaneous off-heap runtime allocations", metricdata.UnitBytes) if err != nil { return nil, err } memStats.numGC, err = producer.createInt64DerivedCumulative("process/num_gc", "Cumulative count of completed GC cycles", metricdata.UnitDimensionless) if err != nil { return nil, err } memStats.numForcedGC, err = producer.createInt64DerivedCumulative("process/num_forced_gc", "Cumulative count of GC cycles forced by the application", metricdata.UnitDimensionless) if err != nil { return nil, err } memStats.nextGC, err = producer.createInt64GaugeEntry("process/next_gc_heap_size", "Target heap size of the next GC cycle in bytes", metricdata.UnitBytes) if err != nil { return nil, err } memStats.lastGC, err = producer.createInt64GaugeEntry("process/last_gc_finished_timestamp", "Time the last garbage collection finished, as milliseconds since 1970 (the UNIX epoch)", metricdata.UnitMilliseconds) if err != nil { return nil, err } memStats.pauseTotalNs, err = producer.createInt64DerivedCumulative("process/pause_total", "Cumulative milliseconds spent in GC stop-the-world pauses", metricdata.UnitMilliseconds) if err != nil { return nil, err } memStats.gcCPUFraction, err = producer.createFloat64GaugeEntry("process/gc_cpu_fraction", "Fraction of this program's available CPU time used by the GC since the program started", metricdata.UnitDimensionless) if err != nil { return nil, err } return memStats, nil } func (m *memStats) read() { runtime.ReadMemStats(&m.memStats) m.memAlloc.Set(int64(m.memStats.Alloc)) _ = m.memTotal.UpsertEntry(func() int64 { return int64(m.memStats.TotalAlloc) }) m.memSys.Set(int64(m.memStats.Sys)) _ = m.memLookups.UpsertEntry(func() int64 { return int64(m.memStats.Lookups) }) _ = m.memMalloc.UpsertEntry(func() int64 { return int64(m.memStats.Mallocs) }) _ = m.memFrees.UpsertEntry(func() int64 { return int64(m.memStats.Frees) }) m.heapAlloc.Set(int64(m.memStats.HeapAlloc)) m.heapSys.Set(int64(m.memStats.HeapSys)) m.heapIdle.Set(int64(m.memStats.HeapIdle)) m.heapInuse.Set(int64(m.memStats.HeapInuse)) _ = 
m.heapReleased.UpsertEntry(func() int64 { return int64(m.memStats.HeapReleased) }) m.heapObjects.Set(int64(m.memStats.HeapObjects)) m.stackInuse.Set(int64(m.memStats.StackInuse)) m.stackSys.Set(int64(m.memStats.StackSys)) m.stackMSpanInuse.Set(int64(m.memStats.MSpanInuse)) m.stackMSpanSys.Set(int64(m.memStats.MSpanSys)) m.stackMCacheInuse.Set(int64(m.memStats.MCacheInuse)) m.stackMCacheSys.Set(int64(m.memStats.MCacheSys)) m.gcSys.Set(int64(m.memStats.GCSys)) m.otherSys.Set(int64(m.memStats.OtherSys)) _ = m.numGC.UpsertEntry(func() int64 { return int64(m.memStats.NumGC) }) _ = m.numForcedGC.UpsertEntry(func() int64 { return int64(m.memStats.NumForcedGC) }) m.nextGC.Set(int64(m.memStats.NextGC)) m.lastGC.Set(int64(m.memStats.LastGC) / int64(time.Millisecond)) _ = m.pauseTotalNs.UpsertEntry(func() int64 { return int64(m.memStats.PauseTotalNs) / int64(time.Millisecond) }) m.gcCPUFraction.Set(m.memStats.GCCPUFraction) } func newCPUStats(producer *producer) (*cpuStats, error) { cpuStats := &cpuStats{} var err error cpuStats.numGoroutines, err = producer.createInt64GaugeEntry("process/cpu_goroutines", "Number of goroutines that currently exist", metricdata.UnitDimensionless) if err != nil { return nil, err } cpuStats.numCgoCalls, err = producer.createInt64GaugeEntry("process/cpu_cgo_calls", "Number of cgo calls made by the current process", metricdata.UnitDimensionless) if err != nil { return nil, err } return cpuStats, nil } func (c *cpuStats) read() { c.numGoroutines.Set(int64(runtime.NumGoroutine())) c.numCgoCalls.Set(runtime.NumCgoCall()) } func (p *producer) createFloat64GaugeEntry(name string, description string, unit metricdata.Unit) (*metric.Float64Entry, error) { if len(p.options.Prefix) > 0 { name = p.options.Prefix + name } gauge, err := p.reg.AddFloat64Gauge( name, metric.WithDescription(description), metric.WithUnit(unit)) if err != nil { return nil, errors.New("error creating gauge for " + name + ": " + err.Error()) } entry, err := gauge.GetEntry() if err != nil { return nil, errors.New("error getting gauge entry for " + name + ": " + err.Error()) } return entry, nil } func (p *producer) createInt64GaugeEntry(name string, description string, unit metricdata.Unit) (*metric.Int64GaugeEntry, error) { if len(p.options.Prefix) > 0 { name = p.options.Prefix + name } gauge, err := p.reg.AddInt64Gauge( name, metric.WithDescription(description), metric.WithUnit(unit)) if err != nil { return nil, errors.New("error creating gauge for " + name + ": " + err.Error()) } entry, err := gauge.GetEntry() if err != nil { return nil, errors.New("error getting gauge entry for " + name + ": " + err.Error()) } return entry, nil } func (p *producer) createInt64DerivedCumulative(name string, description string, unit metricdata.Unit) (*metric.Int64DerivedCumulative, error) { if len(p.options.Prefix) > 0 { name = p.options.Prefix + name } cumulative, err := p.reg.AddInt64DerivedCumulative( name, metric.WithDescription(description), metric.WithUnit(unit)) if err != nil { return nil, errors.New("error creating gauge for " + name + ": " + err.Error()) } return cumulative, nil } opencensus-go-0.24.0/plugin/runmetrics/producer_test.go000066400000000000000000000224541433102037600232720ustar00rootroot00000000000000package runmetrics_test import ( "context" "testing" "github.com/stretchr/testify/assert" "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricexport" "go.opencensus.io/metric/metricproducer" "go.opencensus.io/plugin/runmetrics" ) type testExporter struct { data []*metricdata.Metric } func (t 
*testExporter) ExportMetrics(ctx context.Context, data []*metricdata.Metric) error { t.data = append(t.data, data...) return nil } func TestEnable(t *testing.T) { tests := []struct { name string options runmetrics.RunMetricOptions wantMetricNames [][]string dontWantMetricNames [][]string }{ { "no stats", runmetrics.RunMetricOptions{ EnableCPU: false, EnableMemory: false, }, [][]string{}, [][]string{}, }, { "cpu and memory stats", runmetrics.RunMetricOptions{ EnableCPU: true, EnableMemory: true, }, [][]string{ {"process/memory_alloc", "process/total_memory_alloc", "process/sys_memory_alloc", "process/memory_lookups", "process/memory_malloc", "process/memory_frees"}, {"process/heap_alloc", "process/sys_heap", "process/heap_idle", "process/heap_inuse", "process/heap_objects", "process/heap_release"}, {"process/stack_inuse", "process/sys_stack", "process/stack_mspan_inuse", "process/sys_stack_mspan", "process/stack_mcache_inuse", "process/sys_stack_mcache"}, {"process/gc_sys", "process/other_sys", "process/num_gc", "process/num_forced_gc", "process/next_gc_heap_size", "process/last_gc_finished_timestamp", "process/pause_total", "process/gc_cpu_fraction"}, {"process/cpu_goroutines", "process/cpu_cgo_calls"}, }, [][]string{}, }, { "cpu and deprecated memory stats", runmetrics.RunMetricOptions{ EnableCPU: true, EnableMemory: true, }, [][]string{ {"process/memory_alloc", "process/total_memory_alloc", "process/sys_memory_alloc", "process/memory_lookups", "process/memory_malloc", "process/memory_frees"}, {"process/heap_alloc", "process/sys_heap", "process/heap_idle", "process/heap_inuse", "process/heap_objects", "process/heap_release"}, {"process/stack_inuse", "process/sys_stack", "process/stack_mspan_inuse", "process/sys_stack_mspan", "process/stack_mcache_inuse", "process/sys_stack_mcache"}, {"process/gc_sys", "process/other_sys", "process/num_gc", "process/num_forced_gc", "process/next_gc_heap_size", "process/last_gc_finished_timestamp", "process/pause_total", "process/gc_cpu_fraction"}, {"process/cpu_goroutines", "process/cpu_cgo_calls"}, }, [][]string{}, }, { "only cpu stats", runmetrics.RunMetricOptions{ EnableCPU: true, EnableMemory: false, }, [][]string{ {"process/cpu_goroutines", "process/cpu_cgo_calls"}, }, [][]string{ {"process/memory_alloc", "process/total_memory_alloc", "process/sys_memory_alloc", "process/memory_lookups", "process/memory_malloc", "process/memory_frees"}, {"process/heap_alloc", "process/sys_heap", "process/heap_idle", "process/heap_inuse", "process/heap_objects", "process/heap_release"}, {"process/stack_inuse", "process/sys_stack", "process/stack_mspan_inuse", "process/sys_stack_mspan", "process/stack_mcache_inuse", "process/sys_stack_mcache"}, {"process/gc_sys", "process/other_sys", "process/num_gc", "process/num_forced_gc", "process/next_gc_heap_size", "process/last_gc_finished_timestamp", "process/pause_total", "process/gc_cpu_fraction"}, }, }, { "only memory stats", runmetrics.RunMetricOptions{ EnableCPU: false, EnableMemory: true, }, [][]string{ {"process/memory_alloc", "process/total_memory_alloc", "process/sys_memory_alloc", "process/memory_lookups", "process/memory_malloc", "process/memory_frees"}, {"process/heap_alloc", "process/sys_heap", "process/heap_idle", "process/heap_inuse", "process/heap_objects", "process/heap_release"}, {"process/stack_inuse", "process/sys_stack", "process/stack_mspan_inuse", "process/sys_stack_mspan", "process/stack_mcache_inuse", "process/sys_stack_mcache"}, {"process/gc_sys", "process/other_sys", "process/num_gc", 
"process/num_forced_gc", "process/next_gc_heap_size", "process/last_gc_finished_timestamp", "process/pause_total", "process/gc_cpu_fraction"}, }, [][]string{ {"process/cpu_goroutines", "process/cpu_cgo_calls"}, }, }, { "only deprecated memory stats", runmetrics.RunMetricOptions{ EnableCPU: false, EnableMemory: true, UseDerivedCumulative: true, }, [][]string{ {"process/memory_alloc", "process/total_memory_alloc", "process/sys_memory_alloc", "process/memory_lookups", "process/memory_malloc", "process/memory_frees"}, {"process/heap_alloc", "process/sys_heap", "process/heap_idle", "process/heap_inuse", "process/heap_objects", "process/heap_release"}, {"process/stack_inuse", "process/sys_stack", "process/stack_mspan_inuse", "process/sys_stack_mspan", "process/stack_mcache_inuse", "process/sys_stack_mcache"}, {"process/gc_sys", "process/other_sys", "process/num_gc", "process/num_forced_gc", "process/next_gc_heap_size", "process/last_gc_finished_timestamp", "process/pause_total", "process/gc_cpu_fraction"}, }, [][]string{ {"process/cpu_goroutines", "process/cpu_cgo_calls"}, }, }, { "cpu and deprecated memory stats with custom prefix", runmetrics.RunMetricOptions{ EnableCPU: true, EnableMemory: true, UseDerivedCumulative: true, Prefix: "test_", }, [][]string{ {"test_process/memory_alloc", "test_process/total_memory_alloc", "test_process/sys_memory_alloc", "test_process/memory_lookups", "test_process/memory_malloc", "test_process/memory_frees"}, {"test_process/heap_alloc", "test_process/sys_heap", "test_process/heap_idle", "test_process/heap_inuse", "test_process/heap_objects", "test_process/heap_release"}, {"test_process/stack_inuse", "test_process/sys_stack", "test_process/stack_mspan_inuse", "test_process/sys_stack_mspan", "test_process/stack_mcache_inuse", "test_process/sys_stack_mcache"}, {"test_process/gc_sys", "test_process/other_sys", "test_process/num_gc", "test_process/num_forced_gc", "test_process/next_gc_heap_size", "test_process/last_gc_finished_timestamp", "test_process/pause_total", "test_process/gc_cpu_fraction"}, {"test_process/cpu_goroutines", "test_process/cpu_cgo_calls"}, }, [][]string{}, }, { "cpu and memory stats with custom prefix", runmetrics.RunMetricOptions{ EnableCPU: true, EnableMemory: true, Prefix: "test_", }, [][]string{ {"test_process/memory_alloc", "test_process/total_memory_alloc", "test_process/sys_memory_alloc", "test_process/memory_lookups", "test_process/memory_malloc", "test_process/memory_frees"}, {"test_process/heap_alloc", "test_process/sys_heap", "test_process/heap_idle", "test_process/heap_inuse", "test_process/heap_objects", "test_process/heap_release"}, {"test_process/stack_inuse", "test_process/sys_stack", "test_process/stack_mspan_inuse", "test_process/sys_stack_mspan", "test_process/stack_mcache_inuse", "test_process/sys_stack_mcache"}, {"test_process/gc_sys", "test_process/other_sys", "test_process/num_gc", "test_process/num_forced_gc", "test_process/next_gc_heap_size", "test_process/last_gc_finished_timestamp", "test_process/pause_total", "test_process/gc_cpu_fraction"}, {"test_process/cpu_goroutines", "test_process/cpu_cgo_calls"}, }, [][]string{}, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { err := runmetrics.Enable(test.options) if err != nil { t.Errorf("want: nil, got: %v", err) } defer runmetrics.Disable() exporter := &testExporter{} reader := metricexport.NewReader() reader.ReadAndExport(exporter) for _, want := range test.wantMetricNames { assertNames(t, true, exporter, want) } for _, dontWant := range 
test.dontWantMetricNames { assertNames(t, false, exporter, dontWant) } }) } } func assertNames(t *testing.T, wantIncluded bool, exporter *testExporter, expectedNames []string) { t.Helper() metricNames := make([]string, 0) for _, v := range exporter.data { metricNames = append(metricNames, v.Descriptor.Name) } for _, want := range expectedNames { if wantIncluded { assert.Contains(t, metricNames, want) } else { assert.NotContains(t, metricNames, want) } } } func TestEnable_RegistersWithGlobalManager(t *testing.T) { err := runmetrics.Enable(runmetrics.RunMetricOptions{}) if err != nil { t.Errorf("want: nil, got: %v", err) } registeredCount := len(metricproducer.GlobalManager().GetAll()) assert.Equal(t, 1, registeredCount, "expected a producer to be registered") } func TestEnable_RegistersNoDuplicates(t *testing.T) { err := runmetrics.Enable(runmetrics.RunMetricOptions{}) if err != nil { t.Errorf("want: nil, got: %v", err) } err = runmetrics.Enable(runmetrics.RunMetricOptions{}) if err != nil { t.Errorf("want: nil, got: %v", err) } producerCount := len(metricproducer.GlobalManager().GetAll()) assert.Equal(t, 1, producerCount, "expected one registered producer") } func TestDisable(t *testing.T) { err := runmetrics.Enable(runmetrics.RunMetricOptions{}) if err != nil { t.Errorf("want: nil, got: %v", err) } runmetrics.Disable() producerCount := len(metricproducer.GlobalManager().GetAll()) assert.Equal(t, 0, producerCount, "expected no registered producers") } opencensus-go-0.24.0/resource/000077500000000000000000000000001433102037600162105ustar00rootroot00000000000000opencensus-go-0.24.0/resource/resource.go000066400000000000000000000112431433102037600203670ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package resource provides functionality for resources, which capture // identifying information about the entities for which signals are exported. package resource import ( "context" "fmt" "os" "regexp" "sort" "strconv" "strings" ) // Environment variables used by FromEnv to decode a resource. const ( EnvVarType = "OC_RESOURCE_TYPE" EnvVarLabels = "OC_RESOURCE_LABELS" ) // Resource describes an entity about which identifying information and metadata is exposed. // For example, a type "k8s.io/container" may hold labels describing the pod name and namespace. type Resource struct { Type string Labels map[string]string } // EncodeLabels encodes a labels map to a string as provided via the OC_RESOURCE_LABELS environment variable. func EncodeLabels(labels map[string]string) string { sortedKeys := make([]string, 0, len(labels)) for k := range labels { sortedKeys = append(sortedKeys, k) } sort.Strings(sortedKeys) s := "" for i, k := range sortedKeys { if i > 0 { s += "," } s += k + "=" + strconv.Quote(labels[k]) } return s } var labelRegex = regexp.MustCompile(`^\s*([[:ascii:]]{1,256}?)=("[[:ascii:]]{0,256}?")\s*,`) // DecodeLabels decodes a serialized label map as used in the OC_RESOURCE_LABELS variable.
// A list of labels of the form `key1="value1",key2="value2",...` is accepted. // Domain names and paths are accepted as label keys. // Most users will want to use FromEnv instead. func DecodeLabels(s string) (map[string]string, error) { m := map[string]string{} // Ensure a trailing comma, which allows us to keep the regex simpler s = strings.TrimRight(strings.TrimSpace(s), ",") + "," for len(s) > 0 { match := labelRegex.FindStringSubmatch(s) if len(match) == 0 { return nil, fmt.Errorf("invalid label formatting, remainder: %s", s) } v := match[2] if v == "" { v = match[3] } else { var err error if v, err = strconv.Unquote(v); err != nil { return nil, fmt.Errorf("invalid label formatting, remainder: %s, err: %s", s, err) } } m[match[1]] = v s = s[len(match[0]):] } return m, nil } // FromEnv is a detector that loads resource information from the OC_RESOURCE_TYPE // and OC_RESOURCE_LABELS environment variables. func FromEnv(context.Context) (*Resource, error) { res := &Resource{ Type: strings.TrimSpace(os.Getenv(EnvVarType)), } labels := strings.TrimSpace(os.Getenv(EnvVarLabels)) if labels == "" { return res, nil } var err error if res.Labels, err = DecodeLabels(labels); err != nil { return nil, err } return res, nil } var _ Detector = FromEnv // merge resource information from b into a. In case of a collision, a takes precedence. func merge(a, b *Resource) *Resource { if a == nil { return b } if b == nil { return a } res := &Resource{ Type: a.Type, Labels: map[string]string{}, } if res.Type == "" { res.Type = b.Type } for k, v := range b.Labels { res.Labels[k] = v } // Labels from resource a overwrite labels from resource b. for k, v := range a.Labels { res.Labels[k] = v } return res } // Detector attempts to detect resource information. // If the detector cannot find resource information, the returned resource is nil but no // error is returned. // An error is only returned on unexpected failures. type Detector func(context.Context) (*Resource, error) // MultiDetector returns a Detector that calls all input detectors in order and // merges each result with the previous one. In case a type or label key is already set, // the first set value takes precedence. // It returns on the first error that a sub-detector encounters. func MultiDetector(detectors ...Detector) Detector { return func(ctx context.Context) (*Resource, error) { return detectAll(ctx, detectors...) } } // detectAll calls all input detectors sequentially and merges each result with the previous one. // It returns on the first error that a sub-detector encounters. func detectAll(ctx context.Context, detectors ...Detector) (*Resource, error) { var res *Resource for _, d := range detectors { r, err := d(ctx) if err != nil { return nil, err } res = merge(res, r) } return res, nil } opencensus-go-0.24.0/resource/resource_test.go000066400000000000000000000100061433102037600214220ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License.
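// A minimal, hypothetical usage sketch for the resource package above: FromEnv reads
// OC_RESOURCE_TYPE and OC_RESOURCE_LABELS, and MultiDetector merges detectors with the
// earlier detector winning on key conflicts. The environment values and the extra detector
// below are invented for illustration only.
package main

import (
	"context"
	"fmt"
	"os"

	"go.opencensus.io/resource"
)

func main() {
	// Values FromEnv will pick up; EncodeLabels produces the expected serialization.
	os.Setenv("OC_RESOURCE_TYPE", "host")
	os.Setenv("OC_RESOURCE_LABELS", resource.EncodeLabels(map[string]string{"host.name": "web-1"}))

	// A hand-written detector merged after FromEnv; on key collisions its labels lose.
	static := func(context.Context) (*resource.Resource, error) {
		return &resource.Resource{Type: "host", Labels: map[string]string{"cloud.region": "us-east1"}}, nil
	}

	detect := resource.MultiDetector(resource.FromEnv, static)
	res, err := detect(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Type, res.Labels) // host map[cloud.region:us-east1 host.name:web-1]
}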
package resource import ( "context" "errors" "fmt" "reflect" "testing" ) func TestMerge(t *testing.T) { cases := []struct { a, b, want *Resource }{ { a: &Resource{ Type: "t1", Labels: map[string]string{"a": "1", "b": "2"}, }, b: &Resource{ Type: "t2", Labels: map[string]string{"a": "1", "b": "3", "c": "4"}, }, want: &Resource{ Type: "t1", Labels: map[string]string{"a": "1", "b": "2", "c": "4"}, }, }, { a: nil, b: &Resource{ Type: "t1", Labels: map[string]string{"a": "1"}, }, want: &Resource{ Type: "t1", Labels: map[string]string{"a": "1"}, }, }, { a: &Resource{ Type: "t1", Labels: map[string]string{"a": "1"}, }, b: nil, want: &Resource{ Type: "t1", Labels: map[string]string{"a": "1"}, }, }, } for i, c := range cases { t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) { res := merge(c.a, c.b) if !reflect.DeepEqual(res, c.want) { t.Fatalf("unwanted result: want %+v, got %+v", c.want, res) } }) } } func TestDecodeLabels(t *testing.T) { cases := []struct { encoded string wantLabels map[string]string wantFail bool }{ { encoded: `example.org/test-1="test $ \"" , Abc="Def"`, wantLabels: map[string]string{"example.org/test-1": "test $ \"", "Abc": "Def"}, }, { encoded: `single="key"`, wantLabels: map[string]string{"single": "key"}, }, {encoded: `invalid-char-ü="test"`, wantFail: true}, {encoded: `invalid-char="ü-test"`, wantFail: true}, {encoded: `missing="trailing-quote`, wantFail: true}, {encoded: `missing=leading-quote"`, wantFail: true}, {encoded: `extra="chars", a`, wantFail: true}, } for i, c := range cases { t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) { res, err := DecodeLabels(c.encoded) if err != nil && !c.wantFail { t.Fatalf("unwanted error: %s", err) } if c.wantFail && err == nil { t.Fatalf("wanted failure but got none, result: %v", res) } if !reflect.DeepEqual(res, c.wantLabels) { t.Fatalf("wanted result %v, got %v", c.wantLabels, res) } }) } } func TestEncodeLabels(t *testing.T) { got := EncodeLabels(map[string]string{ "example.org/test-1": "test ¥ \"", "un": "quøted", "Abc": "Def", }) if want := `Abc="Def",example.org/test-1="test ¥ \"",un="quøted"`; got != want { t.Fatalf("got %q, want %q", got, want) } } func TestMultiDetector(t *testing.T) { got, err := MultiDetector( func(context.Context) (*Resource, error) { return &Resource{ Type: "t1", Labels: map[string]string{"a": "1", "b": "2"}, }, nil }, func(context.Context) (*Resource, error) { return &Resource{ Type: "t2", Labels: map[string]string{"a": "11", "c": "3"}, }, nil }, )(context.Background()) if err != nil { t.Fatalf("unexpected error: %s", err) } want := &Resource{ Type: "t1", Labels: map[string]string{"a": "1", "b": "2", "c": "3"}, } if !reflect.DeepEqual(got, want) { t.Fatalf("unexpected resource: want %v, got %v", want, got) } wantErr := errors.New("err1") _, err = MultiDetector( func(context.Context) (*Resource, error) { return &Resource{ Type: "t1", Labels: map[string]string{"a": "1", "b": "2"}, }, nil }, func(context.Context) (*Resource, error) { return nil, wantErr }, )(context.Background()) if err != wantErr { t.Fatalf("unexpected error: want %v, got %v", wantErr, err) } } opencensus-go-0.24.0/resource/resourcekeys/000077500000000000000000000000001433102037600207335ustar00rootroot00000000000000opencensus-go-0.24.0/resource/resourcekeys/const.go000066400000000000000000000041521433102037600224120ustar00rootroot00000000000000// Copyright 2019, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package resourcekeys contains well known type and label keys for resources. package resourcekeys // import "go.opencensus.io/resource/resourcekeys" // Constants for Kubernetes resources. const ( K8SType = "k8s" // A uniquely identifying name for the Kubernetes cluster. Kubernetes // does not have cluster names as an internal concept so this may be // set to any meaningful value within the environment. For example, // GKE clusters have a name which can be used for this label. K8SKeyClusterName = "k8s.cluster.name" K8SKeyNamespaceName = "k8s.namespace.name" K8SKeyPodName = "k8s.pod.name" K8SKeyDeploymentName = "k8s.deployment.name" ) // Constants for Container resources. const ( ContainerType = "container" // A uniquely identifying name for the Container. ContainerKeyName = "container.name" ContainerKeyImageName = "container.image.name" ContainerKeyImageTag = "container.image.tag" ) // Constants for Cloud resources. const ( CloudType = "cloud" CloudKeyProvider = "cloud.provider" CloudKeyAccountID = "cloud.account.id" CloudKeyRegion = "cloud.region" CloudKeyZone = "cloud.zone" // Cloud Providers CloudProviderAWS = "aws" CloudProviderGCP = "gcp" CloudProviderAZURE = "azure" ) // Constants for Host resources. const ( HostType = "host" // A uniquely identifying name for the host. HostKeyName = "host.name" // A hostname as returned by the 'hostname' command on host machine. HostKeyHostName = "host.hostname" HostKeyID = "host.id" HostKeyType = "host.type" ) opencensus-go-0.24.0/stats/000077500000000000000000000000001433102037600155175ustar00rootroot00000000000000opencensus-go-0.24.0/stats/benchmark_test.go000066400000000000000000000057051433102037600210460ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
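// A small, hypothetical sketch of how the well-known keys from resource/resourcekeys above
// can be combined with the resource package when describing a workload by hand; every label
// value here is invented for illustration.
package main

import (
	"fmt"

	"go.opencensus.io/resource"
	"go.opencensus.io/resource/resourcekeys"
)

func main() {
	res := &resource.Resource{
		Type: resourcekeys.ContainerType,
		Labels: map[string]string{
			resourcekeys.ContainerKeyName:  "checkout",
			resourcekeys.K8SKeyPodName:     "checkout-5d4f8",
			resourcekeys.K8SKeyClusterName: "prod-cluster",
			resourcekeys.CloudKeyProvider:  resourcekeys.CloudProviderGCP,
			resourcekeys.CloudKeyZone:      "us-central1-a",
		},
	}
	// EncodeLabels renders the map in OC_RESOURCE_LABELS form (keys sorted, values quoted).
	fmt.Println(res.Type, resource.EncodeLabels(res.Labels))
}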
package stats_test import ( "context" "testing" "go.opencensus.io/stats" "go.opencensus.io/stats/view" _ "go.opencensus.io/stats/view" // enable collection "go.opencensus.io/tag" ) var m = makeMeasure() func BenchmarkRecord0(b *testing.B) { ctx := context.Background() b.ResetTimer() for i := 0; i < b.N; i++ { stats.Record(ctx) } } func BenchmarkRecord1(b *testing.B) { ctx := context.Background() b.ResetTimer() for i := 0; i < b.N; i++ { stats.Record(ctx, m.M(1)) } } func BenchmarkRecord8(b *testing.B) { ctx := context.Background() b.ResetTimer() for i := 0; i < b.N; i++ { stats.Record(ctx, m.M(1), m.M(1), m.M(1), m.M(1), m.M(1), m.M(1), m.M(1), m.M(1)) } } func BenchmarkRecord8_WithRecorder(b *testing.B) { ctx := context.Background() meter := view.NewMeter() meter.Start() defer meter.Stop() b.ResetTimer() for i := 0; i < b.N; i++ { // Note that this benchmark has one extra allocation for stats.WithRecorder. // If you cache the recorder option, this benchmark should be equally fast as BenchmarkRecord8 stats.RecordWithOptions(ctx, stats.WithRecorder(meter), stats.WithMeasurements(m.M(1), m.M(1), m.M(1), m.M(1), m.M(1), m.M(1), m.M(1), m.M(1))) } b.StopTimer() } func BenchmarkRecord8_Parallel(b *testing.B) { ctx := context.Background() b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { stats.Record(ctx, m.M(1), m.M(1), m.M(1), m.M(1), m.M(1), m.M(1), m.M(1), m.M(1)) } }) } func BenchmarkRecord8_8Tags(b *testing.B) { ctx := context.Background() key1 := tag.MustNewKey("key1") key2 := tag.MustNewKey("key2") key3 := tag.MustNewKey("key3") key4 := tag.MustNewKey("key4") key5 := tag.MustNewKey("key5") key6 := tag.MustNewKey("key6") key7 := tag.MustNewKey("key7") key8 := tag.MustNewKey("key8") tag.New(ctx, tag.Insert(key1, "value"), tag.Insert(key2, "value"), tag.Insert(key3, "value"), tag.Insert(key4, "value"), tag.Insert(key5, "value"), tag.Insert(key6, "value"), tag.Insert(key7, "value"), tag.Insert(key8, "value"), ) b.ResetTimer() for i := 0; i < b.N; i++ { stats.Record(ctx, m.M(1), m.M(1), m.M(1), m.M(1), m.M(1), m.M(1), m.M(1), m.M(1)) } } func makeMeasure() *stats.Int64Measure { m := stats.Int64("m", "test measure", "") v := &view.View{ Measure: m, Aggregation: view.Sum(), } if err := view.Register(v); err != nil { panic(err.Error()) } return m } opencensus-go-0.24.0/stats/doc.go000066400000000000000000000054251433102037600166210ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // /* Package stats contains support for OpenCensus stats recording. OpenCensus allows users to create typed measures, record measurements, aggregate the collected data, and export the aggregated data. # Measures A measure represents a type of data point to be tracked and recorded. For example, latency, request Mb/s, and response Mb/s are measures to collect from a server. Measure constructors such as Int64 and Float64 automatically register the measure by the given name. Each registered measure needs to be unique by name. 
Measures also have a description and a unit. Libraries can define and export measures. Application authors can then create views and collect and break down measures by the tags they are interested in. # Recording measurements Measurement is a data point to be collected for a measure. For example, for a latency (ms) measure, 100 is a measurement that represents a 100ms latency event. Measurements are created from measures with the current context. Tags from the current context are recorded with the measurements if there are any. Recorded measurements are dropped immediately if no views are registered for them. There is usually no need to conditionally enable and disable recording to reduce cost. Recording of measurements is cheap. Libraries can always record measurements, and applications can later decide on which measurements they want to collect by registering views. This allows libraries to turn on the instrumentation by default. # Exemplars For a given recorded measurement, the associated exemplar is a diagnostic map that gives more information about the measurement. When aggregated using a Distribution aggregation, an exemplar is kept for each bucket in the Distribution. This allows you to easily find an example of a measurement that fell into each bucket. For example, if you also use the OpenCensus trace package and you record a measurement with a context that contains a sampled trace span, then the trace span will be added to the exemplar associated with the measurement. When exported to a supporting back end, you should be able to easily navigate to example traces that fell into each bucket in the Distribution. */ package stats // import "go.opencensus.io/stats" opencensus-go-0.24.0/stats/example_test.go000066400000000000000000000021461433102037600205430ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package stats_test import ( "context" "go.opencensus.io/stats" ) func ExampleRecord() { ctx := context.Background() // Measures are usually declared as package-private global variables. openConns := stats.Int64("example.com/measure/openconns", "open connections", stats.UnitDimensionless) // Instrumented packages call stats.Record() to record measurements. stats.Record(ctx, openConns.M(124)) // Record 124 open connections. // Without any views or exporters registered, this statement has no observable effects. } opencensus-go-0.24.0/stats/internal/000077500000000000000000000000001433102037600173335ustar00rootroot00000000000000opencensus-go-0.24.0/stats/internal/record.go000066400000000000000000000023451433102037600211440ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License.
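// A hypothetical follow-up to ExampleRecord above: the package documentation notes that
// recorded measurements are dropped unless a view is registered, so this sketch registers a
// LastValue view to make the recorded value observable. The view name is invented.
package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

func main() {
	openConns := stats.Int64("example.com/measure/openconns", "open connections", stats.UnitDimensionless)
	v := &view.View{
		Name:        "example.com/views/openconns_last",
		Measure:     openConns,
		Aggregation: view.LastValue(),
	}
	if err := view.Register(v); err != nil {
		log.Fatal(err)
	}

	stats.Record(context.Background(), openConns.M(124))

	// With the view registered, the view worker now holds one row with a LastValueData of 124.
	rows, err := view.RetrieveData(v.Name)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("collected %d row(s)", len(rows))
}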
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal import ( "go.opencensus.io/tag" ) // DefaultRecorder will be called for each Record call. var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{}) // MeasurementRecorder will be called for each Record call. This is the same as DefaultRecorder but // avoids interface{} conversion. // This will be a func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) type, // but is interface{} here to avoid import loops var MeasurementRecorder interface{} // SubscriptionReporter reports when a view subscribed with a measure. var SubscriptionReporter func(measure string) opencensus-go-0.24.0/stats/measure.go000066400000000000000000000062201433102037600175070ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package stats import ( "sync" "sync/atomic" ) // Measure represents a single numeric value to be tracked and recorded. // For example, latency, request bytes, and response bytes could be measures // to collect from a server. // // Measures by themselves have no outside effects. In order to be exported, // the measure needs to be used in a View. If no Views are defined over a // measure, there is very little cost in recording it. type Measure interface { // Name returns the name of this measure. // // Measure names are globally unique (among all libraries linked into your program). // We recommend prefixing the measure name with a domain name relevant to your // project or application. // // Measure names are never sent over the wire or exported to backends. // They are only used to create Views. Name() string // Description returns the human-readable description of this measure. Description() string // Unit returns the units for the values this measure takes on. // // Units are encoded according to the case-sensitive abbreviations from the // Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html Unit() string } // measureDescriptor is the untyped descriptor associated with each measure. // Int64Measure and Float64Measure wrap measureDescriptor to provide typed // recording APIs. // Two Measures with the same name will have the same measureDescriptor. 
type measureDescriptor struct { subs int32 // access atomically name string description string unit string } func (m *measureDescriptor) subscribe() { atomic.StoreInt32(&m.subs, 1) } func (m *measureDescriptor) subscribed() bool { return atomic.LoadInt32(&m.subs) == 1 } var ( mu sync.RWMutex measures = make(map[string]*measureDescriptor) ) func registerMeasureHandle(name, desc, unit string) *measureDescriptor { mu.Lock() defer mu.Unlock() if stored, ok := measures[name]; ok { return stored } m := &measureDescriptor{ name: name, description: desc, unit: unit, } measures[name] = m return m } // Measurement is the numeric value measured when recording stats. Each measure // provides methods to create measurements of their kind. For example, Int64Measure // provides M to convert an int64 into a measurement. type Measurement struct { v float64 m Measure desc *measureDescriptor } // Value returns the value of the Measurement as a float64. func (m Measurement) Value() float64 { return m.v } // Measure returns the Measure from which this Measurement was created. func (m Measurement) Measure() Measure { return m.m } opencensus-go-0.24.0/stats/measure_float64.go000066400000000000000000000030401433102037600210430ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package stats // Float64Measure is a measure for float64 values. type Float64Measure struct { desc *measureDescriptor } // M creates a new float64 measurement. // Use Record to record measurements. func (m *Float64Measure) M(v float64) Measurement { return Measurement{ m: m, desc: m.desc, v: v, } } // Float64 creates a new measure for float64 values. // // See the documentation for interface Measure for more guidance on the // parameters of this function. func Float64(name, description, unit string) *Float64Measure { mi := registerMeasureHandle(name, description, unit) return &Float64Measure{mi} } // Name returns the name of the measure. func (m *Float64Measure) Name() string { return m.desc.name } // Description returns the description of the measure. func (m *Float64Measure) Description() string { return m.desc.description } // Unit returns the unit of the measure. func (m *Float64Measure) Unit() string { return m.desc.unit } opencensus-go-0.24.0/stats/measure_int64.go000066400000000000000000000030151433102037600205320ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
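// A minimal, hypothetical sketch of the typed measure constructors defined above:
// Int64/Float64 register a measure by name, repeated registration with the same name reuses
// the same descriptor, and M() wraps a value into a Measurement. The measure names here are
// invented for illustration.
package main

import (
	"fmt"

	"go.opencensus.io/stats"
)

var (
	latencyMs = stats.Float64("example.com/measure/latency", "request latency", stats.UnitMilliseconds)
	reqBytes  = stats.Int64("example.com/measure/req_bytes", "request size", stats.UnitBytes)
)

func main() {
	m := latencyMs.M(7.5)
	fmt.Println(m.Measure().Name(), m.Value()) // example.com/measure/latency 7.5
	fmt.Println(reqBytes.Name(), reqBytes.Unit())
	// Measurements only become visible once passed to stats.Record with a view registered
	// over the measure (see stats/record.go and the stats/view package further down).
}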
// package stats // Int64Measure is a measure for int64 values. type Int64Measure struct { desc *measureDescriptor } // M creates a new int64 measurement. // Use Record to record measurements. func (m *Int64Measure) M(v int64) Measurement { return Measurement{ m: m, desc: m.desc, v: float64(v), } } // Int64 creates a new measure for int64 values. // // See the documentation for interface Measure for more guidance on the // parameters of this function. func Int64(name, description, unit string) *Int64Measure { mi := registerMeasureHandle(name, description, unit) return &Int64Measure{mi} } // Name returns the name of the measure. func (m *Int64Measure) Name() string { return m.desc.name } // Description returns the description of the measure. func (m *Int64Measure) Description() string { return m.desc.description } // Unit returns the unit of the measure. func (m *Int64Measure) Unit() string { return m.desc.unit } opencensus-go-0.24.0/stats/record.go000066400000000000000000000105151433102037600173260ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package stats import ( "context" "go.opencensus.io/metric/metricdata" "go.opencensus.io/stats/internal" "go.opencensus.io/tag" ) func init() { internal.SubscriptionReporter = func(measure string) { mu.Lock() measures[measure].subscribe() mu.Unlock() } } // Recorder provides an interface for exporting measurement information from // the static Record method by using the WithRecorder option. type Recorder interface { // Record records a set of measurements associated with the given tags and attachments. // The second argument is a `[]Measurement`. Record(*tag.Map, interface{}, map[string]interface{}) } type recordOptions struct { attachments metricdata.Attachments mutators []tag.Mutator measurements []Measurement recorder Recorder } // WithAttachments applies provided exemplar attachments. func WithAttachments(attachments metricdata.Attachments) Options { return func(ro *recordOptions) { ro.attachments = attachments } } // WithTags applies provided tag mutators. func WithTags(mutators ...tag.Mutator) Options { return func(ro *recordOptions) { ro.mutators = mutators } } // WithMeasurements applies provided measurements. func WithMeasurements(measurements ...Measurement) Options { return func(ro *recordOptions) { ro.measurements = measurements } } // WithRecorder records the measurements to the specified `Recorder`, rather // than to the global metrics recorder. func WithRecorder(meter Recorder) Options { return func(ro *recordOptions) { ro.recorder = meter } } // Options apply changes to recordOptions. type Options func(*recordOptions) func createRecordOption(ros ...Options) *recordOptions { o := &recordOptions{} for _, ro := range ros { ro(o) } return o } type measurementRecorder = func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) // Record records one or multiple measurements with the same context at once. 
// If there are any tags in the context, measurements will be tagged with them. func Record(ctx context.Context, ms ...Measurement) { // Record behaves the same as RecordWithOptions, but because we do not have to handle generic functionality // (RecordOptions) we can reduce some allocations to speed up this hot path if len(ms) == 0 { return } recorder := internal.MeasurementRecorder.(measurementRecorder) record := false for _, m := range ms { if m.desc.subscribed() { record = true break } } if !record { return } recorder(tag.FromContext(ctx), ms, nil) return } // RecordWithTags records one or multiple measurements at once. // // Measurements will be tagged with the tags in the context mutated by the mutators. // RecordWithTags is useful if you want to record with tag mutations but don't want // to propagate the mutations in the context. func RecordWithTags(ctx context.Context, mutators []tag.Mutator, ms ...Measurement) error { return RecordWithOptions(ctx, WithTags(mutators...), WithMeasurements(ms...)) } // RecordWithOptions records measurements from the given options (if any) against context // and tags and attachments in the options (if any). // If there are any tags in the context, measurements will be tagged with them. func RecordWithOptions(ctx context.Context, ros ...Options) error { o := createRecordOption(ros...) if len(o.measurements) == 0 { return nil } recorder := internal.DefaultRecorder if o.recorder != nil { recorder = o.recorder.Record } if recorder == nil { return nil } record := false for _, m := range o.measurements { if m.desc.subscribed() { record = true break } } if !record { return nil } if len(o.mutators) > 0 { var err error if ctx, err = tag.New(ctx, o.mutators...); err != nil { return err } } recorder(tag.FromContext(ctx), o.measurements, o.attachments) return nil } opencensus-go-0.24.0/stats/record_test.go000066400000000000000000000146221433102037600203700ustar00rootroot00000000000000// Copyright 2019, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
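// A hypothetical end-to-end sketch of the recording entry points defined in record.go above,
// using the global recorder: RecordWithTags applies tag mutations without leaking them into
// the caller's context, while Record reports plain measurements. All measure, view, and tag
// names are invented for illustration.
package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/tag"
)

func main() {
	keyMethod := tag.MustNewKey("method")
	latencyMs := stats.Float64("example.com/measure/latency", "request latency", stats.UnitMilliseconds)

	v := &view.View{
		Name:        "example.com/views/latency_distribution",
		Measure:     latencyMs,
		TagKeys:     []tag.Key{keyMethod},
		Aggregation: view.Distribution(5, 10, 25, 50, 100),
	}
	if err := view.Register(v); err != nil {
		log.Fatal(err)
	}

	ctx := context.Background()

	// Tagged recording; ctx itself is left unchanged.
	if err := stats.RecordWithTags(ctx, []tag.Mutator{tag.Upsert(keyMethod, "GET")}, latencyMs.M(12.3)); err != nil {
		log.Fatal(err)
	}

	// Untagged recording; this sample is aggregated into a row without a "method" value.
	stats.Record(ctx, latencyMs.M(3.2))
}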
package stats_test import ( "context" "log" "reflect" "testing" "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "go.opencensus.io/metric/metricdata" "go.opencensus.io/stats" "go.opencensus.io/stats/view" "go.opencensus.io/tag" "go.opencensus.io/trace" ) var ( tid = trace.TraceID{1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 4, 8, 16, 32, 64, 128} sid = trace.SpanID{1, 2, 4, 8, 16, 32, 64, 128} spanCtx = trace.SpanContext{ TraceID: tid, SpanID: sid, TraceOptions: 1, } ) func TestRecordWithAttachments(t *testing.T) { k1 := tag.MustNewKey("k1") k2 := tag.MustNewKey("k2") distribution := view.Distribution(5, 10) m := stats.Int64("TestRecordWithAttachments/m1", "", stats.UnitDimensionless) v := &view.View{ Name: "test_view", TagKeys: []tag.Key{k1, k2}, Measure: m, Aggregation: distribution, } view.SetReportingPeriod(100 * time.Millisecond) if err := view.Register(v); err != nil { log.Fatalf("Failed to register views: %v", err) } defer view.Unregister(v) attachments := map[string]interface{}{metricdata.AttachmentKeySpanContext: spanCtx} stats.RecordWithOptions(context.Background(), stats.WithAttachments(attachments), stats.WithMeasurements(m.M(12))) rows, err := view.RetrieveData("test_view") if err != nil { t.Errorf("Failed to retrieve data %v", err) } if len(rows) == 0 { t.Errorf("No data was recorded.") } data := rows[0].Data dis, ok := data.(*view.DistributionData) if !ok { t.Errorf("want DistributionData, got %+v", data) } wantBuckets := []int64{0, 0, 1} if !reflect.DeepEqual(dis.CountPerBucket, wantBuckets) { t.Errorf("want buckets %v, got %v", wantBuckets, dis.CountPerBucket) } for i, e := range dis.ExemplarsPerBucket { // Exemplar slice should be [nil, nil, exemplar] if i != 2 && e != nil { t.Errorf("want nil exemplar, got %v", e) } if i == 2 { wantExemplar := &metricdata.Exemplar{Value: 12, Attachments: attachments} if diff := cmpExemplar(e, wantExemplar); diff != "" { t.Fatalf("Unexpected Exemplar -got +want: %s", diff) } } } } // Compare exemplars while ignoring exemplar timestamp, since timestamp is non-deterministic. func cmpExemplar(got, want *metricdata.Exemplar) string { return cmp.Diff(got, want, cmpopts.IgnoreFields(metricdata.Exemplar{}, "Timestamp"), cmpopts.IgnoreUnexported(metricdata.Exemplar{})) } func TestRecordWithMeter(t *testing.T) { meter := view.NewMeter() meter.Start() defer meter.Stop() k1 := tag.MustNewKey("k1") k2 := tag.MustNewKey("k2") m1 := stats.Int64("TestResolveOptions/m1", "", stats.UnitDimensionless) m2 := stats.Int64("TestResolveOptions/m2", "", stats.UnitDimensionless) v := []*view.View{{ Name: "test_view", TagKeys: []tag.Key{k1, k2}, Measure: m1, Aggregation: view.Distribution(5, 10), }, { Name: "second_view", TagKeys: []tag.Key{k1}, Measure: m2, Aggregation: view.Count(), }} meter.SetReportingPeriod(100 * time.Millisecond) if err := meter.Register(v...); err != nil { t.Fatalf("Failed to register view: %v", err) } defer meter.Unregister(v...) 
attachments := map[string]interface{}{metricdata.AttachmentKeySpanContext: spanCtx} ctx, err := tag.New(context.Background(), tag.Insert(k1, "foo"), tag.Insert(k2, "foo")) if err != nil { t.Fatalf("Failed to set context: %v", err) } err = stats.RecordWithOptions(ctx, stats.WithTags(tag.Upsert(k1, "bar"), tag.Insert(k2, "bar")), stats.WithAttachments(attachments), stats.WithMeasurements(m1.M(12), m1.M(6), m2.M(5)), stats.WithRecorder(meter)) if err != nil { t.Fatalf("Failed to resolve data point: %v", err) } rows, err := meter.RetrieveData("test_view") if err != nil { t.Fatalf("Unable to retrieve data for test_view: %v", err) } if len(rows) != 1 { t.Fatalf("Expected one row, got %d rows: %+v", len(rows), rows) } if len(rows[0].Tags) != 2 { t.Errorf("Wrong number of tags %d: %v", len(rows[0].Tags), rows[0].Tags) } // k2 was Insert() ed, and shouldn't update the value that was in the supplied context. wantTags := []tag.Tag{{Key: k1, Value: "bar"}, {Key: k2, Value: "foo"}} for i, tag := range rows[0].Tags { if tag.Key != wantTags[i].Key { t.Errorf("Incorrect tag %d, want: %q, got: %q", i, wantTags[i].Key, tag.Key) } if tag.Value != wantTags[i].Value { t.Errorf("Incorrect tag for %s, want: %q, got: %v", tag.Key, wantTags[i].Value, tag.Value) } } wantBuckets := []int64{0, 1, 1} gotBuckets := rows[0].Data.(*view.DistributionData) if !reflect.DeepEqual(gotBuckets.CountPerBucket, wantBuckets) { t.Fatalf("want buckets %v, got %v", wantBuckets, gotBuckets) } for i, e := range gotBuckets.ExemplarsPerBucket { if gotBuckets.CountPerBucket[i] == 0 { if e != nil { t.Errorf("Unexpected exemplar for bucket") } continue } // values from the metrics above exemplarValues := []float64{0, 6, 12} wantExemplar := &metricdata.Exemplar{Value: exemplarValues[i], Attachments: attachments} if diff := cmpExemplar(e, wantExemplar); diff != "" { t.Errorf("Bad exemplar for %d: %+v", i, diff) } } rows2, err := meter.RetrieveData("second_view") if err != nil { t.Fatalf("Failed to read second_view: %v", err) } if len(rows2) != 1 { t.Fatalf("Expected one row, got %d rows: %v", len(rows2), rows2) } if len(rows2[0].Tags) != 1 { t.Errorf("Expected one tag, got %d tags: %v", len(rows2[0].Tags), rows2[0].Tags) } wantTags = []tag.Tag{{Key: k1, Value: "bar"}} for i, tag := range rows2[0].Tags { if wantTags[i].Key != tag.Key { t.Errorf("Wrong key for %d, want %q, got %q", i, wantTags[i].Key, tag.Key) } if wantTags[i].Value != tag.Value { t.Errorf("Wrong value for tag %s, want %q got %q", tag.Key, wantTags[i].Value, tag.Value) } } gotCount := rows2[0].Data.(*view.CountData) if gotCount.Value != 1 { t.Errorf("Wrong count for second_view, want %d, got %d", 1, gotCount.Value) } } opencensus-go-0.24.0/stats/units.go000066400000000000000000000016541433102037600172160ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
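// A condensed, hypothetical version of the pattern exercised by TestRecordWithMeter above: a
// private view.Meter acts as the stats.Recorder via stats.WithRecorder, keeping measurements
// out of the global view worker. The measure and view names are invented.
package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

func main() {
	meter := view.NewMeter()
	meter.Start()
	defer meter.Stop()

	hits := stats.Int64("example.com/measure/hits", "cache hits", stats.UnitDimensionless)
	v := &view.View{Name: "example.com/views/hits", Measure: hits, Aggregation: view.Count()}
	if err := meter.Register(v); err != nil {
		log.Fatal(err)
	}
	defer meter.Unregister(v)

	err := stats.RecordWithOptions(context.Background(),
		stats.WithRecorder(meter),
		stats.WithMeasurements(hits.M(1), hits.M(1)))
	if err != nil {
		log.Fatal(err)
	}

	rows, err := meter.RetrieveData(v.Name)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("rows: %d", len(rows)) // one row holding a CountData with Value 2
}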
// package stats // Units are encoded according to the case-sensitive abbreviations from the // Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html const ( UnitNone = "1" // Deprecated: Use UnitDimensionless. UnitDimensionless = "1" UnitBytes = "By" UnitMilliseconds = "ms" UnitSeconds = "s" ) opencensus-go-0.24.0/stats/view/000077500000000000000000000000001433102037600164715ustar00rootroot00000000000000opencensus-go-0.24.0/stats/view/aggregation.go000066400000000000000000000074171433102037600213200ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package view import "time" // AggType represents the type of aggregation function used on a View. type AggType int // All available aggregation types. const ( AggTypeNone AggType = iota // no aggregation; reserved for future use. AggTypeCount // the count aggregation, see Count. AggTypeSum // the sum aggregation, see Sum. AggTypeDistribution // the distribution aggregation, see Distribution. AggTypeLastValue // the last value aggregation, see LastValue. ) func (t AggType) String() string { return aggTypeName[t] } var aggTypeName = map[AggType]string{ AggTypeNone: "None", AggTypeCount: "Count", AggTypeSum: "Sum", AggTypeDistribution: "Distribution", AggTypeLastValue: "LastValue", } // Aggregation represents a data aggregation method. Use one of the functions: // Count, Sum, or Distribution to construct an Aggregation. type Aggregation struct { Type AggType // Type is the AggType of this Aggregation. Buckets []float64 // Buckets are the bucket endpoints if this Aggregation represents a distribution, see Distribution. newData func(time.Time) AggregationData } var ( aggCount = &Aggregation{ Type: AggTypeCount, newData: func(t time.Time) AggregationData { return &CountData{Start: t} }, } aggSum = &Aggregation{ Type: AggTypeSum, newData: func(t time.Time) AggregationData { return &SumData{Start: t} }, } ) // Count indicates that data collected and aggregated // with this method will be turned into a count value. // For example, total number of accepted requests can be // aggregated by using Count. func Count() *Aggregation { return aggCount } // Sum indicates that data collected and aggregated // with this method will be summed up. // For example, accumulated request bytes can be aggregated by using // Sum. func Sum() *Aggregation { return aggSum } // Distribution indicates that the desired aggregation is // a histogram distribution. // // A distribution aggregation may contain a histogram of the values in the // population. The bucket boundaries for that histogram are described // by the bounds. This defines len(bounds)+1 buckets. // // If len(bounds) >= 2 then the boundaries for bucket index i are: // // [-infinity, bounds[i]) for i = 0 // [bounds[i-1], bounds[i]) for 0 < i < length // [bounds[i-1], +infinity) for i = length // // If len(bounds) is 0 then there is no histogram associated with the // distribution. 
There will be a single bucket with boundaries // (-infinity, +infinity). // // If len(bounds) is 1 then there are no finite buckets, and that single // element is the common boundary of the overflow and underflow buckets. func Distribution(bounds ...float64) *Aggregation { agg := &Aggregation{ Type: AggTypeDistribution, Buckets: bounds, } agg.newData = func(t time.Time) AggregationData { return newDistributionData(agg, t) } return agg } // LastValue only reports the last value recorded using this // aggregation. All other measurements will be dropped. func LastValue() *Aggregation { return &Aggregation{ Type: AggTypeLastValue, newData: func(_ time.Time) AggregationData { return &LastValueData{} }, } } opencensus-go-0.24.0/stats/view/aggregation_data.go000066400000000000000000000221351433102037600223030ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package view import ( "math" "time" "go.opencensus.io/metric/metricdata" ) // AggregationData represents an aggregated value from a collection. // They are reported on the view data during exporting. // Most users won't directly access aggregation data. type AggregationData interface { isAggregationData() bool addSample(v float64, attachments map[string]interface{}, t time.Time) clone() AggregationData equal(other AggregationData) bool toPoint(t metricdata.Type, time time.Time) metricdata.Point StartTime() time.Time } const epsilon = 1e-9 // CountData is the aggregated data for the Count aggregation. // A count aggregation processes data and counts the recordings. // // Most users won't directly access count data. type CountData struct { Start time.Time Value int64 } func (a *CountData) isAggregationData() bool { return true } func (a *CountData) addSample(_ float64, _ map[string]interface{}, _ time.Time) { a.Value = a.Value + 1 } func (a *CountData) clone() AggregationData { return &CountData{Value: a.Value, Start: a.Start} } func (a *CountData) equal(other AggregationData) bool { a2, ok := other.(*CountData) if !ok { return false } return a.Start.Equal(a2.Start) && a.Value == a2.Value } func (a *CountData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { switch metricType { case metricdata.TypeCumulativeInt64: return metricdata.NewInt64Point(t, a.Value) default: panic("unsupported metricdata.Type") } } // StartTime returns the start time of the data being aggregated by CountData. func (a *CountData) StartTime() time.Time { return a.Start } // SumData is the aggregated data for the Sum aggregation. // A sum aggregation processes data and sums up the recordings. // // Most users won't directly access sum data.
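// A hypothetical sketch restating the bucketing rule documented for Distribution above:
// bounds (5, 10) define three buckets, [-inf, 5), [5, 10) and [10, +inf), so the three
// values recorded below land in buckets 0, 1 and 2. The measure and view names are invented.
package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

func main() {
	m := stats.Float64("example.com/measure/latency_buckets", "latency", stats.UnitMilliseconds)
	v := &view.View{Name: "example.com/views/latency_buckets", Measure: m, Aggregation: view.Distribution(5, 10)}
	if err := view.Register(v); err != nil {
		log.Fatal(err)
	}

	stats.Record(context.Background(), m.M(2), m.M(7), m.M(12))

	rows, err := view.RetrieveData(v.Name)
	if err != nil || len(rows) == 0 {
		log.Fatal("no data collected")
	}
	dist := rows[0].Data.(*view.DistributionData)
	log.Println(dist.CountPerBucket) // [1 1 1]
}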
type SumData struct { Start time.Time Value float64 } func (a *SumData) isAggregationData() bool { return true } func (a *SumData) addSample(v float64, _ map[string]interface{}, _ time.Time) { a.Value += v } func (a *SumData) clone() AggregationData { return &SumData{Value: a.Value, Start: a.Start} } func (a *SumData) equal(other AggregationData) bool { a2, ok := other.(*SumData) if !ok { return false } return a.Start.Equal(a2.Start) && math.Pow(a.Value-a2.Value, 2) < epsilon } func (a *SumData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { switch metricType { case metricdata.TypeCumulativeInt64: return metricdata.NewInt64Point(t, int64(a.Value)) case metricdata.TypeCumulativeFloat64: return metricdata.NewFloat64Point(t, a.Value) default: panic("unsupported metricdata.Type") } } // StartTime returns the start time of the data being aggregated by SumData. func (a *SumData) StartTime() time.Time { return a.Start } // DistributionData is the aggregated data for the // Distribution aggregation. // // Most users won't directly access distribution data. // // For a distribution with N bounds, the associated DistributionData will have // N+1 buckets. type DistributionData struct { Count int64 // number of data points aggregated Min float64 // minimum value in the distribution Max float64 // max value in the distribution Mean float64 // mean of the distribution SumOfSquaredDev float64 // sum of the squared deviation from the mean CountPerBucket []int64 // number of occurrences per bucket // ExemplarsPerBucket is slice the same length as CountPerBucket containing // an exemplar for the associated bucket, or nil. ExemplarsPerBucket []*metricdata.Exemplar bounds []float64 // histogram distribution of the values Start time.Time } func newDistributionData(agg *Aggregation, t time.Time) *DistributionData { bucketCount := len(agg.Buckets) + 1 return &DistributionData{ CountPerBucket: make([]int64, bucketCount), ExemplarsPerBucket: make([]*metricdata.Exemplar, bucketCount), bounds: agg.Buckets, Min: math.MaxFloat64, Max: math.SmallestNonzeroFloat64, Start: t, } } // Sum returns the sum of all samples collected. func (a *DistributionData) Sum() float64 { return a.Mean * float64(a.Count) } func (a *DistributionData) variance() float64 { if a.Count <= 1 { return 0 } return a.SumOfSquaredDev / float64(a.Count-1) } func (a *DistributionData) isAggregationData() bool { return true } // TODO(songy23): support exemplar attachments. func (a *DistributionData) addSample(v float64, attachments map[string]interface{}, t time.Time) { if v < a.Min { a.Min = v } if v > a.Max { a.Max = v } a.Count++ a.addToBucket(v, attachments, t) if a.Count == 1 { a.Mean = v return } oldMean := a.Mean a.Mean = a.Mean + (v-a.Mean)/float64(a.Count) a.SumOfSquaredDev = a.SumOfSquaredDev + (v-oldMean)*(v-a.Mean) } func (a *DistributionData) addToBucket(v float64, attachments map[string]interface{}, t time.Time) { var count *int64 var i int var b float64 for i, b = range a.bounds { if v < b { count = &a.CountPerBucket[i] break } } if count == nil { // Last bucket. 
i = len(a.bounds) count = &a.CountPerBucket[i] } *count++ if exemplar := getExemplar(v, attachments, t); exemplar != nil { a.ExemplarsPerBucket[i] = exemplar } } func getExemplar(v float64, attachments map[string]interface{}, t time.Time) *metricdata.Exemplar { if len(attachments) == 0 { return nil } return &metricdata.Exemplar{ Value: v, Timestamp: t, Attachments: attachments, } } func (a *DistributionData) clone() AggregationData { c := *a c.CountPerBucket = append([]int64(nil), a.CountPerBucket...) c.ExemplarsPerBucket = append([]*metricdata.Exemplar(nil), a.ExemplarsPerBucket...) return &c } func (a *DistributionData) equal(other AggregationData) bool { a2, ok := other.(*DistributionData) if !ok { return false } if a2 == nil { return false } if len(a.CountPerBucket) != len(a2.CountPerBucket) { return false } for i := range a.CountPerBucket { if a.CountPerBucket[i] != a2.CountPerBucket[i] { return false } } return a.Start.Equal(a2.Start) && a.Count == a2.Count && a.Min == a2.Min && a.Max == a2.Max && math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon } func (a *DistributionData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { switch metricType { case metricdata.TypeCumulativeDistribution: buckets := []metricdata.Bucket{} for i := 0; i < len(a.CountPerBucket); i++ { buckets = append(buckets, metricdata.Bucket{ Count: a.CountPerBucket[i], Exemplar: a.ExemplarsPerBucket[i], }) } bucketOptions := &metricdata.BucketOptions{Bounds: a.bounds} val := &metricdata.Distribution{ Count: a.Count, Sum: a.Sum(), SumOfSquaredDeviation: a.SumOfSquaredDev, BucketOptions: bucketOptions, Buckets: buckets, } return metricdata.NewDistributionPoint(t, val) default: // TODO: [rghetia] when we have a use case for TypeGaugeDistribution. panic("unsupported metricdata.Type") } } // StartTime returns the start time of the data being aggregated by DistributionData. func (a *DistributionData) StartTime() time.Time { return a.Start } // LastValueData returns the last value recorded for LastValue aggregation. type LastValueData struct { Value float64 } func (l *LastValueData) isAggregationData() bool { return true } func (l *LastValueData) addSample(v float64, _ map[string]interface{}, _ time.Time) { l.Value = v } func (l *LastValueData) clone() AggregationData { return &LastValueData{l.Value} } func (l *LastValueData) equal(other AggregationData) bool { a2, ok := other.(*LastValueData) if !ok { return false } return l.Value == a2.Value } func (l *LastValueData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { switch metricType { case metricdata.TypeGaugeInt64: return metricdata.NewInt64Point(t, int64(l.Value)) case metricdata.TypeGaugeFloat64: return metricdata.NewFloat64Point(t, l.Value) default: panic("unsupported metricdata.Type") } } // StartTime returns an empty time value as start time is not recorded when using last value // aggregation. func (l *LastValueData) StartTime() time.Time { return time.Time{} } // ClearStart clears the Start field from data if present. Useful for testing in cases where the // start time will be nondeterministic. 
func ClearStart(data AggregationData) { switch data := data.(type) { case *CountData: data.Start = time.Time{} case *SumData: data.Start = time.Time{} case *DistributionData: data.Start = time.Time{} } } opencensus-go-0.24.0/stats/view/aggregation_data_test.go000066400000000000000000000073571433102037600233530ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package view import ( "reflect" "testing" "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "go.opencensus.io/metric/metricdata" ) func TestDataClone(t *testing.T) { agg := &Aggregation{ Buckets: []float64{1, 2, 3, 4}, } dist := newDistributionData(agg, time.Time{}) dist.Count = 7 dist.Max = 11 dist.Min = 1 dist.CountPerBucket = []int64{0, 2, 3, 2} dist.Mean = 4 dist.SumOfSquaredDev = 1.2 tests := []struct { name string src AggregationData }{ { name: "count data", src: &CountData{Value: 5}, }, { name: "distribution data", src: dist, }, { name: "sum data", src: &SumData{Value: 65.7}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := tt.src.clone() if !reflect.DeepEqual(got, tt.src) { t.Errorf("AggregationData.clone() = %v, want %v", got, tt.src) } // TODO(jbd): Make sure that data is deep copied. if got == tt.src { t.Errorf("AggregationData.clone() returned the same pointer") } }) } } func TestDistributionData_addSample(t *testing.T) { agg := &Aggregation{ Buckets: []float64{1, 2}, } dd := newDistributionData(agg, time.Time{}) attachments1 := map[string]interface{}{"key1": "value1"} t1 := time.Now() dd.addSample(0.5, attachments1, t1) e1 := &metricdata.Exemplar{Value: 0.5, Timestamp: t1, Attachments: attachments1} want := &DistributionData{ Count: 1, CountPerBucket: []int64{1, 0, 0}, ExemplarsPerBucket: []*metricdata.Exemplar{e1, nil, nil}, Max: 0.5, Min: 0.5, Mean: 0.5, SumOfSquaredDev: 0, } if diff := cmpDD(dd, want); diff != "" { t.Fatalf("Unexpected DistributionData -got +want: %s", diff) } attachments2 := map[string]interface{}{"key2": "value2"} t2 := t1.Add(time.Microsecond) dd.addSample(0.7, attachments2, t2) // Previous exemplar should be overwritten. e2 := &metricdata.Exemplar{Value: 0.7, Timestamp: t2, Attachments: attachments2} want = &DistributionData{ Count: 2, CountPerBucket: []int64{2, 0, 0}, ExemplarsPerBucket: []*metricdata.Exemplar{e2, nil, nil}, Max: 0.7, Min: 0.5, Mean: 0.6, SumOfSquaredDev: 0, } if diff := cmpDD(dd, want); diff != "" { t.Fatalf("Unexpected DistributionData -got +want: %s", diff) } attachments3 := map[string]interface{}{"key3": "value3"} t3 := t2.Add(time.Microsecond) dd.addSample(1.2, attachments3, t3) // e3 is at another bucket. e2 should still be there. 
e3 := &metricdata.Exemplar{Value: 1.2, Timestamp: t3, Attachments: attachments3} want = &DistributionData{ Count: 3, CountPerBucket: []int64{2, 1, 0}, ExemplarsPerBucket: []*metricdata.Exemplar{e2, e3, nil}, Max: 1.2, Min: 0.5, Mean: 0.7999999999999999, SumOfSquaredDev: 0, } if diff := cmpDD(dd, want); diff != "" { t.Fatalf("Unexpected DistributionData -got +want: %s", diff) } } func cmpDD(got, want *DistributionData) string { return cmp.Diff(got, want, cmpopts.IgnoreFields(DistributionData{}, "SumOfSquaredDev"), cmpopts.IgnoreUnexported(DistributionData{})) } opencensus-go-0.24.0/stats/view/benchmark_test.go000066400000000000000000000053211433102037600220120ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package view import ( "context" "fmt" "testing" "time" "go.opencensus.io/stats" "go.opencensus.io/tag" ) var ( m = stats.Float64("m", "", "") k1 = tag.MustNewKey("k1") k2 = tag.MustNewKey("k2") k3 = tag.MustNewKey("k3") k4 = tag.MustNewKey("k4") k5 = tag.MustNewKey("k5") k6 = tag.MustNewKey("k6") k7 = tag.MustNewKey("k7") k8 = tag.MustNewKey("k8") view = &View{ Measure: m, Aggregation: Distribution(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), TagKeys: []tag.Key{k1, k2}, } ) // BenchmarkRecordReqCommand benchmarks calling the internal recording machinery // directly. 
func BenchmarkRecordReqCommand(b *testing.B) { w := NewMeter().(*worker) register := ®isterViewReq{views: []*View{view}, err: make(chan error, 1)} register.handleCommand(w) if err := <-register.err; err != nil { b.Fatal(err) } ctxs := prepareContexts(10) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { record := &recordReq{ ms: []stats.Measurement{ m.M(1), m.M(1), m.M(1), m.M(1), m.M(1), m.M(1), m.M(1), m.M(1), }, tm: tag.FromContext(ctxs[i%len(ctxs)]), t: time.Now(), } record.handleCommand(w) } } func BenchmarkRecordViaStats(b *testing.B) { meter := NewMeter() meter.Start() defer meter.Stop() meter.Register(view) defer meter.Unregister(view) ctxs := prepareContexts(10) rec := stats.WithRecorder(meter) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { stats.RecordWithOptions(ctxs[i%len(ctxs)], rec, stats.WithMeasurements(m.M(1), m.M(1), m.M(1), m.M(1), m.M(1), m.M(1), m.M(1), m.M(1))) } } func prepareContexts(tagCount int) []context.Context { ctxs := make([]context.Context, 0, tagCount) for i := 0; i < tagCount; i++ { ctx, _ := tag.New(context.Background(), tag.Upsert(k1, fmt.Sprintf("v%d", i)), tag.Upsert(k2, fmt.Sprintf("v%d", i)), tag.Upsert(k3, fmt.Sprintf("v%d", i)), tag.Upsert(k4, fmt.Sprintf("v%d", i)), tag.Upsert(k5, fmt.Sprintf("v%d", i)), tag.Upsert(k6, fmt.Sprintf("v%d", i)), tag.Upsert(k7, fmt.Sprintf("v%d", i)), tag.Upsert(k8, fmt.Sprintf("v%d", i)), ) ctxs = append(ctxs, ctx) } return ctxs } opencensus-go-0.24.0/stats/view/collector.go000066400000000000000000000051271433102037600210130ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package view import ( "sort" "time" "go.opencensus.io/internal/tagencoding" "go.opencensus.io/tag" ) type collector struct { // signatures holds the aggregations values for each unique tag signature // (values for all keys) to its aggregator. signatures map[string]AggregationData // Aggregation is the description of the aggregation to perform for this // view. a *Aggregation } func (c *collector) addSample(s string, v float64, attachments map[string]interface{}, t time.Time) { aggregator, ok := c.signatures[s] if !ok { aggregator = c.a.newData(t) c.signatures[s] = aggregator } aggregator.addSample(v, attachments, t) } // collectRows returns a snapshot of the collected Row values. func (c *collector) collectedRows(keys []tag.Key) []*Row { rows := make([]*Row, 0, len(c.signatures)) for sig, aggregator := range c.signatures { tags := decodeTags([]byte(sig), keys) row := &Row{Tags: tags, Data: aggregator.clone()} rows = append(rows, row) } return rows } func (c *collector) clearRows() { c.signatures = make(map[string]AggregationData) } // encodeWithKeys encodes the map by using values // only associated with the keys provided. 
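// As a sketch of the layout (illustrative, not an exported contract): for keys
// [k1, k2] and a map that only contains k1="v1", the buffer holds a
// length-prefixed "v1" followed by a zero-length entry for k2, so that values
// can be decoded positionally for the same key slice by decodeTags below.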
func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte { // Compute the buffer length we will need ahead of time to avoid resizing later reqLen := 0 for _, k := range keys { s, _ := m.Value(k) // We will store each key + its length reqLen += len(s) + 1 } vb := &tagencoding.Values{ Buffer: make([]byte, reqLen), } for _, k := range keys { v, _ := m.Value(k) vb.WriteValue([]byte(v)) } return vb.Bytes() } // decodeTags decodes tags from the buffer and // orders them by the keys. func decodeTags(buf []byte, keys []tag.Key) []tag.Tag { vb := &tagencoding.Values{Buffer: buf} var tags []tag.Tag for _, k := range keys { v := vb.ReadValue() if v != nil { tags = append(tags, tag.Tag{Key: k, Value: string(v)}) } } vb.ReadIndex = 0 sort.Slice(tags, func(i, j int) bool { return tags[i].Key.Name() < tags[j].Key.Name() }) return tags } opencensus-go-0.24.0/stats/view/collector_test.go000066400000000000000000000044461433102037600220550ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package view import ( "context" "testing" "go.opencensus.io/tag" ) func TestEncodeDecodeTags(t *testing.T) { ctx := context.Background() type testData struct { m *tag.Map keys []tag.Key want map[tag.Key][]byte } k1 = tag.MustNewKey("/encodedecodetest/k1") k2 = tag.MustNewKey("/encodedecodetest/k2") k3 = tag.MustNewKey("/encodedecodetest/k3") ctx1, _ := tag.New(ctx) ctx2, _ := tag.New(ctx, tag.Insert(k2, "v2")) ctx3, _ := tag.New(ctx, tag.Insert(k1, "v1"), tag.Insert(k2, "v2")) ctx4, _ := tag.New(ctx, tag.Insert(k1, "v1"), tag.Insert(k2, "v2"), tag.Insert(k3, "v3")) m1 := tag.FromContext(ctx1) m2 := tag.FromContext(ctx2) m3 := tag.FromContext(ctx3) m4 := tag.FromContext(ctx4) tests := []testData{ { m1, []tag.Key{k1}, nil, }, { m2, []tag.Key{}, nil, }, { m2, []tag.Key{k1}, nil, }, { m2, []tag.Key{k2}, map[tag.Key][]byte{ k2: []byte("v2"), }, }, { m3, []tag.Key{k1}, map[tag.Key][]byte{ k1: []byte("v1"), }, }, { m3, []tag.Key{k1, k2}, map[tag.Key][]byte{ k1: []byte("v1"), k2: []byte("v2"), }, }, { m4, []tag.Key{k3, k1}, map[tag.Key][]byte{ k1: []byte("v1"), k3: []byte("v3"), }, }, } for label, tt := range tests { tags := decodeTags(encodeWithKeys(tt.m, tt.keys), tt.keys) if got, want := len(tags), len(tt.want); got != want { t.Fatalf("%d: len(decoded) = %v; not %v", label, got, want) } for _, tag := range tags { if _, ok := tt.want[tag.Key]; !ok { t.Errorf("%d: missing key %v", label, tag.Key) } if got, want := tag.Value, string(tt.want[tag.Key]); got != want { t.Errorf("%d: got value %q; want %q", label, got, want) } } } } opencensus-go-0.24.0/stats/view/doc.go000066400000000000000000000036251433102037600175730ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Package view contains support for collecting and exposing aggregates over stats. // // In order to collect measurements, views need to be defined and registered. // A view allows recorded measurements to be filtered and aggregated. // // All recorded measurements can be grouped by a list of tags. // // OpenCensus provides several aggregation methods: Count, Distribution and Sum. // // Count only counts the number of measurement points recorded. // Distribution provides statistical summary of the aggregated data by counting // how many recorded measurements fall into each bucket. // Sum adds up the measurement values. // LastValue just keeps track of the most recently recorded measurement value. // All aggregations are cumulative. // // Views can be registered and unregistered at any time during program execution. // // Libraries can define views but it is recommended that in most cases registering // views be left up to applications. // // # Exporting // // Collected and aggregated data can be exported to a metric collection // backend by registering its exporter. // // Multiple exporters can be registered to upload the data to various // different back ends. package view // import "go.opencensus.io/stats/view" // TODO(acetechnologist): Add a link to the language independent OpenCensus // spec when it is available. opencensus-go-0.24.0/stats/view/example_test.go000066400000000000000000000023121433102037600215100ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package view_test import ( "log" "go.opencensus.io/stats" "go.opencensus.io/stats/view" ) func Example() { // Measures are usually declared and used by instrumented packages. m := stats.Int64("example.com/measure/openconns", "open connections", stats.UnitDimensionless) // Views are usually registered in your application main function. if err := view.Register(&view.View{ Name: "example.com/views/openconns", Description: "open connections", Measure: m, Aggregation: view.Distribution(0, 1000, 2000), }); err != nil { log.Fatal(err) } // Use view.RegisterExporter to export collected data. } opencensus-go-0.24.0/stats/view/export.go000066400000000000000000000030161433102037600203410ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package view // Exporter exports the collected records as view data. // // The ExportView method should return quickly; if an // Exporter takes a significant amount of time to // process a Data, that work should be done on another goroutine. // // It is safe to assume that ExportView will not be called concurrently from // multiple goroutines. // // The Data should not be modified. type Exporter interface { ExportView(viewData *Data) } // RegisterExporter registers an exporter. // Collected data will be reported via all the // registered exporters. Once you no longer // want data to be exported, invoke UnregisterExporter // with the previously registered exporter. // // Binaries can register exporters, libraries shouldn't register exporters. func RegisterExporter(e Exporter) { defaultWorker.RegisterExporter(e) } // UnregisterExporter unregisters an exporter. func UnregisterExporter(e Exporter) { defaultWorker.UnregisterExporter(e) } opencensus-go-0.24.0/stats/view/view.go000066400000000000000000000140641433102037600177770ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package view import ( "bytes" "errors" "fmt" "reflect" "sort" "sync/atomic" "time" "go.opencensus.io/metric/metricdata" "go.opencensus.io/stats" "go.opencensus.io/tag" ) // View allows users to aggregate the recorded stats.Measurements. // Views need to be passed to the Register function before data will be // collected and sent to Exporters. type View struct { Name string // Name of View. Must be unique. If unset, will default to the name of the Measure. Description string // Description is a human-readable description for this view. // TagKeys are the tag keys describing the grouping of this view. // A single Row will be produced for each combination of associated tag values. TagKeys []tag.Key // Measure is a stats.Measure to aggregate in this view. Measure stats.Measure // Aggregation is the aggregation function to apply to the set of Measurements. Aggregation *Aggregation } // WithName returns a copy of the View with a new name. This is useful for // renaming views to cope with limitations placed on metric names by various // backends. func (v *View) WithName(name string) *View { vNew := *v vNew.Name = name return &vNew } // same compares two views and returns true if they represent the same aggregation. 
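// This is what makes registering an identical view definition twice a no-op:
// the worker compares the new view against any already-registered view with
// the same name and only reports an error when the two differ (see
// tryRegisterView in worker.go).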
func (v *View) same(other *View) bool { if v == other { return true } if v == nil { return false } return reflect.DeepEqual(v.Aggregation, other.Aggregation) && v.Measure.Name() == other.Measure.Name() } // ErrNegativeBucketBounds error returned if histogram contains negative bounds. // // Deprecated: this should not be public. var ErrNegativeBucketBounds = errors.New("negative bucket bounds not supported") // canonicalize canonicalizes v by setting explicit // defaults for Name and Description and sorting the TagKeys func (v *View) canonicalize() error { if v.Measure == nil { return fmt.Errorf("cannot register view %q: measure not set", v.Name) } if v.Aggregation == nil { return fmt.Errorf("cannot register view %q: aggregation not set", v.Name) } if v.Name == "" { v.Name = v.Measure.Name() } if v.Description == "" { v.Description = v.Measure.Description() } if err := checkViewName(v.Name); err != nil { return err } sort.Slice(v.TagKeys, func(i, j int) bool { return v.TagKeys[i].Name() < v.TagKeys[j].Name() }) sort.Float64s(v.Aggregation.Buckets) for _, b := range v.Aggregation.Buckets { if b < 0 { return ErrNegativeBucketBounds } } // drop 0 bucket silently. v.Aggregation.Buckets = dropZeroBounds(v.Aggregation.Buckets...) return nil } func dropZeroBounds(bounds ...float64) []float64 { for i, bound := range bounds { if bound > 0 { return bounds[i:] } } return []float64{} } // viewInternal is the internal representation of a View. type viewInternal struct { view *View // view is the canonicalized View definition associated with this view. subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access collector *collector metricDescriptor *metricdata.Descriptor } func newViewInternal(v *View) (*viewInternal, error) { return &viewInternal{ view: v, collector: &collector{make(map[string]AggregationData), v.Aggregation}, metricDescriptor: viewToMetricDescriptor(v), }, nil } func (v *viewInternal) subscribe() { atomic.StoreUint32(&v.subscribed, 1) } func (v *viewInternal) unsubscribe() { atomic.StoreUint32(&v.subscribed, 0) } // isSubscribed returns true if the view is exporting // data by subscription. func (v *viewInternal) isSubscribed() bool { return atomic.LoadUint32(&v.subscribed) == 1 } func (v *viewInternal) clearRows() { v.collector.clearRows() } func (v *viewInternal) collectedRows() []*Row { return v.collector.collectedRows(v.view.TagKeys) } func (v *viewInternal) addSample(m *tag.Map, val float64, attachments map[string]interface{}, t time.Time) { if !v.isSubscribed() { return } sig := string(encodeWithKeys(m, v.view.TagKeys)) v.collector.addSample(sig, val, attachments, t) } // A Data is a set of rows about usage of the single measure associated // with the given view. Each row is specific to a unique set of tags. type Data struct { View *View Start, End time.Time Rows []*Row } // Row is the collected value for a specific set of key value pairs a.k.a tags. type Row struct { Tags []tag.Tag Data AggregationData } func (r *Row) String() string { var buffer bytes.Buffer buffer.WriteString("{ ") buffer.WriteString("{ ") for _, t := range r.Tags { buffer.WriteString(fmt.Sprintf("{%v %v}", t.Key.Name(), t.Value)) } buffer.WriteString(" }") buffer.WriteString(fmt.Sprintf("%v", r.Data)) buffer.WriteString(" }") return buffer.String() } // Equal returns true if both rows are equal. Tags are expected to be ordered // by the key name. Even if both rows have the same tags but the tags appear in // different orders it will return false. 
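// For example (illustrative): two Rows with Tags []tag.Tag{{Key: k1, Value: "v1"}}
// and identical Data compare equal, while the same tags listed in a different
// order, or the same tags with different aggregation data, do not.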
func (r *Row) Equal(other *Row) bool { if r == other { return true } return reflect.DeepEqual(r.Tags, other.Tags) && r.Data.equal(other.Data) } const maxNameLength = 255 // Returns true if the given string contains only printable characters. func isPrintable(str string) bool { for _, r := range str { if !(r >= ' ' && r <= '~') { return false } } return true } func checkViewName(name string) error { if len(name) > maxNameLength { return fmt.Errorf("view name cannot be larger than %v", maxNameLength) } if !isPrintable(name) { return fmt.Errorf("view name needs to be an ASCII string") } return nil } opencensus-go-0.24.0/stats/view/view_measure_test.go000066400000000000000000000022321433102037600225510ustar00rootroot00000000000000package view import ( "context" "testing" "go.opencensus.io/stats" ) func TestMeasureFloat64AndInt64(t *testing.T) { // Recording through both a Float64Measure and Int64Measure with the // same name should work. im := stats.Int64("TestMeasureFloat64AndInt64", "", stats.UnitDimensionless) fm := stats.Float64("TestMeasureFloat64AndInt64", "", stats.UnitDimensionless) if im == nil || fm == nil { t.Fatal("Error creating Measures") } v1 := &View{ Name: "TestMeasureFloat64AndInt64/v1", Measure: im, Aggregation: Sum(), } v2 := &View{ Name: "TestMeasureFloat64AndInt64/v2", Measure: fm, Aggregation: Sum(), } Register(v1, v2) stats.Record(context.Background(), im.M(5)) stats.Record(context.Background(), fm.M(2.2)) d1, _ := RetrieveData(v1.Name) d2, _ := RetrieveData(v2.Name) sum1 := d1[0].Data.(*SumData) sum2 := d2[0].Data.(*SumData) // We expect both views to return 7.2, as though we recorded on a single measure. if got, want := sum1.Value, 7.2; got != want { t.Errorf("sum1 = %v; want %v", got, want) } if got, want := sum2.Value, 7.2; got != want { t.Errorf("sum2 = %v; want %v", got, want) } } opencensus-go-0.24.0/stats/view/view_test.go000066400000000000000000000307141433102037600210360ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// package view import ( "context" "testing" "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "go.opencensus.io/metric/metricdata" "go.opencensus.io/stats" "go.opencensus.io/tag" ) func Test_View_MeasureFloat64_AggregationDistribution(t *testing.T) { k1 := tag.MustNewKey("k1") k2 := tag.MustNewKey("k2") k3 := tag.MustNewKey("k3") agg1 := Distribution(2) m := stats.Int64("Test_View_MeasureFloat64_AggregationDistribution/m1", "", stats.UnitDimensionless) view1 := &View{ TagKeys: []tag.Key{k1, k2}, Measure: m, Aggregation: agg1, } view, err := newViewInternal(view1) if err != nil { t.Fatal(err) } type tagString struct { k tag.Key v string } type record struct { f float64 tags []tagString t time.Time } type testCase struct { label string records []record wantRows []*Row } now := time.Now() ts := make([]time.Time, 7) for i := range ts { ts[i] = now.Add(time.Duration(i) * time.Second) } tcs := []testCase{ { "1", []record{ {1, []tagString{{k1, "v1"}}, ts[0]}, {5, []tagString{{k1, "v1"}}, ts[1]}, }, []*Row{ { []tag.Tag{{Key: k1, Value: "v1"}}, &DistributionData{ Count: 2, Min: 1, Max: 5, Mean: 3, SumOfSquaredDev: 8, CountPerBucket: []int64{1, 1}, bounds: []float64{2}, ExemplarsPerBucket: []*metricdata.Exemplar{nil, nil}, Start: ts[0], }, }, }, }, { "2", []record{ {1, []tagString{{k1, "v1"}}, ts[0]}, {5, []tagString{{k2, "v2"}}, ts[1]}, }, []*Row{ { []tag.Tag{{Key: k1, Value: "v1"}}, &DistributionData{ Count: 1, Min: 1, Max: 1, Mean: 1, CountPerBucket: []int64{1, 0}, bounds: []float64{2}, ExemplarsPerBucket: []*metricdata.Exemplar{nil, nil}, Start: ts[0], }, }, { []tag.Tag{{Key: k2, Value: "v2"}}, &DistributionData{ Count: 1, Min: 5, Max: 5, Mean: 5, CountPerBucket: []int64{0, 1}, bounds: []float64{2}, ExemplarsPerBucket: []*metricdata.Exemplar{nil, nil}, Start: ts[1], }, }, }, }, { "3", []record{ {1, []tagString{{k1, "v1"}}, ts[0]}, {5, []tagString{{k1, "v1"}, {k3, "v3"}}, ts[1]}, {1, []tagString{{k1, "v1 other"}}, ts[2]}, {5, []tagString{{k2, "v2"}}, ts[3]}, {5, []tagString{{k1, "v1"}, {k2, "v2"}}, ts[4]}, }, []*Row{ { []tag.Tag{{Key: k1, Value: "v1"}}, &DistributionData{ Count: 2, Min: 1, Max: 5, Mean: 3, SumOfSquaredDev: 8, CountPerBucket: []int64{1, 1}, bounds: []float64{2}, ExemplarsPerBucket: []*metricdata.Exemplar{nil, nil}, Start: ts[0], }, }, { []tag.Tag{{Key: k1, Value: "v1 other"}}, &DistributionData{ Count: 1, Min: 1, Max: 1, Mean: 1, CountPerBucket: []int64{1, 0}, bounds: []float64{2}, ExemplarsPerBucket: []*metricdata.Exemplar{nil, nil}, Start: ts[2], }, }, { []tag.Tag{{Key: k2, Value: "v2"}}, &DistributionData{ Count: 1, Min: 5, Max: 5, Mean: 5, CountPerBucket: []int64{0, 1}, bounds: []float64{2}, ExemplarsPerBucket: []*metricdata.Exemplar{nil, nil}, Start: ts[3], }, }, { []tag.Tag{{Key: k1, Value: "v1"}, {Key: k2, Value: "v2"}}, &DistributionData{ Count: 1, Min: 5, Max: 5, Mean: 5, CountPerBucket: []int64{0, 1}, bounds: []float64{2}, ExemplarsPerBucket: []*metricdata.Exemplar{nil, nil}, Start: ts[4], }, }, }, }, { "4", []record{ {1, []tagString{{k1, "v1 is a very long value key"}}, ts[0]}, {5, []tagString{{k1, "v1 is a very long value key"}, {k3, "v3"}}, ts[1]}, {1, []tagString{{k1, "v1 is another very long value key"}}, ts[2]}, {1, []tagString{{k1, "v1 is a very long value key"}, {k2, "v2 is a very long value key"}}, ts[3]}, {5, []tagString{{k1, "v1 is a very long value key"}, {k2, "v2 is a very long value key"}}, ts[4]}, {3, []tagString{{k1, "v1 is a very long value key"}, {k2, "v2 is a very long value key"}}, ts[5]}, {3, []tagString{{k1, "v1 is a very 
long value key"}, {k2, "v2 is a very long value key"}}, ts[6]}, }, []*Row{ { []tag.Tag{{Key: k1, Value: "v1 is a very long value key"}}, &DistributionData{ Count: 2, Min: 1, Max: 5, Mean: 3, SumOfSquaredDev: 8, CountPerBucket: []int64{1, 1}, bounds: []float64{2}, ExemplarsPerBucket: []*metricdata.Exemplar{nil, nil}, Start: ts[0], }, }, { []tag.Tag{{Key: k1, Value: "v1 is another very long value key"}}, &DistributionData{ Count: 1, Min: 1, Max: 1, Mean: 1, CountPerBucket: []int64{1, 0}, bounds: []float64{2}, ExemplarsPerBucket: []*metricdata.Exemplar{nil, nil}, Start: ts[2], }, }, { []tag.Tag{{Key: k1, Value: "v1 is a very long value key"}, {Key: k2, Value: "v2 is a very long value key"}}, &DistributionData{ Count: 4, Min: 1, Max: 5, Mean: 3, SumOfSquaredDev: 2.66666666666667 * 3, CountPerBucket: []int64{1, 3}, bounds: []float64{2}, ExemplarsPerBucket: []*metricdata.Exemplar{nil, nil}, Start: ts[3], }, }, }, }, } for _, tc := range tcs { view.clearRows() view.subscribe() for _, r := range tc.records { mods := []tag.Mutator{} for _, t := range r.tags { mods = append(mods, tag.Insert(t.k, t.v)) } ctx, err := tag.New(context.Background(), mods...) if err != nil { t.Errorf("%v: New = %v", tc.label, err) } view.addSample(tag.FromContext(ctx), r.f, nil, r.t) } gotRows := view.collectedRows() if diff := cmp.Diff(gotRows, tc.wantRows, cmpopts.SortSlices(cmpRow)); diff != "" { t.Errorf("%v: unexpected row (got-, want+): %s", tc.label, diff) break } } } func Test_View_MeasureFloat64_AggregationSum(t *testing.T) { k1 := tag.MustNewKey("k1") k2 := tag.MustNewKey("k2") k3 := tag.MustNewKey("k3") m := stats.Int64("Test_View_MeasureFloat64_AggregationSum/m1", "", stats.UnitDimensionless) view, err := newViewInternal(&View{TagKeys: []tag.Key{k1, k2}, Measure: m, Aggregation: Sum()}) if err != nil { t.Fatal(err) } type tagString struct { k tag.Key v string } type record struct { f float64 tags []tagString t time.Time } now := time.Now() ts := make([]time.Time, 5) for i := range ts { ts[i] = now.Add(time.Duration(i) * time.Second) } tcs := []struct { label string records []record wantRows []*Row }{ { "1", []record{ {1, []tagString{{k1, "v1"}}, ts[0]}, {5, []tagString{{k1, "v1"}}, ts[1]}, }, []*Row{ { []tag.Tag{{Key: k1, Value: "v1"}}, &SumData{Value: 6, Start: ts[0]}, }, }, }, { "2", []record{ {1, []tagString{{k1, "v1"}}, ts[0]}, {5, []tagString{{k2, "v2"}}, ts[1]}, }, []*Row{ { []tag.Tag{{Key: k1, Value: "v1"}}, &SumData{Value: 1, Start: ts[0]}, }, { []tag.Tag{{Key: k2, Value: "v2"}}, &SumData{Value: 5, Start: ts[1]}, }, }, }, { "3", []record{ {1, []tagString{{k1, "v1"}}, ts[0]}, {5, []tagString{{k1, "v1"}, {k3, "v3"}}, ts[1]}, {1, []tagString{{k1, "v1 other"}}, ts[2]}, {5, []tagString{{k2, "v2"}}, ts[3]}, {5, []tagString{{k1, "v1"}, {k2, "v2"}}, ts[4]}, }, []*Row{ { []tag.Tag{{Key: k1, Value: "v1"}}, &SumData{Value: 6, Start: ts[0]}, }, { []tag.Tag{{Key: k1, Value: "v1 other"}}, &SumData{Value: 1, Start: ts[2]}, }, { []tag.Tag{{Key: k2, Value: "v2"}}, &SumData{Value: 5, Start: ts[3]}, }, { []tag.Tag{{Key: k1, Value: "v1"}, {Key: k2, Value: "v2"}}, &SumData{Value: 5, Start: ts[4]}, }, }, }, } for _, tt := range tcs { view.clearRows() view.subscribe() for _, r := range tt.records { mods := []tag.Mutator{} for _, t := range r.tags { mods = append(mods, tag.Insert(t.k, t.v)) } ctx, err := tag.New(context.Background(), mods...) 
if err != nil { t.Errorf("%v: New = %v", tt.label, err) } view.addSample(tag.FromContext(ctx), r.f, nil, r.t) } gotRows := view.collectedRows() if diff := cmp.Diff(gotRows, tt.wantRows, cmpopts.SortSlices(cmpRow)); diff != "" { t.Errorf("%v: unexpected row (got-, want+): %s", tt.label, diff) break } } } func TestCanonicalize(t *testing.T) { k1 := tag.MustNewKey("k1") k2 := tag.MustNewKey("k2") m := stats.Int64("TestCanonicalize/m1", "desc desc", stats.UnitDimensionless) v := &View{TagKeys: []tag.Key{k2, k1}, Measure: m, Aggregation: Sum()} err := v.canonicalize() if err != nil { t.Fatal(err) } if got, want := v.Name, "TestCanonicalize/m1"; got != want { t.Errorf("vc.Name = %q; want %q", got, want) } if got, want := v.Description, "desc desc"; got != want { t.Errorf("vc.Description = %q; want %q", got, want) } if got, want := len(v.TagKeys), 2; got != want { t.Errorf("len(vc.TagKeys) = %d; want %d", got, want) } if got, want := v.TagKeys[0].Name(), "k1"; got != want { t.Errorf("vc.TagKeys[0].Name() = %q; want %q", got, want) } } func TestViewSortedKeys(t *testing.T) { k1 := tag.MustNewKey("a") k2 := tag.MustNewKey("b") k3 := tag.MustNewKey("c") ks := []tag.Key{k1, k3, k2} m := stats.Int64("TestViewSortedKeys/m1", "", stats.UnitDimensionless) Register(&View{ Name: "sort_keys", Description: "desc sort_keys", TagKeys: ks, Measure: m, Aggregation: Sum(), }) // Register normalizes the view by sorting the tag keys, retrieve the normalized view v := Find("sort_keys") want := []string{"a", "b", "c"} vks := v.TagKeys if len(vks) != len(want) { t.Errorf("Keys = %+v; want %+v", vks, want) } for i, v := range want { if got, want := v, vks[i].Name(); got != want { t.Errorf("View name = %q; want %q", got, want) } } } func cmpRow(r1 *Row, r2 *Row) bool { return r1.Data.StartTime().Before(r2.Data.StartTime()) } func TestRegisterUnregisterParity(t *testing.T) { measures := []stats.Measure{ stats.Int64("ifoo", "iFOO", "iBar"), stats.Float64("ffoo", "fFOO", "fBar"), } aggregations := []*Aggregation{ Count(), Sum(), Distribution(1, 2.0, 4.0, 8.0, 16.0), } for i := 0; i < 10; i++ { for _, m := range measures { for _, agg := range aggregations { v := &View{ Aggregation: agg, Name: "Lookup here", Measure: m, } if err := Register(v); err != nil { t.Errorf("Iteration #%d:\nMeasure: (%#v)\nAggregation (%#v)\nError: %v", i, m, agg, err) } Unregister(v) } } } } func TestRegisterAfterMeasurement(t *testing.T) { // Tests that we can register views after measurements are created and // they still take effect. 
m := stats.Int64(t.Name(), "", stats.UnitDimensionless) mm := m.M(1) ctx := context.Background() stats.Record(ctx, mm) v := &View{ Measure: m, Aggregation: Count(), } if err := Register(v); err != nil { t.Fatal(err) } rows, err := RetrieveData(v.Name) if err != nil { t.Fatal(err) } if len(rows) > 0 { t.Error("View should not have data") } stats.Record(ctx, mm) rows, err = RetrieveData(v.Name) if err != nil { t.Fatal(err) } if len(rows) == 0 { t.Error("View should have data") } } func TestViewRegister_negativeBucketBounds(t *testing.T) { m := stats.Int64("TestViewRegister_negativeBucketBounds", "", "") v := &View{ Measure: m, Aggregation: Distribution(-1, 2), } err := Register(v) if err != ErrNegativeBucketBounds { t.Errorf("Expected ErrNegativeBucketBounds, got %v", err) } } func TestViewRegister_sortBuckets(t *testing.T) { m := stats.Int64("TestViewRegister_sortBuckets", "", "") v := &View{ Measure: m, Aggregation: Distribution(2, 1), } err := Register(v) if err != nil { t.Fatalf("Unexpected err %s", err) } want := []float64{1, 2} if diff := cmp.Diff(v.Aggregation.Buckets, want); diff != "" { t.Errorf("buckets differ -got +want: %s", diff) } } func TestViewRegister_dropZeroBuckets(t *testing.T) { m := stats.Int64("TestViewRegister_dropZeroBuckets", "", "") v := &View{ Measure: m, Aggregation: Distribution(2, 0, 1), } err := Register(v) if err != nil { t.Fatalf("Unexpected err %s", err) } want := []float64{1, 2} if diff := cmp.Diff(v.Aggregation.Buckets, want); diff != "" { t.Errorf("buckets differ -got +want: %s", diff) } } opencensus-go-0.24.0/stats/view/view_to_metric.go000066400000000000000000000071701433102037600220440ustar00rootroot00000000000000// Copyright 2019, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// package view import ( "time" "go.opencensus.io/resource" "go.opencensus.io/metric/metricdata" "go.opencensus.io/stats" ) func getUnit(unit string) metricdata.Unit { switch unit { case "1": return metricdata.UnitDimensionless case "ms": return metricdata.UnitMilliseconds case "By": return metricdata.UnitBytes } return metricdata.UnitDimensionless } func getType(v *View) metricdata.Type { m := v.Measure agg := v.Aggregation switch agg.Type { case AggTypeSum: switch m.(type) { case *stats.Int64Measure: return metricdata.TypeCumulativeInt64 case *stats.Float64Measure: return metricdata.TypeCumulativeFloat64 default: panic("unexpected measure type") } case AggTypeDistribution: return metricdata.TypeCumulativeDistribution case AggTypeLastValue: switch m.(type) { case *stats.Int64Measure: return metricdata.TypeGaugeInt64 case *stats.Float64Measure: return metricdata.TypeGaugeFloat64 default: panic("unexpected measure type") } case AggTypeCount: switch m.(type) { case *stats.Int64Measure: return metricdata.TypeCumulativeInt64 case *stats.Float64Measure: return metricdata.TypeCumulativeInt64 default: panic("unexpected measure type") } default: panic("unexpected aggregation type") } } func getLabelKeys(v *View) []metricdata.LabelKey { labelKeys := []metricdata.LabelKey{} for _, k := range v.TagKeys { labelKeys = append(labelKeys, metricdata.LabelKey{Key: k.Name()}) } return labelKeys } func viewToMetricDescriptor(v *View) *metricdata.Descriptor { return &metricdata.Descriptor{ Name: v.Name, Description: v.Description, Unit: convertUnit(v), Type: getType(v), LabelKeys: getLabelKeys(v), } } func convertUnit(v *View) metricdata.Unit { switch v.Aggregation.Type { case AggTypeCount: return metricdata.UnitDimensionless default: return getUnit(v.Measure.Unit()) } } func toLabelValues(row *Row, expectedKeys []metricdata.LabelKey) []metricdata.LabelValue { labelValues := []metricdata.LabelValue{} tagMap := make(map[string]string) for _, tag := range row.Tags { tagMap[tag.Key.Name()] = tag.Value } for _, key := range expectedKeys { if val, ok := tagMap[key.Key]; ok { labelValues = append(labelValues, metricdata.NewLabelValue(val)) } else { labelValues = append(labelValues, metricdata.LabelValue{}) } } return labelValues } func rowToTimeseries(v *viewInternal, row *Row, now time.Time) *metricdata.TimeSeries { return &metricdata.TimeSeries{ Points: []metricdata.Point{row.Data.toPoint(v.metricDescriptor.Type, now)}, LabelValues: toLabelValues(row, v.metricDescriptor.LabelKeys), StartTime: row.Data.StartTime(), } } func viewToMetric(v *viewInternal, r *resource.Resource, now time.Time) *metricdata.Metric { rows := v.collectedRows() if len(rows) == 0 { return nil } ts := []*metricdata.TimeSeries{} for _, row := range rows { ts = append(ts, rowToTimeseries(v, row, now)) } m := &metricdata.Metric{ Descriptor: *v.metricDescriptor, TimeSeries: ts, Resource: r, } return m } opencensus-go-0.24.0/stats/view/view_to_metric_test.go000066400000000000000000000440071433102037600231030ustar00rootroot00000000000000// Copyright 2019, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. // package view import ( "context" "testing" "time" "encoding/json" "github.com/google/go-cmp/cmp" "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricexport" "go.opencensus.io/stats" "go.opencensus.io/tag" ) type recordValWithTag struct { tags []tag.Tag value interface{} } type testToMetrics struct { vi *viewInternal view *View recordValue []recordValWithTag wantMetric *metricdata.Metric } var ( // tag objects. tk1 tag.Key tk2 tag.Key tk3 tag.Key tk1v1 tag.Tag tk2v2 tag.Tag tags []tag.Tag labelValues []metricdata.LabelValue emptyLabelValues []metricdata.LabelValue labelKeys []metricdata.LabelKey recordsInt64 []recordValWithTag recordsFloat64 []recordValWithTag recordsFloat64WoTag []recordValWithTag // distribution objects. aggDist *Aggregation aggCnt *Aggregation aggS *Aggregation aggL *Aggregation buckOpt *metricdata.BucketOptions // exemplar objects. attachments metricdata.Attachments // views and descriptors viewTypeFloat64Distribution *View viewTypeInt64Distribution *View viewTypeInt64Count *View viewTypeFloat64Count *View viewTypeFloat64Sum *View viewTypeInt64Sum *View viewTypeFloat64LastValue *View viewTypeInt64LastValue *View viewRecordWithoutLabel *View mdTypeFloat64CumulativeDistribution metricdata.Descriptor mdTypeInt64CumulativeDistribution metricdata.Descriptor mdTypeInt64CumulativeCount metricdata.Descriptor mdTypeFloat64CumulativeCount metricdata.Descriptor mdTypeInt64CumulativeSum metricdata.Descriptor mdTypeFloat64CumulativeSum metricdata.Descriptor mdTypeInt64CumulativeLastValue metricdata.Descriptor mdTypeFloat64CumulativeLastValue metricdata.Descriptor mdTypeRecordWithoutLabel metricdata.Descriptor ) const ( nameInt64DistM1 = "viewToMetricTest_Int64_Distribution/m1" nameFloat64DistM1 = "viewToMetricTest_Float64_Distribution/m1" nameInt64CountM1 = "viewToMetricTest_Int64_Count/m1" nameFloat64CountM1 = "viewToMetricTest_Float64_Count/m1" nameInt64SumM1 = "viewToMetricTest_Int64_Sum/m1" nameFloat64SumM1 = "viewToMetricTest_Float64_Sum/m1" nameInt64LastValueM1 = "viewToMetricTest_Int64_LastValue/m1" nameFloat64LastValueM1 = "viewToMetricTest_Float64_LastValue/m1" nameRecordWithoutLabel = "viewToMetricTest_RecordWithoutLabel/m1" v1 = "v1" v2 = "v2" ) func init() { initTags() initAgg() initViews() initMetricDescriptors() } func initTags() { tk1 = tag.MustNewKey("k1") tk2 = tag.MustNewKey("k2") tk3 = tag.MustNewKey("k3") tk1v1 = tag.Tag{Key: tk1, Value: v1} tk2v2 = tag.Tag{Key: tk2, Value: v2} tags = []tag.Tag{tk1v1, tk2v2} labelValues = []metricdata.LabelValue{ {Value: v1, Present: true}, {Value: v2, Present: true}, } emptyLabelValues = []metricdata.LabelValue{ {Value: "", Present: false}, {Value: "", Present: false}, } labelKeys = []metricdata.LabelKey{ {Key: tk1.Name()}, {Key: tk2.Name()}, } recordsInt64 = []recordValWithTag{ {tags: tags, value: int64(2)}, {tags: tags, value: int64(4)}, } recordsFloat64 = []recordValWithTag{ {tags: tags, value: float64(1.5)}, {tags: tags, value: float64(5.4)}, } recordsFloat64WoTag = []recordValWithTag{ {value: float64(1.5)}, {value: float64(5.4)}, } } func initAgg() { aggDist = Distribution(2.0) aggCnt = Count() aggS = Sum() aggL = LastValue() buckOpt = &metricdata.BucketOptions{Bounds: []float64{2.0}} } func initViews() { // View objects viewTypeInt64Distribution = &View{ Name: nameInt64DistM1, TagKeys: []tag.Key{tk1, tk2}, Measure: stats.Int64(nameInt64DistM1, "", stats.UnitDimensionless), Aggregation: aggDist, } 
viewTypeFloat64Distribution = &View{ Name: nameFloat64DistM1, TagKeys: []tag.Key{tk1, tk2}, Measure: stats.Float64(nameFloat64DistM1, "", stats.UnitDimensionless), Aggregation: aggDist, } viewTypeInt64Count = &View{ Name: nameInt64CountM1, TagKeys: []tag.Key{tk1, tk2}, Measure: stats.Int64(nameInt64CountM1, "", stats.UnitDimensionless), Aggregation: aggCnt, } viewTypeFloat64Count = &View{ Name: nameFloat64CountM1, TagKeys: []tag.Key{tk1, tk2}, Measure: stats.Float64(nameFloat64CountM1, "", stats.UnitDimensionless), Aggregation: aggCnt, } viewTypeInt64Sum = &View{ Name: nameInt64SumM1, TagKeys: []tag.Key{tk1, tk2}, Measure: stats.Int64(nameInt64SumM1, "", stats.UnitBytes), Aggregation: aggS, } viewTypeFloat64Sum = &View{ Name: nameFloat64SumM1, TagKeys: []tag.Key{tk1, tk2}, Measure: stats.Float64(nameFloat64SumM1, "", stats.UnitMilliseconds), Aggregation: aggS, } viewTypeInt64LastValue = &View{ Name: nameInt64LastValueM1, TagKeys: []tag.Key{tk1, tk2}, Measure: stats.Int64(nameInt64LastValueM1, "", stats.UnitDimensionless), Aggregation: aggL, } viewTypeFloat64LastValue = &View{ Name: nameFloat64LastValueM1, TagKeys: []tag.Key{tk1, tk2}, Measure: stats.Float64(nameFloat64LastValueM1, "", stats.UnitDimensionless), Aggregation: aggL, } viewRecordWithoutLabel = &View{ Name: nameRecordWithoutLabel, TagKeys: []tag.Key{tk1, tk2}, Measure: stats.Float64(nameRecordWithoutLabel, "", stats.UnitDimensionless), Aggregation: aggL, } } func initMetricDescriptors() { // Metric objects mdTypeFloat64CumulativeDistribution = metricdata.Descriptor{ Name: nameFloat64DistM1, Description: "", Unit: metricdata.UnitDimensionless, Type: metricdata.TypeCumulativeDistribution, LabelKeys: labelKeys, } mdTypeInt64CumulativeDistribution = metricdata.Descriptor{ Name: nameInt64DistM1, Description: "", Unit: metricdata.UnitDimensionless, Type: metricdata.TypeCumulativeDistribution, LabelKeys: labelKeys, } mdTypeInt64CumulativeCount = metricdata.Descriptor{ Name: nameInt64CountM1, Description: "", Unit: metricdata.UnitDimensionless, Type: metricdata.TypeCumulativeInt64, LabelKeys: labelKeys, } mdTypeFloat64CumulativeCount = metricdata.Descriptor{ Name: nameFloat64CountM1, Description: "", Unit: metricdata.UnitDimensionless, Type: metricdata.TypeCumulativeInt64, LabelKeys: labelKeys, } mdTypeInt64CumulativeSum = metricdata.Descriptor{ Name: nameInt64SumM1, Description: "", Unit: metricdata.UnitBytes, Type: metricdata.TypeCumulativeInt64, LabelKeys: labelKeys, } mdTypeFloat64CumulativeSum = metricdata.Descriptor{ Name: nameFloat64SumM1, Description: "", Unit: metricdata.UnitMilliseconds, Type: metricdata.TypeCumulativeFloat64, LabelKeys: labelKeys, } mdTypeInt64CumulativeLastValue = metricdata.Descriptor{ Name: nameInt64LastValueM1, Description: "", Unit: metricdata.UnitDimensionless, Type: metricdata.TypeGaugeInt64, LabelKeys: labelKeys, } mdTypeFloat64CumulativeLastValue = metricdata.Descriptor{ Name: nameFloat64LastValueM1, Description: "", Unit: metricdata.UnitDimensionless, Type: metricdata.TypeGaugeFloat64, LabelKeys: labelKeys, } mdTypeRecordWithoutLabel = metricdata.Descriptor{ Name: nameRecordWithoutLabel, Description: "", Unit: metricdata.UnitDimensionless, Type: metricdata.TypeGaugeFloat64, LabelKeys: labelKeys, } } func Test_ViewToMetric(t *testing.T) { now := time.Now() tests := []*testToMetrics{ { view: viewTypeInt64Distribution, recordValue: recordsInt64, wantMetric: &metricdata.Metric{ Descriptor: mdTypeInt64CumulativeDistribution, TimeSeries: []*metricdata.TimeSeries{ {Points: []metricdata.Point{ {Value: 
&metricdata.Distribution{ Count: 2, Sum: 6.0, SumOfSquaredDeviation: 2, BucketOptions: buckOpt, Buckets: []metricdata.Bucket{ {Count: 0, Exemplar: nil}, {Count: 2, Exemplar: nil}, }, }, Time: now, }, }, LabelValues: labelValues, StartTime: now, }, }, }, }, { view: viewTypeFloat64Distribution, recordValue: recordsFloat64, wantMetric: &metricdata.Metric{ Descriptor: mdTypeFloat64CumulativeDistribution, TimeSeries: []*metricdata.TimeSeries{ { Points: []metricdata.Point{ { Value: &metricdata.Distribution{ Count: 2, Sum: 6.9, SumOfSquaredDeviation: 7.605000000000001, BucketOptions: buckOpt, Buckets: []metricdata.Bucket{ {Count: 1, Exemplar: nil}, // TODO: [rghetia] add exemplar test. {Count: 1, Exemplar: nil}, }, }, Time: now, }, }, LabelValues: labelValues, StartTime: now, }, }, }, }, { view: viewTypeInt64Count, recordValue: recordsInt64, wantMetric: &metricdata.Metric{ Descriptor: mdTypeInt64CumulativeCount, TimeSeries: []*metricdata.TimeSeries{ {Points: []metricdata.Point{ metricdata.NewInt64Point(now, 2), }, LabelValues: labelValues, StartTime: now, }, }, }, }, { view: viewTypeFloat64Count, recordValue: recordsFloat64, wantMetric: &metricdata.Metric{ Descriptor: mdTypeFloat64CumulativeCount, TimeSeries: []*metricdata.TimeSeries{ {Points: []metricdata.Point{ metricdata.NewInt64Point(now, 2), }, LabelValues: labelValues, StartTime: now, }, }, }, }, { view: viewTypeInt64Sum, recordValue: recordsInt64, wantMetric: &metricdata.Metric{ Descriptor: mdTypeInt64CumulativeSum, TimeSeries: []*metricdata.TimeSeries{ {Points: []metricdata.Point{ metricdata.NewInt64Point(now, 6), }, LabelValues: labelValues, StartTime: now, }, }, }, }, { view: viewTypeFloat64Sum, recordValue: recordsFloat64, wantMetric: &metricdata.Metric{ Descriptor: mdTypeFloat64CumulativeSum, TimeSeries: []*metricdata.TimeSeries{ {Points: []metricdata.Point{ metricdata.NewFloat64Point(now, 6.9), }, LabelValues: labelValues, StartTime: now, }, }, }, }, { view: viewTypeInt64LastValue, recordValue: recordsInt64, wantMetric: &metricdata.Metric{ Descriptor: mdTypeInt64CumulativeLastValue, TimeSeries: []*metricdata.TimeSeries{ {Points: []metricdata.Point{ metricdata.NewInt64Point(now, 4), }, LabelValues: labelValues, StartTime: time.Time{}, }, }, }, }, { view: viewTypeFloat64LastValue, recordValue: recordsFloat64, wantMetric: &metricdata.Metric{ Descriptor: mdTypeFloat64CumulativeLastValue, TimeSeries: []*metricdata.TimeSeries{ {Points: []metricdata.Point{ metricdata.NewFloat64Point(now, 5.4), }, LabelValues: labelValues, StartTime: time.Time{}, }, }, }, }, { view: viewRecordWithoutLabel, recordValue: recordsFloat64WoTag, wantMetric: &metricdata.Metric{ Descriptor: mdTypeRecordWithoutLabel, TimeSeries: []*metricdata.TimeSeries{ {Points: []metricdata.Point{ metricdata.NewFloat64Point(now, 5.4), }, LabelValues: emptyLabelValues, StartTime: time.Time{}, }, }, }, }, } for _, tc := range tests { tc.vi, _ = defaultWorker.tryRegisterView(tc.view) tc.vi.clearRows() tc.vi.subscribe() } for i, tc := range tests { for _, r := range tc.recordValue { mods := []tag.Mutator{} for _, tg := range r.tags { mods = append(mods, tag.Insert(tg.Key, tg.Value)) } ctx, err := tag.New(context.Background(), mods...) 
if err != nil { t.Errorf("%v: New = %v", tc.view.Name, err) } var v float64 switch i := r.value.(type) { case float64: v = float64(i) case int64: v = float64(i) default: t.Errorf("unexpected value type %v", r.tags) } tc.vi.addSample(tag.FromContext(ctx), v, nil, now) } gotMetric := viewToMetric(tc.vi, nil, now) if !cmp.Equal(gotMetric, tc.wantMetric) { // JSON format is strictly for checking the content when test fails. Do not use JSON // format to determine if the two values are same as it doesn't differentiate between // int64(2) and float64(2.0) t.Errorf("#%d: Unmatched \nGot:\n\t%v\nWant:\n\t%v\nGot Serialized:%s\nWant Serialized:%s\n", i, gotMetric, tc.wantMetric, serializeAsJSON(gotMetric), serializeAsJSON(tc.wantMetric)) } } } // Test to verify that a metric converted from a view with Aggregation Count should always // have Dimensionless unit. func TestUnitConversionForAggCount(t *testing.T) { now := time.Now() tests := []*struct { name string vi *viewInternal v *View wantUnit metricdata.Unit }{ { name: "View with Count Aggregation on Latency measurement", v: &View{ Name: "request_count1", Measure: stats.Int64("request_latency", "", stats.UnitMilliseconds), Aggregation: aggCnt, }, wantUnit: metricdata.UnitDimensionless, }, { name: "View with Count Aggregation on bytes measurement", v: &View{ Name: "request_count2", Measure: stats.Int64("request_bytes", "", stats.UnitBytes), Aggregation: aggCnt, }, wantUnit: metricdata.UnitDimensionless, }, { name: "View with aggregation other than Count Aggregation on Latency measurement", v: &View{ Name: "request_latency", Measure: stats.Int64("request_latency", "", stats.UnitMilliseconds), Aggregation: aggSum, }, wantUnit: metricdata.UnitMilliseconds, }, } var err error for _, tc := range tests { tc.vi, err = defaultWorker.tryRegisterView(tc.v) if err != nil { t.Fatalf("error registering view: %v, err: %v\n", tc.v, err) } tc.vi.clearRows() tc.vi.subscribe() } for _, tc := range tests { tc.vi.addSample(tag.FromContext(context.Background()), 5.0, nil, now) gotMetric := viewToMetric(tc.vi, nil, now) gotUnit := gotMetric.Descriptor.Unit if !cmp.Equal(gotUnit, tc.wantUnit) { t.Errorf("Verify Unit: %s: Got:%v Want:%v", tc.name, gotUnit, tc.wantUnit) } } } type mockExp struct { metrics []*metricdata.Metric } func (me *mockExp) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error { me.metrics = append(me.metrics, metrics...) 
return nil } var _ metricexport.Exporter = (*mockExp)(nil) func TestViewToMetric_OutOfOrderWithZeroBuckets(t *testing.T) { m := stats.Int64("OutOfOrderWithZeroBuckets", "", "") now := time.Now() tts := []struct { v *View m *metricdata.Metric }{ { v: &View{ Name: m.Name() + "_order1", Measure: m, Aggregation: Distribution(10, 0, 2), }, m: &metricdata.Metric{ Descriptor: metricdata.Descriptor{ Name: "OutOfOrderWithZeroBuckets_order1", Unit: metricdata.UnitDimensionless, Type: metricdata.TypeCumulativeDistribution, LabelKeys: []metricdata.LabelKey{}, }, TimeSeries: []*metricdata.TimeSeries{ {Points: []metricdata.Point{ {Value: &metricdata.Distribution{ Count: 3, Sum: 9.0, SumOfSquaredDeviation: 8, BucketOptions: &metricdata.BucketOptions{ Bounds: []float64{2, 10}, }, Buckets: []metricdata.Bucket{ {Count: 1, Exemplar: nil}, {Count: 2, Exemplar: nil}, {Count: 0, Exemplar: nil}, }, }, Time: now, }, }, StartTime: now, LabelValues: []metricdata.LabelValue{}, }, }, }, }, { v: &View{ Name: m.Name() + "_order2", Measure: m, Aggregation: Distribution(0, 5, 10), }, m: &metricdata.Metric{ Descriptor: metricdata.Descriptor{ Name: "OutOfOrderWithZeroBuckets_order2", Unit: metricdata.UnitDimensionless, Type: metricdata.TypeCumulativeDistribution, LabelKeys: []metricdata.LabelKey{}, }, TimeSeries: []*metricdata.TimeSeries{ {Points: []metricdata.Point{ {Value: &metricdata.Distribution{ Count: 3, Sum: 9.0, SumOfSquaredDeviation: 8, BucketOptions: &metricdata.BucketOptions{ Bounds: []float64{5, 10}, }, Buckets: []metricdata.Bucket{ {Count: 2, Exemplar: nil}, {Count: 1, Exemplar: nil}, {Count: 0, Exemplar: nil}, }, }, Time: now, }, }, StartTime: now, LabelValues: []metricdata.LabelValue{}, }, }, }, }, } for _, tt := range tts { err := Register(tt.v) if err != nil { t.Fatalf("error registering view %v, err: %v", tt.v, err) } } stats.Record(context.Background(), m.M(5), m.M(1), m.M(3)) time.Sleep(1 * time.Second) me := &mockExp{} reader := metricexport.NewReader() reader.ReadAndExport(me) var got *metricdata.Metric lookup := func(vname string, metrics []*metricdata.Metric) *metricdata.Metric { for _, m := range metrics { if m.Descriptor.Name == vname { return m } } return nil } for _, tt := range tts { got = lookup(tt.v.Name, me.metrics) if got == nil { t.Fatalf("metric %s not found in %v\n", tt.v.Name, me.metrics) } got.TimeSeries[0].Points[0].Time = now got.TimeSeries[0].StartTime = now want := tt.m if diff := cmp.Diff(got, want); diff != "" { t.Errorf("buckets differ -got +want: %s \n Serialized got %v\n, Serialized want %v\n", diff, serializeAsJSON(got), serializeAsJSON(want)) } } } func serializeAsJSON(v interface{}) string { blob, _ := json.MarshalIndent(v, "", " ") return string(blob) } opencensus-go-0.24.0/stats/view/worker.go000066400000000000000000000303201433102037600203270ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// package view import ( "fmt" "sync" "time" "go.opencensus.io/resource" "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricproducer" "go.opencensus.io/stats" "go.opencensus.io/stats/internal" "go.opencensus.io/tag" ) func init() { defaultWorker = NewMeter().(*worker) go defaultWorker.start() internal.DefaultRecorder = record internal.MeasurementRecorder = recordMeasurement } type measureRef struct { measure string views map[*viewInternal]struct{} } type worker struct { measures map[string]*measureRef views map[string]*viewInternal viewStartTimes map[*viewInternal]time.Time timer *time.Ticker c chan command quit, done chan bool mu sync.RWMutex r *resource.Resource exportersMu sync.RWMutex exporters map[Exporter]struct{} } // Meter defines an interface which allows a single process to maintain // multiple sets of metrics exports (intended for the advanced case where a // single process wants to report metrics about multiple objects, such as // multiple databases or HTTP services). // // Note that this is an advanced use case, and the static functions in this // module should cover the common use cases. type Meter interface { stats.Recorder // Find returns a registered view associated with this name. // If no registered view is found, nil is returned. Find(name string) *View // Register begins collecting data for the given views. // Once a view is registered, it reports data to the registered exporters. Register(views ...*View) error // Unregister the given views. Data will not longer be exported for these views // after Unregister returns. // It is not necessary to unregister from views you expect to collect for the // duration of your program execution. Unregister(views ...*View) // SetReportingPeriod sets the interval between reporting aggregated views in // the program. If duration is less than or equal to zero, it enables the // default behavior. // // Note: each exporter makes different promises about what the lowest supported // duration is. For example, the Stackdriver exporter recommends a value no // lower than 1 minute. Consult each exporter per your needs. SetReportingPeriod(time.Duration) // RegisterExporter registers an exporter. // Collected data will be reported via all the // registered exporters. Once you no longer // want data to be exported, invoke UnregisterExporter // with the previously registered exporter. // // Binaries can register exporters, libraries shouldn't register exporters. RegisterExporter(Exporter) // UnregisterExporter unregisters an exporter. UnregisterExporter(Exporter) // SetResource may be used to set the Resource associated with this registry. // This is intended to be used in cases where a single process exports metrics // for multiple Resources, typically in a multi-tenant situation. SetResource(*resource.Resource) // Start causes the Meter to start processing Record calls and aggregating // statistics as well as exporting data. Start() // Stop causes the Meter to stop processing calls and terminate data export. Stop() // RetrieveData gets a snapshot of the data collected for the the view registered // with the given name. It is intended for testing only. RetrieveData(viewName string) ([]*Row, error) } var _ Meter = (*worker)(nil) var defaultWorker *worker var defaultReportingDuration = 10 * time.Second // Find returns a registered view associated with this name. // If no registered view is found, nil is returned. 
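// A minimal usage sketch from an importing package (the view name matches the
// one registered in example_test.go and is purely illustrative):
//
//	if v := view.Find("example.com/views/openconns"); v != nil {
//		// v.Name, v.Measure and v.Aggregation describe the registered view.
//	}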
func Find(name string) (v *View) { return defaultWorker.Find(name) } // Find returns a registered view associated with this name. // If no registered view is found, nil is returned. func (w *worker) Find(name string) (v *View) { req := &getViewByNameReq{ name: name, c: make(chan *getViewByNameResp), } w.c <- req resp := <-req.c return resp.v } // Register begins collecting data for the given views. // Once a view is registered, it reports data to the registered exporters. func Register(views ...*View) error { return defaultWorker.Register(views...) } // Register begins collecting data for the given views. // Once a view is registered, it reports data to the registered exporters. func (w *worker) Register(views ...*View) error { req := ®isterViewReq{ views: views, err: make(chan error), } w.c <- req return <-req.err } // Unregister the given views. Data will not longer be exported for these views // after Unregister returns. // It is not necessary to unregister from views you expect to collect for the // duration of your program execution. func Unregister(views ...*View) { defaultWorker.Unregister(views...) } // Unregister the given views. Data will not longer be exported for these views // after Unregister returns. // It is not necessary to unregister from views you expect to collect for the // duration of your program execution. func (w *worker) Unregister(views ...*View) { names := make([]string, len(views)) for i := range views { names[i] = views[i].Name } req := &unregisterFromViewReq{ views: names, done: make(chan struct{}), } w.c <- req <-req.done } // RetrieveData gets a snapshot of the data collected for the the view registered // with the given name. It is intended for testing only. func RetrieveData(viewName string) ([]*Row, error) { return defaultWorker.RetrieveData(viewName) } // RetrieveData gets a snapshot of the data collected for the the view registered // with the given name. It is intended for testing only. func (w *worker) RetrieveData(viewName string) ([]*Row, error) { req := &retrieveDataReq{ now: time.Now(), v: viewName, c: make(chan *retrieveDataResp), } w.c <- req resp := <-req.c return resp.rows, resp.err } func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { defaultWorker.Record(tags, ms, attachments) } func recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) { defaultWorker.recordMeasurement(tags, ms, attachments) } // Record records a set of measurements ms associated with the given tags and attachments. func (w *worker) Record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { w.recordMeasurement(tags, ms.([]stats.Measurement), attachments) } // recordMeasurement records a set of measurements ms associated with the given tags and attachments. // This is the same as Record but without an interface{} type to avoid allocations func (w *worker) recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) { req := &recordReq{ tm: tags, ms: ms, attachments: attachments, t: time.Now(), } w.c <- req } // SetReportingPeriod sets the interval between reporting aggregated views in // the program. If duration is less than or equal to zero, it enables the // default behavior. // // Note: each exporter makes different promises about what the lowest supported // duration is. For example, the Stackdriver exporter recommends a value no // lower than 1 minute. Consult each exporter per your needs. 
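// For instance (illustrative), an application exporting to a backend with a
// one-minute minimum could call:
//
//	view.SetReportingPeriod(time.Minute)
//
// once in main, after registering its exporter.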
func SetReportingPeriod(d time.Duration) { defaultWorker.SetReportingPeriod(d) } // Stop stops the default worker. func Stop() { defaultWorker.Stop() } // SetReportingPeriod sets the interval between reporting aggregated views in // the program. If duration is less than or equal to zero, it enables the // default behavior. // // Note: each exporter makes different promises about what the lowest supported // duration is. For example, the Stackdriver exporter recommends a value no // lower than 1 minute. Consult each exporter per your needs. func (w *worker) SetReportingPeriod(d time.Duration) { // TODO(acetechnologist): ensure that the duration d is more than a certain // value. e.g. 1s req := &setReportingPeriodReq{ d: d, c: make(chan bool), } w.c <- req <-req.c // don't return until the timer is set to the new duration. } // NewMeter constructs a Meter instance. You should only need to use this if // you need to separate out Measurement recordings and View aggregations within // a single process. func NewMeter() Meter { return &worker{ measures: make(map[string]*measureRef), views: make(map[string]*viewInternal), viewStartTimes: make(map[*viewInternal]time.Time), timer: time.NewTicker(defaultReportingDuration), c: make(chan command, 1024), quit: make(chan bool), done: make(chan bool), exporters: make(map[Exporter]struct{}), } } // SetResource associates all data collected by this Meter with the specified // resource. This resource is reported when using metricexport.ReadAndExport; // it is not provided when used with ExportView/RegisterExporter, because that // interface does not provide a means for reporting the Resource. func (w *worker) SetResource(r *resource.Resource) { w.r = r } func (w *worker) Start() { go w.start() } func (w *worker) start() { prodMgr := metricproducer.GlobalManager() prodMgr.AddProducer(w) for { select { case cmd := <-w.c: cmd.handleCommand(w) case <-w.timer.C: w.reportUsage() case <-w.quit: w.timer.Stop() close(w.c) close(w.done) return } } } func (w *worker) Stop() { prodMgr := metricproducer.GlobalManager() prodMgr.DeleteProducer(w) select { case <-w.quit: default: close(w.quit) } <-w.done } func (w *worker) getMeasureRef(name string) *measureRef { if mr, ok := w.measures[name]; ok { return mr } mr := &measureRef{ measure: name, views: make(map[*viewInternal]struct{}), } w.measures[name] = mr return mr } func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { w.mu.Lock() defer w.mu.Unlock() vi, err := newViewInternal(v) if err != nil { return nil, err } if x, ok := w.views[vi.view.Name]; ok { if !x.view.same(vi.view) { return nil, fmt.Errorf("cannot register view %q; a different view with the same name is already registered", v.Name) } // the view is already registered so there is nothing to do and the // command is considered successful. 
return x, nil } w.views[vi.view.Name] = vi w.viewStartTimes[vi] = time.Now() ref := w.getMeasureRef(vi.view.Measure.Name()) ref.views[vi] = struct{}{} return vi, nil } func (w *worker) unregisterView(v *viewInternal) { w.mu.Lock() defer w.mu.Unlock() delete(w.views, v.view.Name) delete(w.viewStartTimes, v) if measure := w.measures[v.view.Measure.Name()]; measure != nil { delete(measure.views, v) } } func (w *worker) reportView(v *viewInternal) { if !v.isSubscribed() { return } rows := v.collectedRows() viewData := &Data{ View: v.view, Start: w.viewStartTimes[v], End: time.Now(), Rows: rows, } w.exportersMu.Lock() defer w.exportersMu.Unlock() for e := range w.exporters { e.ExportView(viewData) } } func (w *worker) reportUsage() { w.mu.Lock() defer w.mu.Unlock() for _, v := range w.views { w.reportView(v) } } func (w *worker) toMetric(v *viewInternal, now time.Time) *metricdata.Metric { if !v.isSubscribed() { return nil } return viewToMetric(v, w.r, now) } // Read reads all view data and returns them as metrics. // It is typically invoked by metric reader to export stats in metric format. func (w *worker) Read() []*metricdata.Metric { w.mu.Lock() defer w.mu.Unlock() now := time.Now() metrics := make([]*metricdata.Metric, 0, len(w.views)) for _, v := range w.views { metric := w.toMetric(v, now) if metric != nil { metrics = append(metrics, metric) } } return metrics } func (w *worker) RegisterExporter(e Exporter) { w.exportersMu.Lock() defer w.exportersMu.Unlock() w.exporters[e] = struct{}{} } func (w *worker) UnregisterExporter(e Exporter) { w.exportersMu.Lock() defer w.exportersMu.Unlock() delete(w.exporters, e) } opencensus-go-0.24.0/stats/view/worker_commands.go000066400000000000000000000101471433102037600222150ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package view import ( "errors" "fmt" "strings" "time" "go.opencensus.io/stats" "go.opencensus.io/stats/internal" "go.opencensus.io/tag" ) type command interface { handleCommand(w *worker) } // getViewByNameReq is the command to get a view given its name. type getViewByNameReq struct { name string c chan *getViewByNameResp } type getViewByNameResp struct { v *View } func (cmd *getViewByNameReq) handleCommand(w *worker) { v := w.views[cmd.name] if v == nil { cmd.c <- &getViewByNameResp{nil} return } cmd.c <- &getViewByNameResp{v.view} } // registerViewReq is the command to register a view. 
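// Each operation on the worker follows the same command shape seen above: a
// request struct carrying a reply channel, pushed onto the worker's command
// channel and handled on its single goroutine. A hypothetical extra command
// would look like this (illustrative only, not part of the package):
//
//	type viewCountReq struct {
//		c chan int
//	}
//
//	func (cmd *viewCountReq) handleCommand(w *worker) {
//		cmd.c <- len(w.views)
//	}
//
//	// and on the caller side:
//	req := &viewCountReq{c: make(chan int)}
//	w.c <- req
//	viewCount := <-req.c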
type registerViewReq struct { views []*View err chan error } func (cmd *registerViewReq) handleCommand(w *worker) { for _, v := range cmd.views { if err := v.canonicalize(); err != nil { cmd.err <- err return } } var errstr []string for _, view := range cmd.views { vi, err := w.tryRegisterView(view) if err != nil { errstr = append(errstr, fmt.Sprintf("%s: %v", view.Name, err)) continue } internal.SubscriptionReporter(view.Measure.Name()) vi.subscribe() } if len(errstr) > 0 { cmd.err <- errors.New(strings.Join(errstr, "\n")) } else { cmd.err <- nil } } // unregisterFromViewReq is the command to unregister to a view. Has no // impact on the data collection for client that are pulling data from the // library. type unregisterFromViewReq struct { views []string done chan struct{} } func (cmd *unregisterFromViewReq) handleCommand(w *worker) { for _, name := range cmd.views { vi, ok := w.views[name] if !ok { continue } // Report pending data for this view before removing it. w.reportView(vi) vi.unsubscribe() if !vi.isSubscribed() { // this was the last subscription and view is not collecting anymore. // The collected data can be cleared. vi.clearRows() } w.unregisterView(vi) } cmd.done <- struct{}{} } // retrieveDataReq is the command to retrieve data for a view. type retrieveDataReq struct { now time.Time v string c chan *retrieveDataResp } type retrieveDataResp struct { rows []*Row err error } func (cmd *retrieveDataReq) handleCommand(w *worker) { w.mu.Lock() defer w.mu.Unlock() vi, ok := w.views[cmd.v] if !ok { cmd.c <- &retrieveDataResp{ nil, fmt.Errorf("cannot retrieve data; view %q is not registered", cmd.v), } return } if !vi.isSubscribed() { cmd.c <- &retrieveDataResp{ nil, fmt.Errorf("cannot retrieve data; view %q has no subscriptions or collection is not forcibly started", cmd.v), } return } cmd.c <- &retrieveDataResp{ vi.collectedRows(), nil, } } // recordReq is the command to record data related to multiple measures // at once. type recordReq struct { tm *tag.Map ms []stats.Measurement attachments map[string]interface{} t time.Time } func (cmd *recordReq) handleCommand(w *worker) { w.mu.Lock() defer w.mu.Unlock() for _, m := range cmd.ms { if (m == stats.Measurement{}) { // not registered continue } ref := w.getMeasureRef(m.Measure().Name()) for v := range ref.views { v.addSample(cmd.tm, m.Value(), cmd.attachments, cmd.t) } } } // setReportingPeriodReq is the command to modify the duration between // reporting the collected data to the registered clients. type setReportingPeriodReq struct { d time.Duration c chan bool } func (cmd *setReportingPeriodReq) handleCommand(w *worker) { w.timer.Stop() if cmd.d <= 0 { w.timer = time.NewTicker(defaultReportingDuration) } else { w.timer = time.NewTicker(cmd.d) } cmd.c <- true } opencensus-go-0.24.0/stats/view/worker_test.go000066400000000000000000000346321433102037600214000ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
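// The tests that follow exercise the worker through small Exporter
// implementations (countExporter and vdExporter below). A minimal sketch of
// an ExportView exporter, with an illustrative type name:
//
//	type logExporter struct{}
//
//	func (logExporter) ExportView(vd *Data) {
//		fmt.Printf("%s: %d rows (%v - %v)\n", vd.View.Name, len(vd.Rows), vd.Start, vd.End)
//	}
//
//	// Wired up once at program start; binaries, not libraries, should do this:
//	RegisterExporter(logExporter{})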
// package view import ( "context" "errors" "sort" "sync" "testing" "time" "github.com/google/go-cmp/cmp" "go.opencensus.io/resource" "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricexport" "go.opencensus.io/stats" "go.opencensus.io/tag" ) func Test_Worker_ViewRegistration(t *testing.T) { someError := errors.New("some error") sc1 := make(chan *Data) type registration struct { c chan *Data vID string err error } type testCase struct { label string registrations []registration } tcs := []testCase{ { "register v1ID", []registration{ { sc1, "v1ID", nil, }, }, }, { "register v1ID+v2ID", []registration{ { sc1, "v1ID", nil, }, }, }, { "register to v1ID; ??? to v1ID and view with same ID", []registration{ { sc1, "v1ID", nil, }, { sc1, "v1SameNameID", someError, }, }, }, } mf1 := stats.Float64("MF1/Test_Worker_ViewSubscription", "desc MF1", "unit") mf2 := stats.Float64("MF2/Test_Worker_ViewSubscription", "desc MF2", "unit") for _, tc := range tcs { t.Run(tc.label, func(t *testing.T) { restart() views := map[string]*View{ "v1ID": { Name: "VF1", Measure: mf1, Aggregation: Count(), }, "v1SameNameID": { Name: "VF1", Description: "desc duplicate name VF1", Measure: mf1, Aggregation: Sum(), }, "v2ID": { Name: "VF2", Measure: mf2, Aggregation: Count(), }, "vNilID": nil, } for _, r := range tc.registrations { v := views[r.vID] err := Register(v) if (err != nil) != (r.err != nil) { t.Errorf("%v: Register() = %v, want %v", tc.label, err, r.err) } } }) } } func Test_Worker_MultiExport(t *testing.T) { restart() // This test reports the same data for the default worker and a secondary // worker, and ensures that the stats are kept independently. extraResource := resource.Resource{ Type: "additional", Labels: map[string]string{"key1": "value1", "key2": "value2"}, } worker2 := NewMeter().(*worker) worker2.Start() worker2.SetResource(&extraResource) m := stats.Float64("Test_Worker_MultiExport/MF1", "desc MF1", "unit") key := tag.MustNewKey(("key")) count := &View{"VF1", "description", []tag.Key{key}, m, Count()} sum := &View{"VF2", "description", []tag.Key{}, m, Sum()} Register(count, sum) worker2.Register(count) // Don't compute the sum for worker2, to verify independence of computation. 
data := []struct { w Meter tags string // Tag values value float64 }{{ tags: "a", value: 2.0, }, { tags: "b", value: 3.0, }, { tags: "a", value: 2.5, }, { w: worker2, tags: "b", value: 1.0, }, } for _, d := range data { ctx, err := tag.New(context.Background(), tag.Upsert(key, d.tags)) if err != nil { t.Fatalf("%s: failed to add tag %q: %v", d.w, key.Name(), err) } if d.w != nil { d.w.Record(tag.FromContext(ctx), []stats.Measurement{m.M(d.value)}, nil) } else { stats.Record(ctx, m.M(d.value)) } } makeKey := func(r *resource.Resource, view string) string { if r == nil { r = &resource.Resource{} } return resource.EncodeLabels(r.Labels) + "/" + view } // Format is Resource.Labels encoded as string, then wantPartialData := map[string][]*Row{ makeKey(nil, count.Name): { {[]tag.Tag{{Key: key, Value: "a"}}, &CountData{Value: 2}}, {[]tag.Tag{{Key: key, Value: "b"}}, &CountData{Value: 1}}, }, makeKey(nil, sum.Name): { {nil, &SumData{Value: 7.5}}, }, makeKey(&extraResource, count.Name): { {[]tag.Tag{{Key: key, Value: "b"}}, &CountData{Value: 1}}, }, } te := &testExporter{} metricexport.NewReader().ReadAndExport(te) for _, m := range te.metrics { key := makeKey(m.Resource, m.Descriptor.Name) want, ok := wantPartialData[key] if !ok { t.Errorf("Unexpected data for %q: %v", key, m) continue } gotTs := m.TimeSeries sort.Sort(byLabel(gotTs)) for i, ts := range gotTs { for j, label := range ts.LabelValues { if want[i].Tags[j].Value != label.Value { t.Errorf("Mismatched tag values (want %q, got %q) for %v in %q", want[i].Tags[j].Value, label.Value, ts, key) } } switch wantValue := want[i].Data.(type) { case *CountData: got := ts.Points[0].Value.(int64) if wantValue.Value != got { t.Errorf("Mismatched value (want %d, got %d) for %v in %q", wantValue.Value, got, ts, key) } case *SumData: got := ts.Points[0].Value.(float64) if wantValue.Value != got { t.Errorf("Mismatched value (want %f, got %f) for %v in %q", wantValue.Value, got, ts, key) } default: t.Errorf("Unexpected type of data: %T for %v in %q", wantValue, want[i], key) } } } // Verify that worker has not been computing sum: got, err := worker2.RetrieveData(sum.Name) if err == nil { t.Errorf("%s: expected no data because it was not registered, got %#v", sum.Name, got) } Unregister(count, sum) worker2.Unregister(count) worker2.Stop() } func Test_Worker_RecordFloat64(t *testing.T) { restart() someError := errors.New("some error") m := stats.Float64("Test_Worker_RecordFloat64/MF1", "desc MF1", "unit") k1 := tag.MustNewKey("k1") k2 := tag.MustNewKey("k2") ctx, err := tag.New(context.Background(), tag.Insert(k1, "v1"), tag.Insert(k2, "v2"), ) if err != nil { t.Fatal(err) } v1 := &View{"VF1", "desc VF1", []tag.Key{k1, k2}, m, Count()} v2 := &View{"VF2", "desc VF2", []tag.Key{k1, k2}, m, Count()} type want struct { v *View rows []*Row err error } type testCase struct { label string registrations []*View records []float64 wants []want } tcs := []testCase{ { label: "0", registrations: []*View{}, records: []float64{1, 1}, wants: []want{{v1, nil, someError}, {v2, nil, someError}}, }, { label: "1", registrations: []*View{v1}, records: []float64{1, 1}, wants: []want{ { v1, []*Row{ { []tag.Tag{{Key: k1, Value: "v1"}, {Key: k2, Value: "v2"}}, &CountData{Value: 2}, }, }, nil, }, {v2, nil, someError}, }, }, { label: "2", registrations: []*View{v1, v2}, records: []float64{1, 1}, wants: []want{ { v1, []*Row{ { []tag.Tag{{Key: k1, Value: "v1"}, {Key: k2, Value: "v2"}}, &CountData{Value: 2}, }, }, nil, }, { v2, []*Row{ { []tag.Tag{{Key: k1, Value: "v1"}, {Key: k2, Value: 
"v2"}}, &CountData{Value: 2}, }, }, nil, }, }, }, } for _, tc := range tcs { for _, v := range tc.registrations { if err := Register(v); err != nil { t.Fatalf("%v: Register(%v) = %v; want no errors", tc.label, v.Name, err) } } for _, value := range tc.records { stats.Record(ctx, m.M(value)) } for _, w := range tc.wants { gotRows, err := RetrieveData(w.v.Name) for i := range gotRows { switch data := gotRows[i].Data.(type) { case *CountData: data.Start = time.Time{} case *SumData: data.Start = time.Time{} case *DistributionData: data.Start = time.Time{} } } if (err != nil) != (w.err != nil) { t.Fatalf("%s: RetrieveData(%v) = %v; want error = %v", tc.label, w.v.Name, err, w.err) } if diff := cmp.Diff(gotRows, w.rows); diff != "" { t.Errorf("%v: unexpected row (got-, want+): %s", tc.label, diff) break } } // Cleaning up. Unregister(tc.registrations...) } } func TestReportUsage(t *testing.T) { ctx := context.Background() m := stats.Int64("measure", "desc", "unit") tests := []struct { name string view *View wantMaxCount int64 }{ { name: "cum", view: &View{Name: "cum1", Measure: m, Aggregation: Count()}, wantMaxCount: 8, }, { name: "cum2", view: &View{Name: "cum1", Measure: m, Aggregation: Count()}, wantMaxCount: 8, }, } for _, tt := range tests { restart() SetReportingPeriod(25 * time.Millisecond) if err := Register(tt.view); err != nil { t.Fatalf("%v: cannot register: %v", tt.name, err) } e := &countExporter{} RegisterExporter(e) stats.Record(ctx, m.M(1)) stats.Record(ctx, m.M(1)) stats.Record(ctx, m.M(1)) stats.Record(ctx, m.M(1)) time.Sleep(50 * time.Millisecond) stats.Record(ctx, m.M(1)) stats.Record(ctx, m.M(1)) stats.Record(ctx, m.M(1)) stats.Record(ctx, m.M(1)) time.Sleep(50 * time.Millisecond) e.Lock() count := e.count e.Unlock() if got, want := count, tt.wantMaxCount; got > want { t.Errorf("%v: got count data = %v; want at most %v", tt.name, got, want) } } } func Test_SetReportingPeriodReqNeverBlocks(t *testing.T) { t.Parallel() worker := NewMeter().(*worker) durations := []time.Duration{-1, 0, 10, 100 * time.Millisecond} for i, duration := range durations { ackChan := make(chan bool, 1) cmd := &setReportingPeriodReq{c: ackChan, d: duration} cmd.handleCommand(worker) select { case <-ackChan: case <-time.After(500 * time.Millisecond): // Arbitrarily using 500ms as the timeout duration. 
t.Errorf("#%d: duration %v blocks", i, duration) } } } func TestWorkerStarttime(t *testing.T) { restart() ctx := context.Background() m := stats.Int64("measure/TestWorkerStarttime", "desc", "unit") v := &View{ Name: "testview", Measure: m, Aggregation: Count(), } SetReportingPeriod(25 * time.Millisecond) if err := Register(v); err != nil { t.Fatalf("cannot register to %v: %v", v.Name, err) } e := &vdExporter{} RegisterExporter(e) defer UnregisterExporter(e) stats.Record(ctx, m.M(1)) stats.Record(ctx, m.M(1)) stats.Record(ctx, m.M(1)) stats.Record(ctx, m.M(1)) time.Sleep(50 * time.Millisecond) stats.Record(ctx, m.M(1)) stats.Record(ctx, m.M(1)) stats.Record(ctx, m.M(1)) stats.Record(ctx, m.M(1)) time.Sleep(50 * time.Millisecond) e.Lock() if len(e.vds) == 0 { t.Fatal("Got no view data; want at least one") } var start time.Time for _, vd := range e.vds { if start.IsZero() { start = vd.Start } if !vd.Start.Equal(start) { t.Errorf("Cumulative view data start time = %v; want %v", vd.Start, start) } } e.Unlock() } func TestUnregisterReportsUsage(t *testing.T) { restart() ctx := context.Background() m1 := stats.Int64("measure", "desc", "unit") view1 := &View{Name: "count", Measure: m1, Aggregation: Count()} m2 := stats.Int64("measure2", "desc", "unit") view2 := &View{Name: "count2", Measure: m2, Aggregation: Count()} SetReportingPeriod(time.Hour) if err := Register(view1, view2); err != nil { t.Fatalf("cannot register: %v", err) } e := &countExporter{} RegisterExporter(e) stats.Record(ctx, m1.M(1)) stats.Record(ctx, m2.M(1)) stats.Record(ctx, m2.M(1)) Unregister(view2) // Unregister should only flush view2, so expect the count of 2. want := int64(2) e.Lock() got := e.totalCount e.Unlock() if got != want { t.Errorf("got count data = %v; want %v", got, want) } } func TestWorkerRace(t *testing.T) { restart() ctx := context.Background() m1 := stats.Int64("measure", "desc", "unit") view1 := &View{Name: "count", Measure: m1, Aggregation: Count()} m2 := stats.Int64("measure2", "desc", "unit") view2 := &View{Name: "count2", Measure: m2, Aggregation: Count()} // 1. This will export every microsecond. SetReportingPeriod(time.Microsecond) if err := Register(view1, view2); err != nil { t.Fatalf("cannot register: %v", err) } e := &countExporter{} RegisterExporter(e) // Synchronize and make sure every goroutine has terminated before we exit var waiter sync.WaitGroup waiter.Add(3) defer waiter.Wait() doneCh := make(chan bool) // 2. Record write routine at 700ns go func() { defer waiter.Done() tick := time.NewTicker(700 * time.Nanosecond) defer tick.Stop() defer func() { close(doneCh) }() for i := 0; i < 1e3; i++ { stats.Record(ctx, m1.M(1)) stats.Record(ctx, m2.M(1)) stats.Record(ctx, m2.M(1)) <-tick.C } }() // 2. Simulating RetrieveData 900ns go func() { defer waiter.Done() tick := time.NewTicker(900 * time.Nanosecond) defer tick.Stop() for { select { case <-doneCh: return case <-tick.C: RetrieveData(view1.Name) } } }() // 4. 
Export via Reader routine at 800ns go func() { defer waiter.Done() tick := time.NewTicker(800 * time.Nanosecond) defer tick.Stop() reader := metricexport.Reader{} for { select { case <-doneCh: return case <-tick.C: // Perform some collection here reader.ReadAndExport(&testExporter{}) } } }() } type testExporter struct { metrics []*metricdata.Metric } func (te *testExporter) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error { te.metrics = metrics return nil } type countExporter struct { sync.Mutex count int64 totalCount int64 } func (e *countExporter) ExportView(vd *Data) { if len(vd.Rows) == 0 { return } d := vd.Rows[0].Data.(*CountData) e.Lock() defer e.Unlock() e.count = d.Value e.totalCount += d.Value } type vdExporter struct { sync.Mutex vds []*Data } func (e *vdExporter) ExportView(vd *Data) { e.Lock() defer e.Unlock() e.vds = append(e.vds, vd) } // restart stops the current processors and creates a new one. func restart() { defaultWorker.Stop() defaultWorker = NewMeter().(*worker) go defaultWorker.start() } // byTag implements sort.Interface for *metricdata.TimeSeries by Labels. type byLabel []*metricdata.TimeSeries func (ts byLabel) Len() int { return len(ts) } func (ts byLabel) Swap(i, j int) { ts[i], ts[j] = ts[j], ts[i] } func (ts byLabel) Less(i, j int) bool { if len(ts[i].LabelValues) != len(ts[j].LabelValues) { return len(ts[i].LabelValues) < len(ts[j].LabelValues) } for k := range ts[i].LabelValues { if ts[i].LabelValues[k].Value != ts[j].LabelValues[k].Value { return ts[i].LabelValues[k].Value < ts[j].LabelValues[k].Value } } return false } opencensus-go-0.24.0/tag/000077500000000000000000000000001433102037600151345ustar00rootroot00000000000000opencensus-go-0.24.0/tag/context.go000066400000000000000000000025441433102037600171540ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package tag import ( "context" ) // FromContext returns the tag map stored in the context. func FromContext(ctx context.Context) *Map { // The returned tag map shouldn't be mutated. ts := ctx.Value(mapCtxKey) if ts == nil { return nil } return ts.(*Map) } // NewContext creates a new context with the given tag map. // To propagate a tag map to downstream methods and downstream RPCs, add a tag map // to the current context. NewContext will return a copy of the current context, // and put the tag map into the returned one. // If there is already a tag map in the current context, it will be replaced with m. func NewContext(ctx context.Context, m *Map) context.Context { return context.WithValue(ctx, mapCtxKey, m) } type ctxKey struct{} var mapCtxKey = ctxKey{} opencensus-go-0.24.0/tag/doc.go000066400000000000000000000017001433102037600162260ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // /* Package tag contains OpenCensus tags. Tags are key-value pairs. Tags provide additional cardinality to the OpenCensus instrumentation data. Tags can be propagated on the wire and in the same process via context.Context. Encode and Decode should be used to represent tags into their binary propagation form. */ package tag // import "go.opencensus.io/tag" opencensus-go-0.24.0/tag/example_test.go000066400000000000000000000037051433102037600201620ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package tag_test import ( "context" "log" "go.opencensus.io/tag" ) var ( tagMap *tag.Map ctx context.Context key tag.Key ) func ExampleNewKey() { // Get a key to represent user OS. key, err := tag.NewKey("example.com/keys/user-os") if err != nil { log.Fatal(err) } _ = key // use key } func ExampleMustNewKey() { key := tag.MustNewKey("example.com/keys/user-os") _ = key // use key } func ExampleNew() { osKey := tag.MustNewKey("example.com/keys/user-os") userIDKey := tag.MustNewKey("example.com/keys/user-id") ctx, err := tag.New(ctx, tag.Insert(osKey, "macOS-10.12.5"), tag.Upsert(userIDKey, "cde36753ed"), ) if err != nil { log.Fatal(err) } _ = ctx // use context } func ExampleNew_replace() { ctx, err := tag.New(ctx, tag.Insert(key, "macOS-10.12.5"), tag.Upsert(key, "macOS-10.12.7"), ) if err != nil { log.Fatal(err) } _ = ctx // use context } func ExampleNewContext() { // Propagate the tag map in the current context. ctx := tag.NewContext(context.Background(), tagMap) _ = ctx // use context } func ExampleFromContext() { tagMap := tag.FromContext(ctx) _ = tagMap // use the tag map } func ExampleDo() { ctx, err := tag.New(ctx, tag.Insert(key, "macOS-10.12.5"), tag.Upsert(key, "macOS-10.12.7"), ) if err != nil { log.Fatal(err) } tag.Do(ctx, func(ctx context.Context) { _ = ctx // use context }) } opencensus-go-0.24.0/tag/key.go000066400000000000000000000023151433102037600162540ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package tag // Key represents a tag key. 
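// The examples above construct tag maps; reading a tag back out of a context
// is a Value lookup on the map. A minimal sketch (key name illustrative):
//
//	osKey := tag.MustNewKey("example.com/keys/user-os")
//	if m := tag.FromContext(ctx); m != nil {
//		if v, ok := m.Value(osKey); ok {
//			_ = v // use the tag value
//		}
//	}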
type Key struct { name string } // NewKey creates or retrieves a string key identified by name. // Calling NewKey more than once with the same name returns the same key. func NewKey(name string) (Key, error) { if !checkKeyName(name) { return Key{}, errInvalidKeyName } return Key{name: name}, nil } // MustNewKey returns a key with the given name, and panics if name is an invalid key name. func MustNewKey(name string) Key { k, err := NewKey(name) if err != nil { panic(err) } return k } // Name returns the name of the key. func (k Key) Name() string { return k.name } opencensus-go-0.24.0/tag/map.go000066400000000000000000000130721433102037600162430ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package tag import ( "bytes" "context" "fmt" "sort" ) // Tag is a key value pair that can be propagated on wire. type Tag struct { Key Key Value string } type tagContent struct { value string m metadatas } // Map is a map of tags. Use New to create a context containing // a new Map. type Map struct { m map[Key]tagContent } // Value returns the value for the key if a value for the key exists. func (m *Map) Value(k Key) (string, bool) { if m == nil { return "", false } v, ok := m.m[k] return v.value, ok } func (m *Map) String() string { if m == nil { return "nil" } keys := make([]Key, 0, len(m.m)) for k := range m.m { keys = append(keys, k) } sort.Slice(keys, func(i, j int) bool { return keys[i].Name() < keys[j].Name() }) var buffer bytes.Buffer buffer.WriteString("{ ") for _, k := range keys { buffer.WriteString(fmt.Sprintf("{%v %v}", k.name, m.m[k])) } buffer.WriteString(" }") return buffer.String() } func (m *Map) insert(k Key, v string, md metadatas) { if _, ok := m.m[k]; ok { return } m.m[k] = tagContent{value: v, m: md} } func (m *Map) update(k Key, v string, md metadatas) { if _, ok := m.m[k]; ok { m.m[k] = tagContent{value: v, m: md} } } func (m *Map) upsert(k Key, v string, md metadatas) { m.m[k] = tagContent{value: v, m: md} } func (m *Map) delete(k Key) { delete(m.m, k) } func newMap() *Map { return &Map{m: make(map[Key]tagContent)} } // Mutator modifies a tag map. type Mutator interface { Mutate(t *Map) (*Map, error) } // Insert returns a mutator that inserts a // value associated with k. If k already exists in the tag map, // mutator doesn't update the value. // Metadata applies metadata to the tag. It is optional. // Metadatas are applied in the order in which it is provided. // If more than one metadata updates the same attribute then // the update from the last metadata prevails. func Insert(k Key, v string, mds ...Metadata) Mutator { return &mutator{ fn: func(m *Map) (*Map, error) { if !checkValue(v) { return nil, errInvalidValue } m.insert(k, v, createMetadatas(mds...)) return m, nil }, } } // Update returns a mutator that updates the // value of the tag associated with k with v. If k doesn't // exists in the tag map, the mutator doesn't insert the value. // Metadata applies metadata to the tag. It is optional. 
// Metadatas are applied in the order in which it is provided. // If more than one metadata updates the same attribute then // the update from the last metadata prevails. func Update(k Key, v string, mds ...Metadata) Mutator { return &mutator{ fn: func(m *Map) (*Map, error) { if !checkValue(v) { return nil, errInvalidValue } m.update(k, v, createMetadatas(mds...)) return m, nil }, } } // Upsert returns a mutator that upserts the // value of the tag associated with k with v. It inserts the // value if k doesn't exist already. It mutates the value // if k already exists. // Metadata applies metadata to the tag. It is optional. // Metadatas are applied in the order in which it is provided. // If more than one metadata updates the same attribute then // the update from the last metadata prevails. func Upsert(k Key, v string, mds ...Metadata) Mutator { return &mutator{ fn: func(m *Map) (*Map, error) { if !checkValue(v) { return nil, errInvalidValue } m.upsert(k, v, createMetadatas(mds...)) return m, nil }, } } func createMetadatas(mds ...Metadata) metadatas { var metas metadatas if len(mds) > 0 { for _, md := range mds { if md != nil { md(&metas) } } } else { WithTTL(TTLUnlimitedPropagation)(&metas) } return metas } // Delete returns a mutator that deletes // the value associated with k. func Delete(k Key) Mutator { return &mutator{ fn: func(m *Map) (*Map, error) { m.delete(k) return m, nil }, } } // New returns a new context that contains a tag map // originated from the incoming context and modified // with the provided mutators. func New(ctx context.Context, mutator ...Mutator) (context.Context, error) { m := newMap() orig := FromContext(ctx) if orig != nil { for k, v := range orig.m { if !checkKeyName(k.Name()) { return ctx, fmt.Errorf("key:%q: %v", k, errInvalidKeyName) } if !checkValue(v.value) { return ctx, fmt.Errorf("key:%q value:%q: %v", k.Name(), v, errInvalidValue) } m.insert(k, v.value, v.m) } } var err error for _, mod := range mutator { m, err = mod.Mutate(m) if err != nil { return ctx, err } } return NewContext(ctx, m), nil } // Do is similar to pprof.Do: a convenience for installing the tags // from the context as Go profiler labels. This allows you to // correlated runtime profiling with stats. // // It converts the key/values from the given map to Go profiler labels // and calls pprof.Do. // // Do is going to do nothing if your Go version is below 1.9. func Do(ctx context.Context, f func(ctx context.Context)) { do(ctx, f) } type mutator struct { fn func(t *Map) (*Map, error) } func (m *mutator) Mutate(t *Map) (*Map, error) { return m.fn(t) } opencensus-go-0.24.0/tag/map_codec.go000066400000000000000000000132171433102037600174010ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package tag import ( "encoding/binary" "fmt" ) // KeyType defines the types of keys allowed. Currently only keyTypeString is // supported. 
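// A small sketch of the metadata option accepted by the mutators above: a tag
// upserted with TTLNoPropagation stays in-process, while omitting metadata
// defaults to TTLUnlimitedPropagation (key name and value illustrative):
//
//	debugKey := tag.MustNewKey("example.com/keys/debug-id")
//	ctx, err := tag.New(context.Background(),
//		tag.Upsert(debugKey, "abc123", tag.WithTTL(tag.TTLNoPropagation)),
//	)
//	if err != nil {
//		log.Fatal(err)
//	}
//	// Encode (below) only serializes tags whose TTL allows propagation, so
//	// this tag is dropped from the binary wire format.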
type keyType byte const ( keyTypeString keyType = iota keyTypeInt64 keyTypeTrue keyTypeFalse tagsVersionID = byte(0) ) type encoderGRPC struct { buf []byte writeIdx, readIdx int } // writeKeyString writes the fieldID '0' followed by the key string and value // string. func (eg *encoderGRPC) writeTagString(k, v string) { eg.writeByte(byte(keyTypeString)) eg.writeStringWithVarintLen(k) eg.writeStringWithVarintLen(v) } func (eg *encoderGRPC) writeTagUint64(k string, i uint64) { eg.writeByte(byte(keyTypeInt64)) eg.writeStringWithVarintLen(k) eg.writeUint64(i) } func (eg *encoderGRPC) writeTagTrue(k string) { eg.writeByte(byte(keyTypeTrue)) eg.writeStringWithVarintLen(k) } func (eg *encoderGRPC) writeTagFalse(k string) { eg.writeByte(byte(keyTypeFalse)) eg.writeStringWithVarintLen(k) } func (eg *encoderGRPC) writeBytesWithVarintLen(bytes []byte) { length := len(bytes) eg.growIfRequired(binary.MaxVarintLen64 + length) eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length)) copy(eg.buf[eg.writeIdx:], bytes) eg.writeIdx += length } func (eg *encoderGRPC) writeStringWithVarintLen(s string) { length := len(s) eg.growIfRequired(binary.MaxVarintLen64 + length) eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length)) copy(eg.buf[eg.writeIdx:], s) eg.writeIdx += length } func (eg *encoderGRPC) writeByte(v byte) { eg.growIfRequired(1) eg.buf[eg.writeIdx] = v eg.writeIdx++ } func (eg *encoderGRPC) writeUint32(i uint32) { eg.growIfRequired(4) binary.LittleEndian.PutUint32(eg.buf[eg.writeIdx:], i) eg.writeIdx += 4 } func (eg *encoderGRPC) writeUint64(i uint64) { eg.growIfRequired(8) binary.LittleEndian.PutUint64(eg.buf[eg.writeIdx:], i) eg.writeIdx += 8 } func (eg *encoderGRPC) readByte() byte { b := eg.buf[eg.readIdx] eg.readIdx++ return b } func (eg *encoderGRPC) readUint32() uint32 { i := binary.LittleEndian.Uint32(eg.buf[eg.readIdx:]) eg.readIdx += 4 return i } func (eg *encoderGRPC) readUint64() uint64 { i := binary.LittleEndian.Uint64(eg.buf[eg.readIdx:]) eg.readIdx += 8 return i } func (eg *encoderGRPC) readBytesWithVarintLen() ([]byte, error) { if eg.readEnded() { return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx) } length, valueStart := binary.Uvarint(eg.buf[eg.readIdx:]) if valueStart <= 0 { return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx) } valueStart += eg.readIdx valueEnd := valueStart + int(length) if valueEnd > len(eg.buf) { return nil, fmt.Errorf("malformed encoding: length:%v, upper:%v, maxLength:%v", length, valueEnd, len(eg.buf)) } eg.readIdx = valueEnd return eg.buf[valueStart:valueEnd], nil } func (eg *encoderGRPC) readStringWithVarintLen() (string, error) { bytes, err := eg.readBytesWithVarintLen() if err != nil { return "", err } return string(bytes), nil } func (eg *encoderGRPC) growIfRequired(expected int) { if len(eg.buf)-eg.writeIdx < expected { tmp := make([]byte, 2*(len(eg.buf)+1)+expected) copy(tmp, eg.buf) eg.buf = tmp } } func (eg *encoderGRPC) readEnded() bool { return eg.readIdx >= len(eg.buf) } func (eg *encoderGRPC) bytes() []byte { return eg.buf[:eg.writeIdx] } // Encode encodes the tag map into a []byte. It is useful to propagate // the tag maps on wire in binary format. 
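// Concretely, the encoded form produced below is one version byte followed,
// for each propagated tag, by a key-type byte and varint-length-prefixed key
// and value bytes. The single tag k1=v1 from map_codec_test.go encodes as:
//
//	[]byte{
//		0,           // tagsVersionID
//		0,           // keyTypeString
//		2, 'k', '1', // varint length 2, then key "k1"
//		2, 'v', '1', // varint length 2, then value "v1"
//	}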
func Encode(m *Map) []byte { if m == nil { return nil } eg := &encoderGRPC{ buf: make([]byte, len(m.m)), } eg.writeByte(tagsVersionID) for k, v := range m.m { if v.m.ttl.ttl == valueTTLUnlimitedPropagation { eg.writeByte(byte(keyTypeString)) eg.writeStringWithVarintLen(k.name) eg.writeBytesWithVarintLen([]byte(v.value)) } } return eg.bytes() } // Decode decodes the given []byte into a tag map. func Decode(bytes []byte) (*Map, error) { ts := newMap() err := DecodeEach(bytes, ts.upsert) if err != nil { // no partial failures return nil, err } return ts, nil } // DecodeEach decodes the given serialized tag map, calling handler for each // tag key and value decoded. func DecodeEach(bytes []byte, fn func(key Key, val string, md metadatas)) error { eg := &encoderGRPC{ buf: bytes, } if len(eg.buf) == 0 { return nil } version := eg.readByte() if version > tagsVersionID { return fmt.Errorf("cannot decode: unsupported version: %q; supports only up to: %q", version, tagsVersionID) } for !eg.readEnded() { typ := keyType(eg.readByte()) if typ != keyTypeString { return fmt.Errorf("cannot decode: invalid key type: %q", typ) } k, err := eg.readBytesWithVarintLen() if err != nil { return err } v, err := eg.readBytesWithVarintLen() if err != nil { return err } key, err := NewKey(string(k)) if err != nil { return err } val := string(v) if !checkValue(val) { return errInvalidValue } fn(key, val, createMetadatas(WithTTL(TTLUnlimitedPropagation))) if err != nil { return err } } return nil } opencensus-go-0.24.0/tag/map_codec_test.go000066400000000000000000000106171433102037600204410ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package tag import ( "context" "reflect" "sort" "testing" ) func TestEncodeDecode(t *testing.T) { k1, _ := NewKey("k1") k2, _ := NewKey("k2") k3, _ := NewKey("k3 is very weird <>.,?/'\";:`~!@#$%^&*()_-+={[}]|\\") k4, _ := NewKey("k4") type keyValue struct { k Key v string } testCases := []struct { label string pairs []keyValue }{ { "0", []keyValue{}, }, { "1", []keyValue{ {k1, "v1"}, }, }, { "2", []keyValue{ {k1, "v1"}, {k2, "v2"}, }, }, { "3", []keyValue{ {k1, "v1"}, {k2, "v2"}, {k3, "v3"}, }, }, { "4", []keyValue{ {k1, "v1"}, {k2, "v2"}, {k3, "v3"}, {k4, "v4 is very weird <>.,?/'\";:`~!@#$%^&*()_-+={[}]|\\"}, }, }, } for _, tc := range testCases { mods := make([]Mutator, len(tc.pairs)) for i, pair := range tc.pairs { mods[i] = Upsert(pair.k, pair.v) } ctx, err := New(context.Background(), mods...) 
if err != nil { t.Errorf("%v: New = %v", tc.label, err) } encoded := Encode(FromContext(ctx)) decoded, err := Decode(encoded) if err != nil { t.Errorf("%v: decoding encoded tag map failed: %v", tc.label, err) } got := make([]keyValue, 0) for k, v := range decoded.m { got = append(got, keyValue{k, v.value}) } want := tc.pairs sort.Slice(got, func(i, j int) bool { return got[i].k.name < got[j].k.name }) sort.Slice(want, func(i, j int) bool { return got[i].k.name < got[j].k.name }) if !reflect.DeepEqual(got, tc.pairs) { t.Errorf("%v: decoded tag map = %#v; want %#v", tc.label, got, want) } } } func TestDecode(t *testing.T) { k1, _ := NewKey("k1") ctx, _ := New(context.Background(), Insert(k1, "v1")) tests := []struct { name string bytes []byte want *Map wantErr bool }{ { name: "valid", bytes: []byte{0, 0, 2, 107, 49, 2, 118, 49}, want: FromContext(ctx), wantErr: false, }, { name: "non-ascii key", bytes: []byte{0, 0, 2, 107, 49, 2, 118, 49, 0, 2, 107, 25, 2, 118, 49}, want: nil, wantErr: true, }, { name: "non-ascii value", bytes: []byte{0, 0, 2, 107, 49, 2, 118, 49, 0, 2, 107, 50, 2, 118, 25}, want: nil, wantErr: true, }, { name: "long value", bytes: []byte{0, 0, 2, 107, 49, 2, 118, 49, 0, 2, 107, 50, 172, 2, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97}, want: nil, wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := Decode(tt.bytes) if (err != nil) != tt.wantErr { t.Errorf("Decode() error = %v, wantErr %v", err, tt.wantErr) return } if !reflect.DeepEqual(got, tt.want) { t.Errorf("Decode() = %v, want %v", got, tt.want) } }) } } opencensus-go-0.24.0/tag/map_test.go000066400000000000000000000233571433102037600173110ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
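// As the cases above show, Decode rejects malformed payloads (non-ASCII or
// over-long keys and values, unsupported versions) and returns a nil map
// rather than a partial one. A minimal sketch of the caller side, where
// payload stands in for bytes received off the wire (illustrative):
//
//	m, err := tag.Decode(payload)
//	if err != nil {
//		// No partial data: m is nil whenever err is non-nil.
//		return
//	}
//	ctx := tag.NewContext(context.Background(), m)
//	_ = ctx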
// package tag import ( "context" "fmt" "reflect" "strings" "testing" ) var ( ttlUnlimitedPropMd = createMetadatas(WithTTL(TTLUnlimitedPropagation)) ttlNoPropMd = createMetadatas(WithTTL(TTLNoPropagation)) ) func TestContext(t *testing.T) { k1, _ := NewKey("k1") k2, _ := NewKey("k2") ctx := context.Background() ctx, _ = New(ctx, Insert(k1, "v1"), Insert(k2, "v2"), ) got := FromContext(ctx) want := newMap() want.insert(k1, "v1", ttlUnlimitedPropMd) want.insert(k2, "v2", ttlUnlimitedPropMd) if !reflect.DeepEqual(got, want) { t.Errorf("Map = %#v; want %#v", got, want) } } func TestDo(t *testing.T) { k1, _ := NewKey("k1") k2, _ := NewKey("k2") ctx := context.Background() ctx, _ = New(ctx, Insert(k1, "v1"), Insert(k2, "v2"), ) got := FromContext(ctx) want := newMap() want.insert(k1, "v1", ttlUnlimitedPropMd) want.insert(k2, "v2", ttlUnlimitedPropMd) Do(ctx, func(ctx context.Context) { got = FromContext(ctx) }) if !reflect.DeepEqual(got, want) { t.Errorf("Map = %#v; want %#v", got, want) } } func TestNewMap(t *testing.T) { k1, _ := NewKey("k1") k2, _ := NewKey("k2") k3, _ := NewKey("k3") k4, _ := NewKey("k4") k5, _ := NewKey("k5") initial := makeTestTagMap(5) tests := []struct { name string initial *Map mods []Mutator want *Map }{ { name: "from empty; insert", initial: nil, mods: []Mutator{ Insert(k5, "v5"), }, want: makeTestTagMap(2, 4, 5), }, { name: "from empty; insert existing", initial: nil, mods: []Mutator{ Insert(k1, "v1"), }, want: makeTestTagMap(1, 2, 4), }, { name: "from empty; update", initial: nil, mods: []Mutator{ Update(k1, "v1"), }, want: makeTestTagMap(2, 4), }, { name: "from empty; update unexisting", initial: nil, mods: []Mutator{ Update(k5, "v5"), }, want: makeTestTagMap(2, 4), }, { name: "from existing; upsert", initial: initial, mods: []Mutator{ Upsert(k5, "v5"), }, want: makeTestTagMap(2, 4, 5), }, { name: "from existing; delete", initial: initial, mods: []Mutator{ Delete(k2), }, want: makeTestTagMap(4, 5), }, { name: "from empty; invalid", initial: nil, mods: []Mutator{ Insert(k5, "v\x19"), Upsert(k5, "v\x19"), Update(k5, "v\x19"), }, want: nil, }, { name: "from empty; no partial", initial: nil, mods: []Mutator{ Insert(k5, "v1"), Update(k5, "v\x19"), }, want: nil, }, } for _, tt := range tests { mods := []Mutator{ Insert(k1, "v1"), Insert(k2, "v2"), Update(k3, "v3"), Upsert(k4, "v4"), Insert(k2, "v2"), Delete(k1), } mods = append(mods, tt.mods...) ctx := NewContext(context.Background(), tt.initial) ctx, err := New(ctx, mods...) 
if tt.want != nil && err != nil { t.Errorf("%v: New = %v", tt.name, err) } if got, want := FromContext(ctx), tt.want; !reflect.DeepEqual(got, want) { t.Errorf("%v: got %v; want %v", tt.name, got, want) } } } func TestNewMapWithMetadata(t *testing.T) { k3, _ := NewKey("k3") k4, _ := NewKey("k4") k5, _ := NewKey("k5") tests := []struct { name string initial *Map mods []Mutator want *Map }{ { name: "from empty; insert", initial: nil, mods: []Mutator{ Insert(k5, "5", WithTTL(TTLNoPropagation)), Insert(k4, "4"), }, want: makeTestTagMapWithMetadata( tagContent{"5", ttlNoPropMd}, tagContent{"4", ttlUnlimitedPropMd}), }, { name: "from existing; insert existing", initial: makeTestTagMapWithMetadata(tagContent{"5", ttlNoPropMd}), mods: []Mutator{ Insert(k5, "5", WithTTL(TTLUnlimitedPropagation)), }, want: makeTestTagMapWithMetadata(tagContent{"5", ttlNoPropMd}), }, { name: "from existing; update non-existing", initial: makeTestTagMapWithMetadata(tagContent{"5", ttlNoPropMd}), mods: []Mutator{ Update(k4, "4", WithTTL(TTLUnlimitedPropagation)), }, want: makeTestTagMapWithMetadata(tagContent{"5", ttlNoPropMd}), }, { name: "from existing; update existing", initial: makeTestTagMapWithMetadata( tagContent{"5", ttlUnlimitedPropMd}, tagContent{"4", ttlNoPropMd}), mods: []Mutator{ Update(k5, "5"), Update(k4, "4", WithTTL(TTLUnlimitedPropagation)), }, want: makeTestTagMapWithMetadata( tagContent{"5", ttlUnlimitedPropMd}, tagContent{"4", ttlUnlimitedPropMd}), }, { name: "from existing; upsert existing", initial: makeTestTagMapWithMetadata( tagContent{"5", ttlNoPropMd}, tagContent{"4", ttlNoPropMd}), mods: []Mutator{ Upsert(k4, "4", WithTTL(TTLUnlimitedPropagation)), }, want: makeTestTagMapWithMetadata( tagContent{"5", ttlNoPropMd}, tagContent{"4", ttlUnlimitedPropMd}), }, { name: "from existing; upsert non-existing", initial: makeTestTagMapWithMetadata( tagContent{"5", ttlNoPropMd}), mods: []Mutator{ Upsert(k4, "4", WithTTL(TTLUnlimitedPropagation)), Upsert(k3, "3"), }, want: makeTestTagMapWithMetadata( tagContent{"5", ttlNoPropMd}, tagContent{"4", ttlUnlimitedPropMd}, tagContent{"3", ttlUnlimitedPropMd}), }, { name: "from existing; delete", initial: makeTestTagMapWithMetadata( tagContent{"5", ttlNoPropMd}, tagContent{"4", ttlNoPropMd}), mods: []Mutator{ Delete(k5), }, want: makeTestTagMapWithMetadata( tagContent{"4", ttlNoPropMd}), }, { name: "from non-existing; upsert with multiple-metadata", initial: nil, mods: []Mutator{ Upsert(k4, "4", WithTTL(TTLUnlimitedPropagation), WithTTL(TTLNoPropagation)), Upsert(k5, "5", WithTTL(TTLNoPropagation), WithTTL(TTLUnlimitedPropagation)), }, want: makeTestTagMapWithMetadata( tagContent{"4", ttlNoPropMd}, tagContent{"5", ttlUnlimitedPropMd}), }, { name: "from non-existing; insert with multiple-metadata", initial: nil, mods: []Mutator{ Insert(k5, "5", WithTTL(TTLNoPropagation), WithTTL(TTLUnlimitedPropagation)), }, want: makeTestTagMapWithMetadata( tagContent{"5", ttlUnlimitedPropMd}), }, { name: "from existing; update with multiple-metadata", initial: makeTestTagMapWithMetadata( tagContent{"5", ttlNoPropMd}), mods: []Mutator{ Update(k5, "5", WithTTL(TTLNoPropagation), WithTTL(TTLUnlimitedPropagation)), }, want: makeTestTagMapWithMetadata( tagContent{"5", ttlUnlimitedPropMd}), }, { name: "from empty; update invalid", initial: nil, mods: []Mutator{ Insert(k4, "4\x19", WithTTL(TTLUnlimitedPropagation)), Upsert(k4, "4\x19", WithTTL(TTLUnlimitedPropagation)), Update(k4, "4\x19", WithTTL(TTLUnlimitedPropagation)), }, want: nil, }, { name: "from empty; insert partial", initial: 
nil, mods: []Mutator{ Upsert(k3, "3", WithTTL(TTLUnlimitedPropagation)), Upsert(k4, "4\x19", WithTTL(TTLUnlimitedPropagation)), }, want: nil, }, } // Test api for insert, update, and upsert using metadata. for _, tt := range tests { ctx := NewContext(context.Background(), tt.initial) ctx, err := New(ctx, tt.mods...) if tt.want != nil && err != nil { t.Errorf("%v: New = %v", tt.name, err) } if got, want := FromContext(ctx), tt.want; !reflect.DeepEqual(got, want) { t.Errorf("%v: got %v; want %v", tt.name, got, want) } } } func TestNewValidation(t *testing.T) { tests := []struct { err string seed *Map }{ // Key name validation in seed {err: "invalid key", seed: &Map{m: map[Key]tagContent{{name: ""}: {"foo", ttlNoPropMd}}}}, {err: "", seed: &Map{m: map[Key]tagContent{{name: "key"}: {"foo", ttlNoPropMd}}}}, {err: "", seed: &Map{m: map[Key]tagContent{{name: strings.Repeat("a", 255)}: {"census", ttlNoPropMd}}}}, {err: "invalid key", seed: &Map{m: map[Key]tagContent{{name: strings.Repeat("a", 256)}: {"census", ttlNoPropMd}}}}, {err: "invalid key", seed: &Map{m: map[Key]tagContent{{name: "Приве́т"}: {"census", ttlNoPropMd}}}}, // Value validation {err: "", seed: &Map{m: map[Key]tagContent{{name: "key"}: {"", ttlNoPropMd}}}}, {err: "", seed: &Map{m: map[Key]tagContent{{name: "key"}: {strings.Repeat("a", 255), ttlNoPropMd}}}}, {err: "invalid value", seed: &Map{m: map[Key]tagContent{{name: "key"}: {"Приве́т", ttlNoPropMd}}}}, {err: "invalid value", seed: &Map{m: map[Key]tagContent{{name: "key"}: {strings.Repeat("a", 256), ttlNoPropMd}}}}, } for i, tt := range tests { ctx := NewContext(context.Background(), tt.seed) ctx, err := New(ctx) if tt.err != "" { if err == nil { t.Errorf("#%d: got nil error; want %q", i, tt.err) continue } else if s, substr := err.Error(), tt.err; !strings.Contains(s, substr) { t.Errorf("#%d:\ngot %q\nwant %q", i, s, substr) } continue } if err != nil { t.Errorf("#%d: got %q want nil", i, err) continue } m := FromContext(ctx) if m == nil { t.Errorf("#%d: got nil map", i) continue } } } func makeTestTagMap(ids ...int) *Map { m := newMap() for _, v := range ids { k, _ := NewKey(fmt.Sprintf("k%d", v)) m.m[k] = tagContent{fmt.Sprintf("v%d", v), ttlUnlimitedPropMd} } return m } func makeTestTagMapWithMetadata(tcs ...tagContent) *Map { m := newMap() for _, tc := range tcs { k, _ := NewKey(fmt.Sprintf("k%s", tc.value)) m.m[k] = tc } return m } opencensus-go-0.24.0/tag/metadata.go000066400000000000000000000031601433102037600172430ustar00rootroot00000000000000// Copyright 2019, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package tag const ( // valueTTLNoPropagation prevents tag from propagating. valueTTLNoPropagation = 0 // valueTTLUnlimitedPropagation allows tag to propagate without any limits on number of hops. valueTTLUnlimitedPropagation = -1 ) // TTL is metadata that specifies number of hops a tag can propagate. 
// Details about TTL metadata is specified at https://github.com/census-instrumentation/opencensus-specs/blob/master/tags/TagMap.md#tagmetadata type TTL struct { ttl int } var ( // TTLUnlimitedPropagation is TTL metadata that allows tag to propagate without any limits on number of hops. TTLUnlimitedPropagation = TTL{ttl: valueTTLUnlimitedPropagation} // TTLNoPropagation is TTL metadata that prevents tag from propagating. TTLNoPropagation = TTL{ttl: valueTTLNoPropagation} ) type metadatas struct { ttl TTL } // Metadata applies metadatas specified by the function. type Metadata func(*metadatas) // WithTTL applies metadata with provided ttl. func WithTTL(ttl TTL) Metadata { return func(m *metadatas) { m.ttl = ttl } } opencensus-go-0.24.0/tag/profile_19.go000066400000000000000000000016421433102037600174370ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build go1.9 // +build go1.9 package tag import ( "context" "runtime/pprof" ) func do(ctx context.Context, f func(ctx context.Context)) { m := FromContext(ctx) keyvals := make([]string, 0, 2*len(m.m)) for k, v := range m.m { keyvals = append(keyvals, k.Name(), v.value) } pprof.Do(ctx, pprof.Labels(keyvals...), f) } opencensus-go-0.24.0/tag/profile_not19.go000066400000000000000000000013361433102037600201600ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build !go1.9 // +build !go1.9 package tag import "context" func do(ctx context.Context, f func(ctx context.Context)) { f(ctx) } opencensus-go-0.24.0/tag/validate.go000066400000000000000000000026231433102037600172570ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package tag import "errors" const ( maxKeyLength = 255 // valid are restricted to US-ASCII subset (range 0x20 (' ') to 0x7e ('~')). 
validKeyValueMin = 32 validKeyValueMax = 126 ) var ( errInvalidKeyName = errors.New("invalid key name: only ASCII characters accepted; max length must be 255 characters") errInvalidValue = errors.New("invalid value: only ASCII characters accepted; max length must be 255 characters") ) func checkKeyName(name string) bool { if len(name) == 0 { return false } if len(name) > maxKeyLength { return false } return isASCII(name) } func isASCII(s string) bool { for _, c := range s { if (c < validKeyValueMin) || (c > validKeyValueMax) { return false } } return true } func checkValue(v string) bool { if len(v) > maxKeyLength { return false } return isASCII(v) } opencensus-go-0.24.0/tag/validate_test.go000066400000000000000000000037221433102037600203170ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package tag import ( "strings" "testing" ) func TestCheckKeyName(t *testing.T) { tests := []struct { name string key string wantOK bool }{ { name: "valid", key: "k1", wantOK: true, }, { name: "invalid i", key: "k\x19", wantOK: false, }, { name: "invalid ii", key: "k\x7f", wantOK: false, }, { name: "empty", key: "", wantOK: false, }, { name: "whitespace", key: " k ", wantOK: true, }, { name: "long", key: strings.Repeat("a", 256), wantOK: false, }, } for _, tt := range tests { ok := checkKeyName(tt.key) if ok != tt.wantOK { t.Errorf("%v: got %v; want %v", tt.name, ok, tt.wantOK) } } } func TestCheckValue(t *testing.T) { tests := []struct { name string value string wantOK bool }{ { name: "valid", value: "v1", wantOK: true, }, { name: "invalid i", value: "k\x19", wantOK: false, }, { name: "invalid ii", value: "k\x7f", wantOK: false, }, { name: "empty", value: "", wantOK: true, }, { name: "whitespace", value: " v ", wantOK: true, }, { name: "long", value: strings.Repeat("a", 256), wantOK: false, }, } for _, tt := range tests { ok := checkValue(tt.value) if ok != tt.wantOK { t.Errorf("%v: got %v; want %v", tt.name, ok, tt.wantOK) } } } opencensus-go-0.24.0/trace/000077500000000000000000000000001433102037600154575ustar00rootroot00000000000000opencensus-go-0.24.0/trace/basetypes.go000066400000000000000000000072451433102037600200150ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace import ( "fmt" "time" ) type ( // TraceID is a 16-byte identifier for a set of spans. TraceID [16]byte // SpanID is an 8-byte identifier for a single span. 
SpanID [8]byte ) func (t TraceID) String() string { return fmt.Sprintf("%02x", t[:]) } func (s SpanID) String() string { return fmt.Sprintf("%02x", s[:]) } // Annotation represents a text annotation with a set of attributes and a timestamp. type Annotation struct { Time time.Time Message string Attributes map[string]interface{} } // Attribute represents a key-value pair on a span, link or annotation. // Construct with one of: BoolAttribute, Int64Attribute, or StringAttribute. type Attribute struct { key string value interface{} } // Key returns the attribute's key func (a *Attribute) Key() string { return a.key } // Value returns the attribute's value func (a *Attribute) Value() interface{} { return a.value } // BoolAttribute returns a bool-valued attribute. func BoolAttribute(key string, value bool) Attribute { return Attribute{key: key, value: value} } // Int64Attribute returns an int64-valued attribute. func Int64Attribute(key string, value int64) Attribute { return Attribute{key: key, value: value} } // Float64Attribute returns a float64-valued attribute. func Float64Attribute(key string, value float64) Attribute { return Attribute{key: key, value: value} } // StringAttribute returns a string-valued attribute. func StringAttribute(key string, value string) Attribute { return Attribute{key: key, value: value} } // LinkType specifies the relationship between the span that had the link // added, and the linked span. type LinkType int32 // LinkType values. const ( LinkTypeUnspecified LinkType = iota // The relationship of the two spans is unknown. LinkTypeChild // The linked span is a child of the current span. LinkTypeParent // The linked span is the parent of the current span. ) // Link represents a reference from one span to another span. type Link struct { TraceID TraceID SpanID SpanID Type LinkType // Attributes is a set of attributes on the link. Attributes map[string]interface{} } // MessageEventType specifies the type of message event. type MessageEventType int32 // MessageEventType values. const ( MessageEventTypeUnspecified MessageEventType = iota // Unknown event type. MessageEventTypeSent // Indicates a sent RPC message. MessageEventTypeRecv // Indicates a received RPC message. ) // MessageEvent represents an event describing a message sent or received on the network. type MessageEvent struct { Time time.Time EventType MessageEventType MessageID int64 UncompressedByteSize int64 CompressedByteSize int64 } // Status is the status of a Span. type Status struct { // Code is a status code. Zero indicates success. // // If Code will be propagated to Google APIs, it ideally should be a value from // https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto . Code int32 Message string } opencensus-go-0.24.0/trace/benchmark_test.go000066400000000000000000000053751433102037600210110ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package trace import ( "context" "testing" ) func BenchmarkStartEndSpan(b *testing.B) { traceBenchmark(b, func(b *testing.B) { ctx := context.Background() b.ResetTimer() for i := 0; i < b.N; i++ { _, span := StartSpan(ctx, "/foo") span.End() } }) } func BenchmarkSpanWithAnnotations_4(b *testing.B) { traceBenchmark(b, func(b *testing.B) { ctx := context.Background() b.ResetTimer() for i := 0; i < b.N; i++ { _, span := StartSpan(ctx, "/foo") span.AddAttributes( BoolAttribute("key1", false), StringAttribute("key2", "hello"), Int64Attribute("key3", 123), Float64Attribute("key4", 123.456), ) span.End() } }) } func BenchmarkSpanWithAnnotations_8(b *testing.B) { traceBenchmark(b, func(b *testing.B) { ctx := context.Background() b.ResetTimer() for i := 0; i < b.N; i++ { _, span := StartSpan(ctx, "/foo") span.AddAttributes( BoolAttribute("key1", false), BoolAttribute("key2", true), StringAttribute("key3", "hello"), StringAttribute("key4", "hello"), Int64Attribute("key5", 123), Int64Attribute("key6", 456), Float64Attribute("key7", 123.456), Float64Attribute("key8", 456.789), ) span.End() } }) } func BenchmarkTraceID_DotString(b *testing.B) { traceBenchmark(b, func(b *testing.B) { t := TraceID{0x0D, 0x0E, 0x0A, 0x0D, 0x0B, 0x0E, 0x0E, 0x0F, 0x0F, 0x0E, 0x0E, 0x0B, 0x0D, 0x0A, 0x0E, 0x0D} want := "0d0e0a0d0b0e0e0f0f0e0e0b0d0a0e0d" for i := 0; i < b.N; i++ { if got := t.String(); got != want { b.Fatalf("got = %q want = %q", got, want) } } }) } func BenchmarkSpanID_DotString(b *testing.B) { traceBenchmark(b, func(b *testing.B) { s := SpanID{0x0D, 0x0E, 0x0A, 0x0D, 0x0B, 0x0E, 0x0E, 0x0F} want := "0d0e0a0d0b0e0e0f" for i := 0; i < b.N; i++ { if got := s.String(); got != want { b.Fatalf("got = %q want = %q", got, want) } } }) } func traceBenchmark(b *testing.B, fn func(*testing.B)) { b.Run("AlwaysSample", func(b *testing.B) { b.ReportAllocs() ApplyConfig(Config{DefaultSampler: AlwaysSample()}) fn(b) }) b.Run("NeverSample", func(b *testing.B) { b.ReportAllocs() ApplyConfig(Config{DefaultSampler: NeverSample()}) fn(b) }) } opencensus-go-0.24.0/trace/config.go000066400000000000000000000050331433102037600172540ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace import ( "sync" "go.opencensus.io/trace/internal" ) // Config represents the global tracing configuration. type Config struct { // DefaultSampler is the default sampler used when creating new spans. DefaultSampler Sampler // IDGenerator is for internal use only. 
IDGenerator internal.IDGenerator // MaxAnnotationEventsPerSpan is max number of annotation events per span MaxAnnotationEventsPerSpan int // MaxMessageEventsPerSpan is max number of message events per span MaxMessageEventsPerSpan int // MaxAnnotationEventsPerSpan is max number of attributes per span MaxAttributesPerSpan int // MaxLinksPerSpan is max number of links per span MaxLinksPerSpan int } var configWriteMu sync.Mutex const ( // DefaultMaxAnnotationEventsPerSpan is default max number of annotation events per span DefaultMaxAnnotationEventsPerSpan = 32 // DefaultMaxMessageEventsPerSpan is default max number of message events per span DefaultMaxMessageEventsPerSpan = 128 // DefaultMaxAttributesPerSpan is default max number of attributes per span DefaultMaxAttributesPerSpan = 32 // DefaultMaxLinksPerSpan is default max number of links per span DefaultMaxLinksPerSpan = 32 ) // ApplyConfig applies changes to the global tracing configuration. // // Fields not provided in the given config are going to be preserved. func ApplyConfig(cfg Config) { configWriteMu.Lock() defer configWriteMu.Unlock() c := *config.Load().(*Config) if cfg.DefaultSampler != nil { c.DefaultSampler = cfg.DefaultSampler } if cfg.IDGenerator != nil { c.IDGenerator = cfg.IDGenerator } if cfg.MaxAnnotationEventsPerSpan > 0 { c.MaxAnnotationEventsPerSpan = cfg.MaxAnnotationEventsPerSpan } if cfg.MaxMessageEventsPerSpan > 0 { c.MaxMessageEventsPerSpan = cfg.MaxMessageEventsPerSpan } if cfg.MaxAttributesPerSpan > 0 { c.MaxAttributesPerSpan = cfg.MaxAttributesPerSpan } if cfg.MaxLinksPerSpan > 0 { c.MaxLinksPerSpan = cfg.MaxLinksPerSpan } config.Store(&c) } opencensus-go-0.24.0/trace/config_test.go000066400000000000000000000071621433102037600203200ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package trace import ( "reflect" "testing" ) func TestApplyConfig(t *testing.T) { cfg := config.Load().(*Config) defaultCfg := Config{ DefaultSampler: cfg.DefaultSampler, IDGenerator: cfg.IDGenerator, MaxAttributesPerSpan: DefaultMaxAttributesPerSpan, MaxAnnotationEventsPerSpan: DefaultMaxAnnotationEventsPerSpan, MaxMessageEventsPerSpan: DefaultMaxMessageEventsPerSpan, MaxLinksPerSpan: DefaultMaxLinksPerSpan, } testCases := []struct { name string newCfg Config wantCfg Config }{ { name: "Initialize to default config", newCfg: defaultCfg, wantCfg: defaultCfg, }, { name: "Empty Config", newCfg: Config{}, wantCfg: defaultCfg, }, { name: "Valid non-default config", newCfg: Config{ MaxAttributesPerSpan: 1, MaxAnnotationEventsPerSpan: 2, MaxMessageEventsPerSpan: 3, MaxLinksPerSpan: 4, }, wantCfg: Config{ DefaultSampler: cfg.DefaultSampler, IDGenerator: cfg.IDGenerator, MaxAttributesPerSpan: 1, MaxAnnotationEventsPerSpan: 2, MaxMessageEventsPerSpan: 3, MaxLinksPerSpan: 4, }, }, { name: "Partially invalid config", newCfg: Config{ MaxAttributesPerSpan: -1, MaxAnnotationEventsPerSpan: 3, MaxMessageEventsPerSpan: -3, MaxLinksPerSpan: 5, }, wantCfg: Config{ DefaultSampler: cfg.DefaultSampler, IDGenerator: cfg.IDGenerator, MaxAttributesPerSpan: 1, MaxAnnotationEventsPerSpan: 3, MaxMessageEventsPerSpan: 3, MaxLinksPerSpan: 5, }, }, } for i, tt := range testCases { newCfg := tt.newCfg ApplyConfig(newCfg) gotCfg := config.Load().(*Config) wantCfg := tt.wantCfg if got, want := reflect.ValueOf(gotCfg.DefaultSampler).Pointer(), reflect.ValueOf(wantCfg.DefaultSampler).Pointer(); got != want { t.Fatalf("testId = %d, testName = %s: config.DefaultSampler = %#v; want %#v", i, tt.name, got, want) } if got, want := gotCfg.IDGenerator, wantCfg.IDGenerator; got != want { t.Fatalf("testId = %d, testName = %s: config.IDGenerator = %#v; want %#v", i, tt.name, got, want) } if got, want := gotCfg.MaxAttributesPerSpan, wantCfg.MaxAttributesPerSpan; got != want { t.Fatalf("testId = %d, testName = %s: config.MaxAttributesPerSpan = %#v; want %#v", i, tt.name, got, want) } if got, want := gotCfg.MaxLinksPerSpan, wantCfg.MaxLinksPerSpan; got != want { t.Fatalf("testId = %d, testName = %s: config.MaxLinksPerSpan = %#v; want %#v", i, tt.name, got, want) } if got, want := gotCfg.MaxAnnotationEventsPerSpan, wantCfg.MaxAnnotationEventsPerSpan; got != want { t.Fatalf("testId = %d, testName = %s: config.MaxAnnotationEventsPerSpan = %#v; want %#v", i, tt.name, got, want) } if got, want := gotCfg.MaxMessageEventsPerSpan, wantCfg.MaxMessageEventsPerSpan; got != want { t.Fatalf("testId = %d, testName = %s: config.MaxMessageEventsPerSpan = %#v; want %#v", i, tt.name, got, want) } } } opencensus-go-0.24.0/trace/doc.go000066400000000000000000000037061433102037600165610ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /* Package trace contains support for OpenCensus distributed tracing. The following assumes a basic familiarity with OpenCensus concepts. 
See http://opencensus.io # Exporting Traces To export collected tracing data, register at least one exporter. You can use one of the provided exporters or write your own. trace.RegisterExporter(exporter) By default, traces will be sampled relatively rarely. To change the sampling frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler to sample a subset of traces, or use AlwaysSample to collect a trace on every run: trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) Be careful about using trace.AlwaysSample in a production application with significant traffic: a new trace will be started and exported for every request. # Adding Spans to a Trace A trace consists of a tree of spans. In Go, the current span is carried in a context.Context. It is common to want to capture all the activity of a function call in a span. For this to work, the function must take a context.Context as a parameter. Add these two lines to the top of the function: ctx, span := trace.StartSpan(ctx, "example.com/Run") defer span.End() StartSpan will create a new top-level span if the context doesn't contain another span, otherwise it will create a child span. */ package trace // import "go.opencensus.io/trace" opencensus-go-0.24.0/trace/evictedqueue.go000066400000000000000000000020021433102037600204700ustar00rootroot00000000000000// Copyright 2019, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace type evictedQueue struct { queue []interface{} capacity int droppedCount int } func newEvictedQueue(capacity int) *evictedQueue { eq := &evictedQueue{ capacity: capacity, queue: make([]interface{}, 0), } return eq } func (eq *evictedQueue) add(value interface{}) { if len(eq.queue) == eq.capacity { eq.queue = eq.queue[1:] eq.droppedCount++ } eq.queue = append(eq.queue, value) } opencensus-go-0.24.0/trace/evictedqueue_test.go000066400000000000000000000033631433102037600215420ustar00rootroot00000000000000// Copyright 2019, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package trace import ( "reflect" "testing" ) func init() { } func TestAdd(t *testing.T) { q := newEvictedQueue(3) q.add("value1") q.add("value2") if wantLen, gotLen := 2, len(q.queue); wantLen != gotLen { t.Errorf("got queue length %d want %d", gotLen, wantLen) } } func (eq *evictedQueue) queueToArray() []string { arr := make([]string, 0) for _, value := range eq.queue { arr = append(arr, value.(string)) } return arr } func TestDropCount(t *testing.T) { q := newEvictedQueue(3) q.add("value1") q.add("value2") q.add("value3") q.add("value1") q.add("value4") if wantLen, gotLen := 3, len(q.queue); wantLen != gotLen { t.Errorf("got queue length %d want %d", gotLen, wantLen) } if wantDropCount, gotDropCount := 2, q.droppedCount; wantDropCount != gotDropCount { t.Errorf("got drop count %d want %d", gotDropCount, wantDropCount) } wantArr := []string{"value3", "value1", "value4"} gotArr := q.queueToArray() if wantLen, gotLen := len(wantArr), len(gotArr); gotLen != wantLen { t.Errorf("got array len %d want %d", gotLen, wantLen) } if !reflect.DeepEqual(gotArr, wantArr) { t.Errorf("got array = %#v; want %#v", gotArr, wantArr) } } opencensus-go-0.24.0/trace/examples_test.go000066400000000000000000000021431433102037600206630ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace_test import ( "context" "fmt" "go.opencensus.io/trace" ) // This example shows how to use StartSpan and (*Span).End to capture // a function execution in a Span. It assumes that the function // has a context.Context argument. func ExampleStartSpan() { printEvens := func(ctx context.Context) { ctx, span := trace.StartSpan(ctx, "my/package.Function") defer span.End() for i := 0; i < 10; i++ { if i%2 == 0 { fmt.Printf("Even!\n") } } } ctx := context.Background() printEvens(ctx) } opencensus-go-0.24.0/trace/export.go000066400000000000000000000052411433102037600173310ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace import ( "sync" "sync/atomic" "time" ) // Exporter is a type for functions that receive sampled trace spans. // // The ExportSpan method should be safe for concurrent use and should return // quickly; if an Exporter takes a significant amount of time to process a // SpanData, that work should be done on another goroutine. // // The SpanData should not be modified, but a pointer to it can be kept. 
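//
// A minimal sketch of one way to satisfy that contract (the channel-based
// exporter below is hypothetical and not part of this package): hand each
// span off to a buffered channel and let a worker goroutine do the slow work.
//
//	type queueExporter struct{ spans chan *trace.SpanData }
//
//	func (e *queueExporter) ExportSpan(s *trace.SpanData) {
//		select {
//		case e.spans <- s: // enqueue; a worker goroutine drains the channel
//		default: // buffer full: drop rather than block the caller
//		}
//	}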
type Exporter interface { ExportSpan(s *SpanData) } type exportersMap map[Exporter]struct{} var ( exporterMu sync.Mutex exporters atomic.Value ) // RegisterExporter adds to the list of Exporters that will receive sampled // trace spans. // // Binaries can register exporters, libraries shouldn't register exporters. func RegisterExporter(e Exporter) { exporterMu.Lock() new := make(exportersMap) if old, ok := exporters.Load().(exportersMap); ok { for k, v := range old { new[k] = v } } new[e] = struct{}{} exporters.Store(new) exporterMu.Unlock() } // UnregisterExporter removes from the list of Exporters the Exporter that was // registered with the given name. func UnregisterExporter(e Exporter) { exporterMu.Lock() new := make(exportersMap) if old, ok := exporters.Load().(exportersMap); ok { for k, v := range old { new[k] = v } } delete(new, e) exporters.Store(new) exporterMu.Unlock() } // SpanData contains all the information collected by a Span. type SpanData struct { SpanContext ParentSpanID SpanID SpanKind int Name string StartTime time.Time // The wall clock time of EndTime will be adjusted to always be offset // from StartTime by the duration of the span. EndTime time.Time // The values of Attributes each have type string, bool, or int64. Attributes map[string]interface{} Annotations []Annotation MessageEvents []MessageEvent Status Links []Link HasRemoteParent bool DroppedAttributeCount int DroppedAnnotationCount int DroppedMessageEventCount int DroppedLinkCount int // ChildSpanCount holds the number of child span created for this span. ChildSpanCount int } opencensus-go-0.24.0/trace/internal/000077500000000000000000000000001433102037600172735ustar00rootroot00000000000000opencensus-go-0.24.0/trace/internal/internal.go000066400000000000000000000014401433102037600214350ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package internal provides trace internals. package internal // IDGenerator allows custom generators for TraceId and SpanId. type IDGenerator interface { NewTraceID() [16]byte NewSpanID() [8]byte } opencensus-go-0.24.0/trace/lrumap.go000066400000000000000000000030551433102037600173110ustar00rootroot00000000000000// Copyright 2019, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace import ( "github.com/golang/groupcache/lru" ) // A simple lru.Cache wrapper that tracks the keys of the current contents and // the cumulative number of evicted items. 
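//
// A minimal usage sketch (the capacity and keys below are illustrative only):
//
//	lm := newLruMap(2)
//	lm.add("k1", 1)
//	lm.add("k2", 2)
//	lm.add("k3", 3)       // evicts "k1" and increments droppedCount
//	_, ok := lm.get("k1") // ok == false after the eviction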
type lruMap struct { cacheKeys map[lru.Key]bool cache *lru.Cache droppedCount int } func newLruMap(size int) *lruMap { lm := &lruMap{ cacheKeys: make(map[lru.Key]bool), cache: lru.New(size), droppedCount: 0, } lm.cache.OnEvicted = func(key lru.Key, value interface{}) { delete(lm.cacheKeys, key) lm.droppedCount++ } return lm } func (lm lruMap) len() int { return lm.cache.Len() } func (lm lruMap) keys() []interface{} { keys := make([]interface{}, 0, len(lm.cacheKeys)) for k := range lm.cacheKeys { keys = append(keys, k) } return keys } func (lm *lruMap) add(key, value interface{}) { lm.cacheKeys[lru.Key(key)] = true lm.cache.Add(lru.Key(key), value) } func (lm *lruMap) get(key interface{}) (interface{}, bool) { return lm.cache.Get(key) } opencensus-go-0.24.0/trace/propagation/000077500000000000000000000000001433102037600200025ustar00rootroot00000000000000opencensus-go-0.24.0/trace/propagation/propagation.go000066400000000000000000000062421433102037600226600ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package propagation implements the binary trace context format. package propagation // import "go.opencensus.io/trace/propagation" // TODO: link to external spec document. // BinaryFormat format: // // Binary value: // version_id: 1 byte representing the version id. // // For version_id = 0: // // version_format: // field_format: // // Fields: // // TraceId: (field_id = 0, len = 16, default = "0000000000000000") - 16-byte array representing the trace_id. // SpanId: (field_id = 1, len = 8, default = "00000000") - 8-byte array representing the span_id. // TraceOptions: (field_id = 2, len = 1, default = "0") - 1-byte array representing the trace_options. // // Fields MUST be encoded using the field id order (smaller to higher). // // Valid value example: // // {0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 1, 97, // 98, 99, 100, 101, 102, 103, 104, 2, 1} // // version_id = 0; // trace_id = {64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79} // span_id = {97, 98, 99, 100, 101, 102, 103, 104}; // trace_options = {1}; import ( "net/http" "go.opencensus.io/trace" ) // Binary returns the binary format representation of a SpanContext. // // If sc is the zero value, Binary returns nil. func Binary(sc trace.SpanContext) []byte { if sc == (trace.SpanContext{}) { return nil } var b [29]byte copy(b[2:18], sc.TraceID[:]) b[18] = 1 copy(b[19:27], sc.SpanID[:]) b[27] = 2 b[28] = uint8(sc.TraceOptions) return b[:] } // FromBinary returns the SpanContext represented by b. // // If b has an unsupported version ID or contains no TraceID, FromBinary // returns with ok==false. 
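//
// A round-trip sketch (the IDs below are arbitrary placeholder values):
//
//	sc := trace.SpanContext{
//		TraceID:      trace.TraceID{0x01},
//		SpanID:       trace.SpanID{0x02},
//		TraceOptions: 1,
//	}
//	b := Binary(sc)          // 29-byte binary representation
//	sc2, ok := FromBinary(b) // ok == true and sc2 == sc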
func FromBinary(b []byte) (sc trace.SpanContext, ok bool) { if len(b) == 0 || b[0] != 0 { return trace.SpanContext{}, false } b = b[1:] if len(b) >= 17 && b[0] == 0 { copy(sc.TraceID[:], b[1:17]) b = b[17:] } else { return trace.SpanContext{}, false } if len(b) >= 9 && b[0] == 1 { copy(sc.SpanID[:], b[1:9]) b = b[9:] } if len(b) >= 2 && b[0] == 2 { sc.TraceOptions = trace.TraceOptions(b[1]) } return sc, true } // HTTPFormat implementations propagate span contexts // in HTTP requests. // // SpanContextFromRequest extracts a span context from incoming // requests. // // SpanContextToRequest modifies the given request to include the given // span context. type HTTPFormat interface { SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) SpanContextToRequest(sc trace.SpanContext, req *http.Request) } // TODO(jbd): Find a more representative but short name for HTTPFormat. opencensus-go-0.24.0/trace/propagation/propagation_test.go000066400000000000000000000102371433102037600237160ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package propagation import ( "bytes" "fmt" "testing" . "go.opencensus.io/trace" ) func TestBinary(t *testing.T) { tid := TraceID{0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f} sid := SpanID{0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68} b := []byte{ 0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 1, 97, 98, 99, 100, 101, 102, 103, 104, 2, 1, } if b2 := Binary(SpanContext{ TraceID: tid, SpanID: sid, TraceOptions: 1, }); !bytes.Equal(b2, b) { t.Errorf("Binary: got serialization %02x want %02x", b2, b) } sc, ok := FromBinary(b) if !ok { t.Errorf("FromBinary: got ok==%t, want true", ok) } if got := sc.TraceID; got != tid { t.Errorf("FromBinary: got trace ID %s want %s", got, tid) } if got := sc.SpanID; got != sid { t.Errorf("FromBinary: got span ID %s want %s", got, sid) } b[0] = 1 sc, ok = FromBinary(b) if ok { t.Errorf("FromBinary: decoding bytes containing an unsupported version: got ok==%t want false", ok) } b = []byte{0, 1, 97, 98, 99, 100, 101, 102, 103, 104, 2, 1} sc, ok = FromBinary(b) if ok { t.Errorf("FromBinary: decoding bytes without a TraceID: got ok==%t want false", ok) } if b := Binary(SpanContext{}); b != nil { t.Errorf("Binary(SpanContext{}): got serialization %02x want nil", b) } } func TestFromBinary(t *testing.T) { validData := []byte{0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 1, 97, 98, 99, 100, 101, 102, 103, 104, 2, 1} tests := []struct { name string data []byte wantTraceID TraceID wantSpanID SpanID wantOpts TraceOptions wantOk bool }{ { name: "nil data", data: nil, wantOk: false, }, { name: "short data", data: []byte{0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77}, wantOk: false, }, { name: "wrong field number", data: []byte{0, 1, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77}, wantOk: false, }, { name: "valid data", data: validData, wantTraceID: 
TraceID{64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79}, wantSpanID: SpanID{97, 98, 99, 100, 101, 102, 103, 104}, wantOpts: 1, wantOk: true, }, } for _, tt := range tests { sc, gotOk := FromBinary(tt.data) gotTraceID, gotSpanID, gotOpts := sc.TraceID, sc.SpanID, sc.TraceOptions if gotTraceID != tt.wantTraceID { t.Errorf("%s: Decode() gotTraceID = %v, want %v", tt.name, gotTraceID, tt.wantTraceID) } if gotSpanID != tt.wantSpanID { t.Errorf("%s: Decode() gotSpanID = %v, want %v", tt.name, gotSpanID, tt.wantSpanID) } if gotOpts != tt.wantOpts { t.Errorf("%s: Decode() gotOpts = %v, want %v", tt.name, gotOpts, tt.wantOpts) } if gotOk != tt.wantOk { t.Errorf("%s: Decode() gotOk = %v, want %v", tt.name, gotOk, tt.wantOk) } } } func BenchmarkBinary(b *testing.B) { tid := TraceID{0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f} sid := SpanID{0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68} sc := SpanContext{ TraceID: tid, SpanID: sid, } var x byte for i := 0; i < b.N; i++ { bin := Binary(sc) x += bin[0] } if x == 1 { fmt.Println(x) // try to prevent optimizing-out } } func BenchmarkFromBinary(b *testing.B) { bin := []byte{ 0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 1, 97, 98, 99, 100, 101, 102, 103, 104, 2, 1, } var x byte for i := 0; i < b.N; i++ { sc, _ := FromBinary(bin) x += sc.TraceID[0] } if x == 1 { fmt.Println(x) // try to prevent optimizing-out } } opencensus-go-0.24.0/trace/sampling.go000066400000000000000000000043221433102037600176210ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace import ( "encoding/binary" ) const defaultSamplingProbability = 1e-4 // Sampler decides whether a trace should be sampled and exported. type Sampler func(SamplingParameters) SamplingDecision // SamplingParameters contains the values passed to a Sampler. type SamplingParameters struct { ParentContext SpanContext TraceID TraceID SpanID SpanID Name string HasRemoteParent bool } // SamplingDecision is the value returned by a Sampler. type SamplingDecision struct { Sample bool } // ProbabilitySampler returns a Sampler that samples a given fraction of traces. // // It also samples spans whose parents are sampled. func ProbabilitySampler(fraction float64) Sampler { if !(fraction >= 0) { fraction = 0 } else if fraction >= 1 { return AlwaysSample() } traceIDUpperBound := uint64(fraction * (1 << 63)) return Sampler(func(p SamplingParameters) SamplingDecision { if p.ParentContext.IsSampled() { return SamplingDecision{Sample: true} } x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1 return SamplingDecision{Sample: x < traceIDUpperBound} }) } // AlwaysSample returns a Sampler that samples every trace. // Be careful about using this sampler in a production application with // significant traffic: a new trace will be started and exported for every // request. 
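//
// A configuration sketch (the 1% fraction and span name are examples only;
// ctx is an existing context.Context):
//
//	trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(0.01)})
//
//	// Force sampling for one span without changing the global default:
//	ctx, span := trace.StartSpan(ctx, "example.com/Work", trace.WithSampler(trace.AlwaysSample()))
//	defer span.End()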
func AlwaysSample() Sampler { return func(p SamplingParameters) SamplingDecision { return SamplingDecision{Sample: true} } } // NeverSample returns a Sampler that samples no traces. func NeverSample() Sampler { return func(p SamplingParameters) SamplingDecision { return SamplingDecision{Sample: false} } } opencensus-go-0.24.0/trace/spanbucket.go000066400000000000000000000065321433102037600201530ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace import ( "time" ) // samplePeriod is the minimum time between accepting spans in a single bucket. const samplePeriod = time.Second // defaultLatencies contains the default latency bucket bounds. // TODO: consider defaults, make configurable var defaultLatencies = [...]time.Duration{ 10 * time.Microsecond, 100 * time.Microsecond, time.Millisecond, 10 * time.Millisecond, 100 * time.Millisecond, time.Second, 10 * time.Second, time.Minute, } // bucket is a container for a set of spans for a particular error code or latency range. type bucket struct { nextTime time.Time // next time we can accept a span buffer []*SpanData // circular buffer of spans nextIndex int // location next SpanData should be placed in buffer overflow bool // whether the circular buffer has wrapped around } func makeBucket(bufferSize int) bucket { return bucket{ buffer: make([]*SpanData, bufferSize), } } // add adds a span to the bucket, if nextTime has been reached. func (b *bucket) add(s *SpanData) { if s.EndTime.Before(b.nextTime) { return } if len(b.buffer) == 0 { return } b.nextTime = s.EndTime.Add(samplePeriod) b.buffer[b.nextIndex] = s b.nextIndex++ if b.nextIndex == len(b.buffer) { b.nextIndex = 0 b.overflow = true } } // size returns the number of spans in the bucket. func (b *bucket) size() int { if b.overflow { return len(b.buffer) } return b.nextIndex } // span returns the ith span in the bucket. func (b *bucket) span(i int) *SpanData { if !b.overflow { return b.buffer[i] } if i < len(b.buffer)-b.nextIndex { return b.buffer[b.nextIndex+i] } return b.buffer[b.nextIndex+i-len(b.buffer)] } // resize changes the size of the bucket to n, keeping up to n existing spans. func (b *bucket) resize(n int) { cur := b.size() newBuffer := make([]*SpanData, n) if cur < n { for i := 0; i < cur; i++ { newBuffer[i] = b.span(i) } b.buffer = newBuffer b.nextIndex = cur b.overflow = false return } for i := 0; i < n; i++ { newBuffer[i] = b.span(i + cur - n) } b.buffer = newBuffer b.nextIndex = 0 b.overflow = true } // latencyBucket returns the appropriate bucket number for a given latency. func latencyBucket(latency time.Duration) int { i := 0 for i < len(defaultLatencies) && latency >= defaultLatencies[i] { i++ } return i } // latencyBucketBounds returns the lower and upper bounds for a latency bucket // number. // // The lower bound is inclusive, the upper bound is exclusive (except for the // last bucket.) 
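//
// For example, with the default bounds above, a 250µs latency lands in the
// bucket at index 2, whose bounds are [100µs, 1ms):
//
//	i := latencyBucket(250 * time.Microsecond) // i == 2
//	lo, hi := latencyBucketBounds(i)           // lo == 100µs, hi == 1ms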
func latencyBucketBounds(index int) (lower time.Duration, upper time.Duration) { if index == 0 { return 0, defaultLatencies[index] } if index == len(defaultLatencies) { return defaultLatencies[index-1], 1<<63 - 1 } return defaultLatencies[index-1], defaultLatencies[index] } opencensus-go-0.24.0/trace/spanstore.go000066400000000000000000000166441433102037600200370ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace import ( "sync" "time" "go.opencensus.io/internal" ) const ( maxBucketSize = 100000 defaultBucketSize = 10 ) var ( ssmu sync.RWMutex // protects spanStores spanStores = make(map[string]*spanStore) ) // This exists purely to avoid exposing internal methods used by z-Pages externally. type internalOnly struct{} func init() { //TODO(#412): remove internal.Trace = &internalOnly{} } // ReportActiveSpans returns the active spans for the given name. func (i internalOnly) ReportActiveSpans(name string) []*SpanData { s := spanStoreForName(name) if s == nil { return nil } var out []*SpanData s.mu.Lock() defer s.mu.Unlock() for activeSpan := range s.active { if s, ok := activeSpan.(*span); ok { out = append(out, s.makeSpanData()) } } return out } // ReportSpansByError returns a sample of error spans. // // If code is nonzero, only spans with that status code are returned. func (i internalOnly) ReportSpansByError(name string, code int32) []*SpanData { s := spanStoreForName(name) if s == nil { return nil } var out []*SpanData s.mu.Lock() defer s.mu.Unlock() if code != 0 { if b, ok := s.errors[code]; ok { for _, sd := range b.buffer { if sd == nil { break } out = append(out, sd) } } } else { for _, b := range s.errors { for _, sd := range b.buffer { if sd == nil { break } out = append(out, sd) } } } return out } // ConfigureBucketSizes sets the number of spans to keep per latency and error // bucket for different span names. func (i internalOnly) ConfigureBucketSizes(bcs []internal.BucketConfiguration) { for _, bc := range bcs { latencyBucketSize := bc.MaxRequestsSucceeded if latencyBucketSize < 0 { latencyBucketSize = 0 } if latencyBucketSize > maxBucketSize { latencyBucketSize = maxBucketSize } errorBucketSize := bc.MaxRequestsErrors if errorBucketSize < 0 { errorBucketSize = 0 } if errorBucketSize > maxBucketSize { errorBucketSize = maxBucketSize } spanStoreSetSize(bc.Name, latencyBucketSize, errorBucketSize) } } // ReportSpansPerMethod returns a summary of what spans are being stored for each span name. 
func (i internalOnly) ReportSpansPerMethod() map[string]internal.PerMethodSummary { out := make(map[string]internal.PerMethodSummary) ssmu.RLock() defer ssmu.RUnlock() for name, s := range spanStores { s.mu.Lock() p := internal.PerMethodSummary{ Active: len(s.active), } for code, b := range s.errors { p.ErrorBuckets = append(p.ErrorBuckets, internal.ErrorBucketSummary{ ErrorCode: code, Size: b.size(), }) } for i, b := range s.latency { min, max := latencyBucketBounds(i) p.LatencyBuckets = append(p.LatencyBuckets, internal.LatencyBucketSummary{ MinLatency: min, MaxLatency: max, Size: b.size(), }) } s.mu.Unlock() out[name] = p } return out } // ReportSpansByLatency returns a sample of successful spans. // // minLatency is the minimum latency of spans to be returned. // maxLatency, if nonzero, is the maximum latency of spans to be returned. func (i internalOnly) ReportSpansByLatency(name string, minLatency, maxLatency time.Duration) []*SpanData { s := spanStoreForName(name) if s == nil { return nil } var out []*SpanData s.mu.Lock() defer s.mu.Unlock() for i, b := range s.latency { min, max := latencyBucketBounds(i) if i+1 != len(s.latency) && max <= minLatency { continue } if maxLatency != 0 && maxLatency < min { continue } for _, sd := range b.buffer { if sd == nil { break } if minLatency != 0 || maxLatency != 0 { d := sd.EndTime.Sub(sd.StartTime) if d < minLatency { continue } if maxLatency != 0 && d > maxLatency { continue } } out = append(out, sd) } } return out } // spanStore keeps track of spans stored for a particular span name. // // It contains all active spans; a sample of spans for failed requests, // categorized by error code; and a sample of spans for successful requests, // bucketed by latency. type spanStore struct { mu sync.Mutex // protects everything below. active map[SpanInterface]struct{} errors map[int32]*bucket latency []bucket maxSpansPerErrorBucket int } // newSpanStore creates a span store. func newSpanStore(name string, latencyBucketSize int, errorBucketSize int) *spanStore { s := &spanStore{ active: make(map[SpanInterface]struct{}), latency: make([]bucket, len(defaultLatencies)+1), maxSpansPerErrorBucket: errorBucketSize, } for i := range s.latency { s.latency[i] = makeBucket(latencyBucketSize) } return s } // spanStoreForName returns the spanStore for the given name. // // It returns nil if it doesn't exist. func spanStoreForName(name string) *spanStore { var s *spanStore ssmu.RLock() s, _ = spanStores[name] ssmu.RUnlock() return s } // spanStoreForNameCreateIfNew returns the spanStore for the given name. // // It creates it if it didn't exist. func spanStoreForNameCreateIfNew(name string) *spanStore { ssmu.RLock() s, ok := spanStores[name] ssmu.RUnlock() if ok { return s } ssmu.Lock() defer ssmu.Unlock() s, ok = spanStores[name] if ok { return s } s = newSpanStore(name, defaultBucketSize, defaultBucketSize) spanStores[name] = s return s } // spanStoreSetSize resizes the spanStore for the given name. // // It creates it if it didn't exist. 
func spanStoreSetSize(name string, latencyBucketSize int, errorBucketSize int) { ssmu.RLock() s, ok := spanStores[name] ssmu.RUnlock() if ok { s.resize(latencyBucketSize, errorBucketSize) return } ssmu.Lock() defer ssmu.Unlock() s, ok = spanStores[name] if ok { s.resize(latencyBucketSize, errorBucketSize) return } s = newSpanStore(name, latencyBucketSize, errorBucketSize) spanStores[name] = s } func (s *spanStore) resize(latencyBucketSize int, errorBucketSize int) { s.mu.Lock() for i := range s.latency { s.latency[i].resize(latencyBucketSize) } for _, b := range s.errors { b.resize(errorBucketSize) } s.maxSpansPerErrorBucket = errorBucketSize s.mu.Unlock() } // add adds a span to the active bucket of the spanStore. func (s *spanStore) add(span SpanInterface) { s.mu.Lock() s.active[span] = struct{}{} s.mu.Unlock() } // finished removes a span from the active set, and adds a corresponding // SpanData to a latency or error bucket. func (s *spanStore) finished(span SpanInterface, sd *SpanData) { latency := sd.EndTime.Sub(sd.StartTime) if latency < 0 { latency = 0 } code := sd.Status.Code s.mu.Lock() delete(s.active, span) if code == 0 { s.latency[latencyBucket(latency)].add(sd) } else { if s.errors == nil { s.errors = make(map[int32]*bucket) } if b := s.errors[code]; b != nil { b.add(sd) } else { b := makeBucket(s.maxSpansPerErrorBucket) s.errors[code] = &b b.add(sd) } } s.mu.Unlock() } opencensus-go-0.24.0/trace/status_codes.go000066400000000000000000000025621433102037600205130ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace // Status codes for use with Span.SetStatus. These correspond to the status // codes used by gRPC defined here: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto const ( StatusCodeOK = 0 StatusCodeCancelled = 1 StatusCodeUnknown = 2 StatusCodeInvalidArgument = 3 StatusCodeDeadlineExceeded = 4 StatusCodeNotFound = 5 StatusCodeAlreadyExists = 6 StatusCodePermissionDenied = 7 StatusCodeResourceExhausted = 8 StatusCodeFailedPrecondition = 9 StatusCodeAborted = 10 StatusCodeOutOfRange = 11 StatusCodeUnimplemented = 12 StatusCodeInternal = 13 StatusCodeUnavailable = 14 StatusCodeDataLoss = 15 StatusCodeUnauthenticated = 16 ) opencensus-go-0.24.0/trace/trace.go000066400000000000000000000414101433102037600171040ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package trace import ( "context" crand "crypto/rand" "encoding/binary" "fmt" "math/rand" "sync" "sync/atomic" "time" "go.opencensus.io/internal" "go.opencensus.io/trace/tracestate" ) type tracer struct{} var _ Tracer = &tracer{} // Span represents a span of a trace. It has an associated SpanContext, and // stores data accumulated while the span is active. // // Ideally users should interact with Spans by calling the functions in this // package that take a Context parameter. type span struct { // data contains information recorded about the span. // // It will be non-nil if we are exporting the span or recording events for it. // Otherwise, data is nil, and the Span is simply a carrier for the // SpanContext, so that the trace ID is propagated. data *SpanData mu sync.Mutex // protects the contents of *data (but not the pointer value.) spanContext SpanContext // lruAttributes are capped at configured limit. When the capacity is reached an oldest entry // is removed to create room for a new entry. lruAttributes *lruMap // annotations are stored in FIFO queue capped by configured limit. annotations *evictedQueue // messageEvents are stored in FIFO queue capped by configured limit. messageEvents *evictedQueue // links are stored in FIFO queue capped by configured limit. links *evictedQueue // spanStore is the spanStore this span belongs to, if any, otherwise it is nil. *spanStore endOnce sync.Once executionTracerTaskEnd func() // ends the execution tracer span } // IsRecordingEvents returns true if events are being recorded for this span. // Use this check to avoid computing expensive annotations when they will never // be used. func (s *span) IsRecordingEvents() bool { if s == nil { return false } return s.data != nil } // TraceOptions contains options associated with a trace span. type TraceOptions uint32 // IsSampled returns true if the span will be exported. func (sc SpanContext) IsSampled() bool { return sc.TraceOptions.IsSampled() } // setIsSampled sets the TraceOptions bit that determines whether the span will be exported. func (sc *SpanContext) setIsSampled(sampled bool) { if sampled { sc.TraceOptions |= 1 } else { sc.TraceOptions &= ^TraceOptions(1) } } // IsSampled returns true if the span will be exported. func (t TraceOptions) IsSampled() bool { return t&1 == 1 } // SpanContext contains the state that must propagate across process boundaries. // // SpanContext is not an implementation of context.Context. // TODO: add reference to external Census docs for SpanContext. type SpanContext struct { TraceID TraceID SpanID SpanID TraceOptions TraceOptions Tracestate *tracestate.Tracestate } type contextKey struct{} // FromContext returns the Span stored in a context, or nil if there isn't one. func (t *tracer) FromContext(ctx context.Context) *Span { s, _ := ctx.Value(contextKey{}).(*Span) return s } // NewContext returns a new context with the given Span attached. func (t *tracer) NewContext(parent context.Context, s *Span) context.Context { return context.WithValue(parent, contextKey{}, s) } // All available span kinds. Span kind must be either one of these values. const ( SpanKindUnspecified = iota SpanKindServer SpanKindClient ) // StartOptions contains options concerning how a span is started. type StartOptions struct { // Sampler to consult for this Span. If provided, it is always consulted. // // If not provided, then the behavior differs based on whether // the parent of this Span is remote, local, or there is no parent. 
// In the case of a remote parent or no parent, the // default sampler (see Config) will be consulted. Otherwise, // when there is a non-remote parent, no new sampling decision will be made: // we will preserve the sampling of the parent. Sampler Sampler // SpanKind represents the kind of a span. If none is set, // SpanKindUnspecified is used. SpanKind int } // StartOption apply changes to StartOptions. type StartOption func(*StartOptions) // WithSpanKind makes new spans to be created with the given kind. func WithSpanKind(spanKind int) StartOption { return func(o *StartOptions) { o.SpanKind = spanKind } } // WithSampler makes new spans to be be created with a custom sampler. // Otherwise, the global sampler is used. func WithSampler(sampler Sampler) StartOption { return func(o *StartOptions) { o.Sampler = sampler } } // StartSpan starts a new child span of the current span in the context. If // there is no span in the context, creates a new trace and span. // // Returned context contains the newly created span. You can use it to // propagate the returned span in process. func (t *tracer) StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { var opts StartOptions var parent SpanContext if p := t.FromContext(ctx); p != nil { if ps, ok := p.internal.(*span); ok { ps.addChild() } parent = p.SpanContext() } for _, op := range o { op(&opts) } span := startSpanInternal(name, parent != SpanContext{}, parent, false, opts) ctx, end := startExecutionTracerTask(ctx, name) span.executionTracerTaskEnd = end extSpan := NewSpan(span) return t.NewContext(ctx, extSpan), extSpan } // StartSpanWithRemoteParent starts a new child span of the span from the given parent. // // If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is // preferred for cases where the parent is propagated via an incoming request. // // Returned context contains the newly created span. You can use it to // propagate the returned span in process. func (t *tracer) StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { var opts StartOptions for _, op := range o { op(&opts) } span := startSpanInternal(name, parent != SpanContext{}, parent, true, opts) ctx, end := startExecutionTracerTask(ctx, name) span.executionTracerTaskEnd = end extSpan := NewSpan(span) return t.NewContext(ctx, extSpan), extSpan } func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *span { s := &span{} s.spanContext = parent cfg := config.Load().(*Config) if gen, ok := cfg.IDGenerator.(*defaultIDGenerator); ok { // lazy initialization gen.init() } if !hasParent { s.spanContext.TraceID = cfg.IDGenerator.NewTraceID() } s.spanContext.SpanID = cfg.IDGenerator.NewSpanID() sampler := cfg.DefaultSampler if !hasParent || remoteParent || o.Sampler != nil { // If this span is the child of a local span and no Sampler is set in the // options, keep the parent's TraceOptions. // // Otherwise, consult the Sampler in the options if it is non-nil, otherwise // the default sampler. 
if o.Sampler != nil { sampler = o.Sampler } s.spanContext.setIsSampled(sampler(SamplingParameters{ ParentContext: parent, TraceID: s.spanContext.TraceID, SpanID: s.spanContext.SpanID, Name: name, HasRemoteParent: remoteParent}).Sample) } if !internal.LocalSpanStoreEnabled && !s.spanContext.IsSampled() { return s } s.data = &SpanData{ SpanContext: s.spanContext, StartTime: time.Now(), SpanKind: o.SpanKind, Name: name, HasRemoteParent: remoteParent, } s.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan) s.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan) s.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan) s.links = newEvictedQueue(cfg.MaxLinksPerSpan) if hasParent { s.data.ParentSpanID = parent.SpanID } if internal.LocalSpanStoreEnabled { var ss *spanStore ss = spanStoreForNameCreateIfNew(name) if ss != nil { s.spanStore = ss ss.add(s) } } return s } // End ends the span. func (s *span) End() { if s == nil { return } if s.executionTracerTaskEnd != nil { s.executionTracerTaskEnd() } if !s.IsRecordingEvents() { return } s.endOnce.Do(func() { exp, _ := exporters.Load().(exportersMap) mustExport := s.spanContext.IsSampled() && len(exp) > 0 if s.spanStore != nil || mustExport { sd := s.makeSpanData() sd.EndTime = internal.MonotonicEndTime(sd.StartTime) if s.spanStore != nil { s.spanStore.finished(s, sd) } if mustExport { for e := range exp { e.ExportSpan(sd) } } } }) } // makeSpanData produces a SpanData representing the current state of the Span. // It requires that s.data is non-nil. func (s *span) makeSpanData() *SpanData { var sd SpanData s.mu.Lock() sd = *s.data if s.lruAttributes.len() > 0 { sd.Attributes = s.lruAttributesToAttributeMap() sd.DroppedAttributeCount = s.lruAttributes.droppedCount } if len(s.annotations.queue) > 0 { sd.Annotations = s.interfaceArrayToAnnotationArray() sd.DroppedAnnotationCount = s.annotations.droppedCount } if len(s.messageEvents.queue) > 0 { sd.MessageEvents = s.interfaceArrayToMessageEventArray() sd.DroppedMessageEventCount = s.messageEvents.droppedCount } if len(s.links.queue) > 0 { sd.Links = s.interfaceArrayToLinksArray() sd.DroppedLinkCount = s.links.droppedCount } s.mu.Unlock() return &sd } // SpanContext returns the SpanContext of the span. func (s *span) SpanContext() SpanContext { if s == nil { return SpanContext{} } return s.spanContext } // SetName sets the name of the span, if it is recording events. func (s *span) SetName(name string) { if !s.IsRecordingEvents() { return } s.mu.Lock() s.data.Name = name s.mu.Unlock() } // SetStatus sets the status of the span, if it is recording events. 
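//
// A usage sketch via the exported Span API (the code and message are examples
// only; span is a *Span returned earlier by StartSpan):
//
//	span.SetStatus(trace.Status{
//		Code:    trace.StatusCodeNotFound,
//		Message: "resource not found",
//	})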
func (s *span) SetStatus(status Status) { if !s.IsRecordingEvents() { return } s.mu.Lock() s.data.Status = status s.mu.Unlock() } func (s *span) interfaceArrayToLinksArray() []Link { linksArr := make([]Link, 0, len(s.links.queue)) for _, value := range s.links.queue { linksArr = append(linksArr, value.(Link)) } return linksArr } func (s *span) interfaceArrayToMessageEventArray() []MessageEvent { messageEventArr := make([]MessageEvent, 0, len(s.messageEvents.queue)) for _, value := range s.messageEvents.queue { messageEventArr = append(messageEventArr, value.(MessageEvent)) } return messageEventArr } func (s *span) interfaceArrayToAnnotationArray() []Annotation { annotationArr := make([]Annotation, 0, len(s.annotations.queue)) for _, value := range s.annotations.queue { annotationArr = append(annotationArr, value.(Annotation)) } return annotationArr } func (s *span) lruAttributesToAttributeMap() map[string]interface{} { attributes := make(map[string]interface{}, s.lruAttributes.len()) for _, key := range s.lruAttributes.keys() { value, ok := s.lruAttributes.get(key) if ok { keyStr := key.(string) attributes[keyStr] = value } } return attributes } func (s *span) copyToCappedAttributes(attributes []Attribute) { for _, a := range attributes { s.lruAttributes.add(a.key, a.value) } } func (s *span) addChild() { if !s.IsRecordingEvents() { return } s.mu.Lock() s.data.ChildSpanCount++ s.mu.Unlock() } // AddAttributes sets attributes in the span. // // Existing attributes whose keys appear in the attributes parameter are overwritten. func (s *span) AddAttributes(attributes ...Attribute) { if !s.IsRecordingEvents() { return } s.mu.Lock() s.copyToCappedAttributes(attributes) s.mu.Unlock() } func (s *span) printStringInternal(attributes []Attribute, str string) { now := time.Now() var am map[string]interface{} if len(attributes) != 0 { am = make(map[string]interface{}, len(attributes)) for _, attr := range attributes { am[attr.key] = attr.value } } s.mu.Lock() s.annotations.add(Annotation{ Time: now, Message: str, Attributes: am, }) s.mu.Unlock() } // Annotate adds an annotation with attributes. // Attributes can be nil. func (s *span) Annotate(attributes []Attribute, str string) { if !s.IsRecordingEvents() { return } s.printStringInternal(attributes, str) } // Annotatef adds an annotation with attributes. func (s *span) Annotatef(attributes []Attribute, format string, a ...interface{}) { if !s.IsRecordingEvents() { return } s.printStringInternal(attributes, fmt.Sprintf(format, a...)) } // AddMessageSendEvent adds a message send event to the span. // // messageID is an identifier for the message, which is recommended to be // unique in this span and the same between the send event and the receive // event (this allows to identify a message between the sender and receiver). // For example, this could be a sequence id. func (s *span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { if !s.IsRecordingEvents() { return } now := time.Now() s.mu.Lock() s.messageEvents.add(MessageEvent{ Time: now, EventType: MessageEventTypeSent, MessageID: messageID, UncompressedByteSize: uncompressedByteSize, CompressedByteSize: compressedByteSize, }) s.mu.Unlock() } // AddMessageReceiveEvent adds a message receive event to the span. // // messageID is an identifier for the message, which is recommended to be // unique in this span and the same between the send event and the receive // event (this allows to identify a message between the sender and receiver). 
// For example, this could be a sequence id. func (s *span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { if !s.IsRecordingEvents() { return } now := time.Now() s.mu.Lock() s.messageEvents.add(MessageEvent{ Time: now, EventType: MessageEventTypeRecv, MessageID: messageID, UncompressedByteSize: uncompressedByteSize, CompressedByteSize: compressedByteSize, }) s.mu.Unlock() } // AddLink adds a link to the span. func (s *span) AddLink(l Link) { if !s.IsRecordingEvents() { return } s.mu.Lock() s.links.add(l) s.mu.Unlock() } func (s *span) String() string { if s == nil { return "" } if s.data == nil { return fmt.Sprintf("span %s", s.spanContext.SpanID) } s.mu.Lock() str := fmt.Sprintf("span %s %q", s.spanContext.SpanID, s.data.Name) s.mu.Unlock() return str } var config atomic.Value // access atomically func init() { config.Store(&Config{ DefaultSampler: ProbabilitySampler(defaultSamplingProbability), IDGenerator: &defaultIDGenerator{}, MaxAttributesPerSpan: DefaultMaxAttributesPerSpan, MaxAnnotationEventsPerSpan: DefaultMaxAnnotationEventsPerSpan, MaxMessageEventsPerSpan: DefaultMaxMessageEventsPerSpan, MaxLinksPerSpan: DefaultMaxLinksPerSpan, }) } type defaultIDGenerator struct { sync.Mutex // Please keep these as the first fields // so that these 8 byte fields will be aligned on addresses // divisible by 8, on both 32-bit and 64-bit machines when // performing atomic increments and accesses. // See: // * https://github.com/census-instrumentation/opencensus-go/issues/587 // * https://github.com/census-instrumentation/opencensus-go/issues/865 // * https://golang.org/pkg/sync/atomic/#pkg-note-BUG nextSpanID uint64 spanIDInc uint64 traceIDAdd [2]uint64 traceIDRand *rand.Rand initOnce sync.Once } // init initializes the generator on the first call to avoid consuming entropy // unnecessarily. func (gen *defaultIDGenerator) init() { gen.initOnce.Do(func() { // initialize traceID and spanID generators. var rngSeed int64 for _, p := range []interface{}{ &rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc, } { binary.Read(crand.Reader, binary.LittleEndian, p) } gen.traceIDRand = rand.New(rand.NewSource(rngSeed)) gen.spanIDInc |= 1 }) } // NewSpanID returns a non-zero span ID from a randomly-chosen sequence. func (gen *defaultIDGenerator) NewSpanID() [8]byte { var id uint64 for id == 0 { id = atomic.AddUint64(&gen.nextSpanID, gen.spanIDInc) } var sid [8]byte binary.LittleEndian.PutUint64(sid[:], id) return sid } // NewTraceID returns a non-zero trace ID from a randomly-chosen sequence. // mu should be held while this function is called. func (gen *defaultIDGenerator) NewTraceID() [16]byte { var tid [16]byte // Construct the trace ID from two outputs of traceIDRand, with a constant // added to each half for additional entropy. gen.Lock() binary.LittleEndian.PutUint64(tid[0:8], gen.traceIDRand.Uint64()+gen.traceIDAdd[0]) binary.LittleEndian.PutUint64(tid[8:16], gen.traceIDRand.Uint64()+gen.traceIDAdd[1]) gen.Unlock() return tid } opencensus-go-0.24.0/trace/trace_api.go000066400000000000000000000215421433102037600177410ustar00rootroot00000000000000// Copyright 2020, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
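The AddLink method shown above attaches a reference to a span in another trace, for example the producer span that enqueued the work being handled. A small sketch; the IDs and the queue attribute are placeholders, since in real use they come from the propagated span context of the linked span.

```
package main

import (
	"context"

	"go.opencensus.io/trace"
)

func main() {
	_, span := trace.StartSpan(context.Background(), "consume-message",
		trace.WithSampler(trace.AlwaysSample()))
	defer span.End()

	// The linked IDs would normally be decoded from the message metadata;
	// these literals are placeholders.
	span.AddLink(trace.Link{
		TraceID:    trace.TraceID{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
		SpanID:     trace.SpanID{1, 2, 4, 8, 16, 32, 64, 128},
		Type:       trace.LinkTypeParent,
		Attributes: map[string]interface{}{"queue": "orders"},
	})
}
```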
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace import ( "context" ) // DefaultTracer is the tracer used when package-level exported functions are invoked. var DefaultTracer Tracer = &tracer{} // Tracer can start spans and access context functions. type Tracer interface { // StartSpan starts a new child span of the current span in the context. If // there is no span in the context, creates a new trace and span. // // Returned context contains the newly created span. You can use it to // propagate the returned span in process. StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) // StartSpanWithRemoteParent starts a new child span of the span from the given parent. // // If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is // preferred for cases where the parent is propagated via an incoming request. // // Returned context contains the newly created span. You can use it to // propagate the returned span in process. StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) // FromContext returns the Span stored in a context, or nil if there isn't one. FromContext(ctx context.Context) *Span // NewContext returns a new context with the given Span attached. NewContext(parent context.Context, s *Span) context.Context } // StartSpan starts a new child span of the current span in the context. If // there is no span in the context, creates a new trace and span. // // Returned context contains the newly created span. You can use it to // propagate the returned span in process. func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { return DefaultTracer.StartSpan(ctx, name, o...) } // StartSpanWithRemoteParent starts a new child span of the span from the given parent. // // If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is // preferred for cases where the parent is propagated via an incoming request. // // Returned context contains the newly created span. You can use it to // propagate the returned span in process. func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { return DefaultTracer.StartSpanWithRemoteParent(ctx, name, parent, o...) } // FromContext returns the Span stored in a context, or a Span that is not // recording events if there isn't one. func FromContext(ctx context.Context) *Span { return DefaultTracer.FromContext(ctx) } // NewContext returns a new context with the given Span attached. func NewContext(parent context.Context, s *Span) context.Context { return DefaultTracer.NewContext(parent, s) } // SpanInterface represents a span of a trace. It has an associated SpanContext, and // stores data accumulated while the span is active. // // Ideally users should interact with Spans by calling the functions in this // package that take a Context parameter. type SpanInterface interface { // IsRecordingEvents returns true if events are being recorded for this span. 
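The package-level helpers above all delegate to DefaultTracer, so typical code never touches the Tracer interface directly: it starts a span, passes the returned context down the call chain, and callees pick the span back up from that context. A minimal sketch; the function names are invented.

```
package main

import (
	"context"

	"go.opencensus.io/trace"
)

func handleRequest(ctx context.Context) {
	ctx, span := trace.StartSpan(ctx, "handleRequest")
	defer span.End()
	lookupUser(ctx) // the child span is parented via ctx
}

func lookupUser(ctx context.Context) {
	// The caller's span can be retrieved from the context if needed...
	parent := trace.FromContext(ctx)
	_ = parent
	// ...and StartSpan uses it implicitly as the parent of the new span.
	_, span := trace.StartSpan(ctx, "lookupUser")
	defer span.End()
}

func main() {
	handleRequest(context.Background())
}
```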
// Use this check to avoid computing expensive annotations when they will never // be used. IsRecordingEvents() bool // End ends the span. End() // SpanContext returns the SpanContext of the span. SpanContext() SpanContext // SetName sets the name of the span, if it is recording events. SetName(name string) // SetStatus sets the status of the span, if it is recording events. SetStatus(status Status) // AddAttributes sets attributes in the span. // // Existing attributes whose keys appear in the attributes parameter are overwritten. AddAttributes(attributes ...Attribute) // Annotate adds an annotation with attributes. // Attributes can be nil. Annotate(attributes []Attribute, str string) // Annotatef adds an annotation with attributes. Annotatef(attributes []Attribute, format string, a ...interface{}) // AddMessageSendEvent adds a message send event to the span. // // messageID is an identifier for the message, which is recommended to be // unique in this span and the same between the send event and the receive // event (this allows to identify a message between the sender and receiver). // For example, this could be a sequence id. AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) // AddMessageReceiveEvent adds a message receive event to the span. // // messageID is an identifier for the message, which is recommended to be // unique in this span and the same between the send event and the receive // event (this allows to identify a message between the sender and receiver). // For example, this could be a sequence id. AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) // AddLink adds a link to the span. AddLink(l Link) // String prints a string representation of a span. String() string } // NewSpan is a convenience function for creating a *Span out of a *span func NewSpan(s SpanInterface) *Span { return &Span{internal: s} } // Span is a struct wrapper around the SpanInt interface, which allows correctly handling // nil spans, while also allowing the SpanInterface implementation to be swapped out. type Span struct { internal SpanInterface } // Internal returns the underlying implementation of the Span func (s *Span) Internal() SpanInterface { return s.internal } // IsRecordingEvents returns true if events are being recorded for this span. // Use this check to avoid computing expensive annotations when they will never // be used. func (s *Span) IsRecordingEvents() bool { if s == nil { return false } return s.internal.IsRecordingEvents() } // End ends the span. func (s *Span) End() { if s == nil { return } s.internal.End() } // SpanContext returns the SpanContext of the span. func (s *Span) SpanContext() SpanContext { if s == nil { return SpanContext{} } return s.internal.SpanContext() } // SetName sets the name of the span, if it is recording events. func (s *Span) SetName(name string) { if !s.IsRecordingEvents() { return } s.internal.SetName(name) } // SetStatus sets the status of the span, if it is recording events. func (s *Span) SetStatus(status Status) { if !s.IsRecordingEvents() { return } s.internal.SetStatus(status) } // AddAttributes sets attributes in the span. // // Existing attributes whose keys appear in the attributes parameter are overwritten. func (s *Span) AddAttributes(attributes ...Attribute) { if !s.IsRecordingEvents() { return } s.internal.AddAttributes(attributes...) } // Annotate adds an annotation with attributes. // Attributes can be nil. 
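As the IsRecordingEvents documentation above suggests, the check matters when building attributes or annotation payloads is itself expensive; the wrapper methods already no-op for non-recording spans, so the guard only saves the caller's own work. A sketch, with an invented expensiveDump helper standing in for costly work.

```
package main

import (
	"context"
	"fmt"

	"go.opencensus.io/trace"
)

// expensiveDump stands in for work we only want to do when it will be kept.
func expensiveDump() string { return fmt.Sprintf("%v", []int{1, 2, 3}) }

func main() {
	_, span := trace.StartSpan(context.Background(), "query",
		trace.WithSampler(trace.AlwaysSample()))
	defer span.End()

	span.AddAttributes(trace.StringAttribute("db.instance", "orders"))

	// Only build the annotation payload if the span is recording events.
	if span.IsRecordingEvents() {
		span.Annotate([]trace.Attribute{
			trace.StringAttribute("detail", expensiveDump()),
		}, "query plan")
	}
}
```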
func (s *Span) Annotate(attributes []Attribute, str string) { if !s.IsRecordingEvents() { return } s.internal.Annotate(attributes, str) } // Annotatef adds an annotation with attributes. func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) { if !s.IsRecordingEvents() { return } s.internal.Annotatef(attributes, format, a...) } // AddMessageSendEvent adds a message send event to the span. // // messageID is an identifier for the message, which is recommended to be // unique in this span and the same between the send event and the receive // event (this allows to identify a message between the sender and receiver). // For example, this could be a sequence id. func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { if !s.IsRecordingEvents() { return } s.internal.AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize) } // AddMessageReceiveEvent adds a message receive event to the span. // // messageID is an identifier for the message, which is recommended to be // unique in this span and the same between the send event and the receive // event (this allows to identify a message between the sender and receiver). // For example, this could be a sequence id. func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { if !s.IsRecordingEvents() { return } s.internal.AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize) } // AddLink adds a link to the span. func (s *Span) AddLink(l Link) { if !s.IsRecordingEvents() { return } s.internal.AddLink(l) } // String prints a string representation of a span. func (s *Span) String() string { if s == nil { return "" } return s.internal.String() } opencensus-go-0.24.0/trace/trace_go11.go000066400000000000000000000017001433102037600177310ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //go:build go1.11 // +build go1.11 package trace import ( "context" t "runtime/trace" ) func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) { if !t.IsEnabled() { // Avoid additional overhead if // runtime/trace is not enabled. return ctx, func() {} } nctx, task := t.NewTask(ctx, name) return nctx, task.End } opencensus-go-0.24.0/trace/trace_nongo11.go000066400000000000000000000014261433102037600204510ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
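The two message-event methods above are meant to be used in pairs: the sender records AddMessageSendEvent and the receiver records AddMessageReceiveEvent with the same messageID (for example a sequence number), so the two ends of an RPC can be matched up. A small sketch with made-up byte sizes.

```
package main

import (
	"context"

	"go.opencensus.io/trace"
)

func main() {
	_, sender := trace.StartSpan(context.Background(), "rpc.client",
		trace.WithSampler(trace.AlwaysSample()))
	_, receiver := trace.StartSpan(context.Background(), "rpc.server",
		trace.WithSampler(trace.AlwaysSample()))

	const msgID = 1 // same ID on both sides so the events can be correlated

	// Sender side: 2048 bytes before compression, 512 after (placeholders).
	sender.AddMessageSendEvent(msgID, 2048, 512)

	// Receiver side records the matching receive event.
	receiver.AddMessageReceiveEvent(msgID, 2048, 512)

	receiver.End()
	sender.End()
}
```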
//go:build !go1.11 // +build !go1.11 package trace import ( "context" ) func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) { return ctx, func() {} } opencensus-go-0.24.0/trace/trace_test.go000066400000000000000000000606261433102037600201550ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace import ( "context" "fmt" "reflect" "sync/atomic" "testing" "time" "go.opencensus.io/trace/tracestate" ) var ( tid = TraceID{1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 4, 8, 16, 32, 64, 128} sid = SpanID{1, 2, 4, 8, 16, 32, 64, 128} testTracestate, _ = tracestate.New(nil, tracestate.Entry{Key: "foo", Value: "bar"}) ) func init() { // no random sampling, but sample children of sampled spans. ApplyConfig(Config{DefaultSampler: ProbabilitySampler(0)}) } func TestStrings(t *testing.T) { if got, want := tid.String(), "01020304050607080102040810204080"; got != want { t.Errorf("TraceID.String: got %q want %q", got, want) } if got, want := sid.String(), "0102040810204080"; got != want { t.Errorf("SpanID.String: got %q want %q", got, want) } } func TestFromContext(t *testing.T) { want := &Span{} ctx := NewContext(context.Background(), want) got := FromContext(ctx) if got != want { t.Errorf("got Span pointer %p want %p", got, want) } } type foo int func (f foo) String() string { return "foo" } // checkChild tests that c has fields set appropriately, given that it is a child span of p. 
func checkChild(p SpanContext, c *Span) error { if c == nil { return fmt.Errorf("got nil child span, want non-nil") } if got, want := c.SpanContext().TraceID, p.TraceID; got != want { return fmt.Errorf("got child trace ID %s, want %s", got, want) } if childID, parentID := c.SpanContext().SpanID, p.SpanID; childID == parentID { return fmt.Errorf("got child span ID %s, parent span ID %s; want unequal IDs", childID, parentID) } if got, want := c.SpanContext().TraceOptions, p.TraceOptions; got != want { return fmt.Errorf("got child trace options %d, want %d", got, want) } if got, want := c.SpanContext().Tracestate, p.Tracestate; got != want { return fmt.Errorf("got child tracestate %v, want %v", got, want) } return nil } func TestStartSpan(t *testing.T) { ctx, _ := StartSpan(context.Background(), "StartSpan") s := FromContext(ctx).internal.(*span) if s.data != nil { t.Error("StartSpan: new span is recording events") } } func TestSampling(t *testing.T) { for _, test := range []struct { remoteParent bool localParent bool parentTraceOptions TraceOptions sampler Sampler wantTraceOptions TraceOptions }{ {true, false, 0, nil, 0}, {true, false, 1, nil, 1}, {true, false, 0, NeverSample(), 0}, {true, false, 1, NeverSample(), 0}, {true, false, 0, AlwaysSample(), 1}, {true, false, 1, AlwaysSample(), 1}, {false, true, 0, NeverSample(), 0}, {false, true, 1, NeverSample(), 0}, {false, true, 0, AlwaysSample(), 1}, {false, true, 1, AlwaysSample(), 1}, {false, false, 0, nil, 0}, {false, false, 0, NeverSample(), 0}, {false, false, 0, AlwaysSample(), 1}, } { var ctx context.Context if test.remoteParent { sc := SpanContext{ TraceID: tid, SpanID: sid, TraceOptions: test.parentTraceOptions, } ctx, _ = StartSpanWithRemoteParent(context.Background(), "foo", sc, WithSampler(test.sampler)) } else if test.localParent { sampler := NeverSample() if test.parentTraceOptions == 1 { sampler = AlwaysSample() } ctx2, _ := StartSpan(context.Background(), "foo", WithSampler(sampler)) ctx, _ = StartSpan(ctx2, "foo", WithSampler(test.sampler)) } else { ctx, _ = StartSpan(context.Background(), "foo", WithSampler(test.sampler)) } sc := FromContext(ctx).SpanContext() if (sc == SpanContext{}) { t.Errorf("case %#v: starting new span: no span in context", test) continue } if sc.SpanID == (SpanID{}) { t.Errorf("case %#v: starting new span: got zero SpanID, want nonzero", test) } if sc.TraceOptions != test.wantTraceOptions { t.Errorf("case %#v: starting new span: got TraceOptions %x, want %x", test, sc.TraceOptions, test.wantTraceOptions) } } // Test that for children of local spans, the default sampler has no effect. 
for _, test := range []struct { parentTraceOptions TraceOptions wantTraceOptions TraceOptions }{ {0, 0}, {0, 0}, {1, 1}, {1, 1}, } { for _, defaultSampler := range []Sampler{ NeverSample(), AlwaysSample(), ProbabilitySampler(0), } { ApplyConfig(Config{DefaultSampler: defaultSampler}) sampler := NeverSample() if test.parentTraceOptions == 1 { sampler = AlwaysSample() } ctx2, _ := StartSpan(context.Background(), "foo", WithSampler(sampler)) ctx, _ := StartSpan(ctx2, "foo") sc := FromContext(ctx).SpanContext() if (sc == SpanContext{}) { t.Errorf("case %#v: starting new child of local span: no span in context", test) continue } if sc.SpanID == (SpanID{}) { t.Errorf("case %#v: starting new child of local span: got zero SpanID, want nonzero", test) } if sc.TraceOptions != test.wantTraceOptions { t.Errorf("case %#v: starting new child of local span: got TraceOptions %x, want %x", test, sc.TraceOptions, test.wantTraceOptions) } } } ApplyConfig(Config{DefaultSampler: ProbabilitySampler(0)}) // reset the default sampler. } func TestProbabilitySampler(t *testing.T) { exported := 0 for i := 0; i < 1000; i++ { _, span := StartSpan(context.Background(), "foo", WithSampler(ProbabilitySampler(0.3))) if span.SpanContext().IsSampled() { exported++ } } if exported < 200 || exported > 400 { t.Errorf("got %f%% exported spans, want approximately 30%%", float64(exported)*0.1) } } func TestStartSpanWithRemoteParent(t *testing.T) { sc := SpanContext{ TraceID: tid, SpanID: sid, TraceOptions: 0x0, } ctx, _ := StartSpanWithRemoteParent(context.Background(), "startSpanWithRemoteParent", sc) if err := checkChild(sc, FromContext(ctx)); err != nil { t.Error(err) } ctx, _ = StartSpanWithRemoteParent(context.Background(), "startSpanWithRemoteParent", sc) if err := checkChild(sc, FromContext(ctx)); err != nil { t.Error(err) } sc = SpanContext{ TraceID: tid, SpanID: sid, TraceOptions: 0x1, Tracestate: testTracestate, } ctx, _ = StartSpanWithRemoteParent(context.Background(), "startSpanWithRemoteParent", sc) if err := checkChild(sc, FromContext(ctx)); err != nil { t.Error(err) } ctx, _ = StartSpanWithRemoteParent(context.Background(), "startSpanWithRemoteParent", sc) if err := checkChild(sc, FromContext(ctx)); err != nil { t.Error(err) } ctx2, _ := StartSpan(ctx, "StartSpan") parent := FromContext(ctx).SpanContext() if err := checkChild(parent, FromContext(ctx2)); err != nil { t.Error(err) } } // startSpan returns a context with a new Span that is recording events and will be exported. func startSpan(o StartOptions) *Span { _, span := StartSpanWithRemoteParent(context.Background(), "span0", SpanContext{ TraceID: tid, SpanID: sid, TraceOptions: 1, }, WithSampler(o.Sampler), WithSpanKind(o.SpanKind), ) return span } type testExporter struct { spans []*SpanData } func (t *testExporter) ExportSpan(s *SpanData) { t.spans = append(t.spans, s) } // endSpan ends the Span in the context and returns the exported SpanData. // // It also does some tests on the Span, and tests and clears some fields in the SpanData. 
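StartSpanWithRemoteParent, exercised by the tests above, is the entry point when the parent span context arrives over the wire, typically decoded from an incoming request header by a propagation package. A sketch with a hand-built SpanContext; the literal IDs are placeholders for values that would normally be decoded from the request.

```
package main

import (
	"context"

	"go.opencensus.io/trace"
)

func main() {
	// Normally produced by a propagation format from request headers.
	remote := trace.SpanContext{
		TraceID:      trace.TraceID{1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 4, 8, 16, 32, 64, 128},
		SpanID:       trace.SpanID{1, 2, 4, 8, 16, 32, 64, 128},
		TraceOptions: 1, // sampled bit as set by the caller
	}

	ctx, span := trace.StartSpanWithRemoteParent(context.Background(), "handle.request", remote,
		trace.WithSpanKind(trace.SpanKindServer))
	defer span.End()

	// Children started from ctx share the remote trace ID.
	_, child := trace.StartSpan(ctx, "handle.request/db")
	child.End()
}
```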
func endSpan(span *Span) (*SpanData, error) { if !span.IsRecordingEvents() { return nil, fmt.Errorf("IsRecordingEvents: got false, want true") } if !span.SpanContext().IsSampled() { return nil, fmt.Errorf("IsSampled: got false, want true") } var te testExporter RegisterExporter(&te) span.End() UnregisterExporter(&te) if len(te.spans) != 1 { return nil, fmt.Errorf("got exported spans %#v, want one span", te.spans) } got := te.spans[0] if got.SpanContext.SpanID == (SpanID{}) { return nil, fmt.Errorf("exporting span: expected nonzero SpanID") } got.SpanContext.SpanID = SpanID{} if !checkTime(&got.StartTime) { return nil, fmt.Errorf("exporting span: expected nonzero StartTime") } if !checkTime(&got.EndTime) { return nil, fmt.Errorf("exporting span: expected nonzero EndTime") } return got, nil } // checkTime checks that a nonzero time was set in x, then clears it. func checkTime(x *time.Time) bool { if x.IsZero() { return false } *x = time.Time{} return true } func TestSpanKind(t *testing.T) { tests := []struct { name string startOptions StartOptions want *SpanData }{ { name: "zero StartOptions", startOptions: StartOptions{}, want: &SpanData{ SpanContext: SpanContext{ TraceID: tid, SpanID: SpanID{}, TraceOptions: 0x1, }, ParentSpanID: sid, Name: "span0", SpanKind: SpanKindUnspecified, HasRemoteParent: true, }, }, { name: "client span", startOptions: StartOptions{ SpanKind: SpanKindClient, }, want: &SpanData{ SpanContext: SpanContext{ TraceID: tid, SpanID: SpanID{}, TraceOptions: 0x1, }, ParentSpanID: sid, Name: "span0", SpanKind: SpanKindClient, HasRemoteParent: true, }, }, { name: "server span", startOptions: StartOptions{ SpanKind: SpanKindServer, }, want: &SpanData{ SpanContext: SpanContext{ TraceID: tid, SpanID: SpanID{}, TraceOptions: 0x1, }, ParentSpanID: sid, Name: "span0", SpanKind: SpanKindServer, HasRemoteParent: true, }, }, } for _, tt := range tests { span := startSpan(tt.startOptions) got, err := endSpan(span) if err != nil { t.Fatal(err) } if !reflect.DeepEqual(got, tt.want) { t.Errorf("exporting span: got %#v want %#v", got, tt.want) } } } func TestSetSpanAttributes(t *testing.T) { span := startSpan(StartOptions{}) span.AddAttributes(StringAttribute("key1", "value1")) got, err := endSpan(span) if err != nil { t.Fatal(err) } want := &SpanData{ SpanContext: SpanContext{ TraceID: tid, SpanID: SpanID{}, TraceOptions: 0x1, }, ParentSpanID: sid, Name: "span0", Attributes: map[string]interface{}{"key1": "value1"}, HasRemoteParent: true, } if !reflect.DeepEqual(got, want) { t.Errorf("exporting span: got %#v want %#v", got, want) } } func TestSetSpanAttributesOverLimit(t *testing.T) { cfg := Config{MaxAttributesPerSpan: 2} ApplyConfig(cfg) span := startSpan(StartOptions{}) span.AddAttributes(StringAttribute("key1", "value1")) span.AddAttributes(StringAttribute("key2", "value2")) span.AddAttributes(StringAttribute("key1", "value3")) // Replace key1. 
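The cap exercised here comes from ApplyConfig: each span keeps at most MaxAttributesPerSpan entries in its LRU and reports the overflow in DroppedAttributeCount (and likewise for annotations, message events, and links). A sketch of tightening the limits globally; the numbers are arbitrary.

```
package main

import "go.opencensus.io/trace"

func main() {
	// Arbitrary illustrative limits. The tests in this file change one field
	// at a time, which suggests unset (zero) fields keep their previous
	// values; to be safe, set every limit you care about explicitly.
	trace.ApplyConfig(trace.Config{
		MaxAttributesPerSpan:       32,
		MaxAnnotationEventsPerSpan: 16,
		MaxMessageEventsPerSpan:    16,
		MaxLinksPerSpan:            8,
	})
}
```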
span.AddAttributes(StringAttribute("key4", "value4")) // Remove key2 and add key4 got, err := endSpan(span) if err != nil { t.Fatal(err) } want := &SpanData{ SpanContext: SpanContext{ TraceID: tid, SpanID: SpanID{}, TraceOptions: 0x1, }, ParentSpanID: sid, Name: "span0", Attributes: map[string]interface{}{"key1": "value3", "key4": "value4"}, HasRemoteParent: true, DroppedAttributeCount: 1, } if !reflect.DeepEqual(got, want) { t.Errorf("exporting span: got %#v want %#v", got, want) } } func TestAnnotations(t *testing.T) { span := startSpan(StartOptions{}) span.Annotatef([]Attribute{StringAttribute("key1", "value1")}, "%f", 1.5) span.Annotate([]Attribute{StringAttribute("key2", "value2")}, "Annotate") got, err := endSpan(span) if err != nil { t.Fatal(err) } for i := range got.Annotations { if !checkTime(&got.Annotations[i].Time) { t.Error("exporting span: expected nonzero Annotation Time") } } want := &SpanData{ SpanContext: SpanContext{ TraceID: tid, SpanID: SpanID{}, TraceOptions: 0x1, }, ParentSpanID: sid, Name: "span0", Annotations: []Annotation{ {Message: "1.500000", Attributes: map[string]interface{}{"key1": "value1"}}, {Message: "Annotate", Attributes: map[string]interface{}{"key2": "value2"}}, }, HasRemoteParent: true, } if !reflect.DeepEqual(got, want) { t.Errorf("exporting span: got %#v want %#v", got, want) } } func TestAnnotationsOverLimit(t *testing.T) { cfg := Config{MaxAnnotationEventsPerSpan: 2} ApplyConfig(cfg) span := startSpan(StartOptions{}) span.Annotatef([]Attribute{StringAttribute("key4", "value4")}, "%d", 1) span.Annotate([]Attribute{StringAttribute("key3", "value3")}, "Annotate oldest") span.Annotatef([]Attribute{StringAttribute("key1", "value1")}, "%f", 1.5) span.Annotate([]Attribute{StringAttribute("key2", "value2")}, "Annotate") got, err := endSpan(span) if err != nil { t.Fatal(err) } for i := range got.Annotations { if !checkTime(&got.Annotations[i].Time) { t.Error("exporting span: expected nonzero Annotation Time") } } want := &SpanData{ SpanContext: SpanContext{ TraceID: tid, SpanID: SpanID{}, TraceOptions: 0x1, }, ParentSpanID: sid, Name: "span0", Annotations: []Annotation{ {Message: "1.500000", Attributes: map[string]interface{}{"key1": "value1"}}, {Message: "Annotate", Attributes: map[string]interface{}{"key2": "value2"}}, }, DroppedAnnotationCount: 2, HasRemoteParent: true, } if !reflect.DeepEqual(got, want) { t.Errorf("exporting span: got %#v want %#v", got, want) } } func TestMessageEvents(t *testing.T) { span := startSpan(StartOptions{}) span.AddMessageReceiveEvent(3, 400, 300) span.AddMessageSendEvent(1, 200, 100) got, err := endSpan(span) if err != nil { t.Fatal(err) } for i := range got.MessageEvents { if !checkTime(&got.MessageEvents[i].Time) { t.Error("exporting span: expected nonzero MessageEvent Time") } } want := &SpanData{ SpanContext: SpanContext{ TraceID: tid, SpanID: SpanID{}, TraceOptions: 0x1, }, ParentSpanID: sid, Name: "span0", MessageEvents: []MessageEvent{ {EventType: 2, MessageID: 0x3, UncompressedByteSize: 0x190, CompressedByteSize: 0x12c}, {EventType: 1, MessageID: 0x1, UncompressedByteSize: 0xc8, CompressedByteSize: 0x64}, }, HasRemoteParent: true, } if !reflect.DeepEqual(got, want) { t.Errorf("exporting span: got %#v want %#v", got, want) } } func TestMessageEventsOverLimit(t *testing.T) { cfg := Config{MaxMessageEventsPerSpan: 2} ApplyConfig(cfg) span := startSpan(StartOptions{}) span.AddMessageReceiveEvent(5, 300, 120) span.AddMessageSendEvent(4, 100, 50) span.AddMessageReceiveEvent(3, 400, 300) span.AddMessageSendEvent(1, 200, 
100) got, err := endSpan(span) if err != nil { t.Fatal(err) } for i := range got.MessageEvents { if !checkTime(&got.MessageEvents[i].Time) { t.Error("exporting span: expected nonzero MessageEvent Time") } } want := &SpanData{ SpanContext: SpanContext{ TraceID: tid, SpanID: SpanID{}, TraceOptions: 0x1, }, ParentSpanID: sid, Name: "span0", MessageEvents: []MessageEvent{ {EventType: 2, MessageID: 0x3, UncompressedByteSize: 0x190, CompressedByteSize: 0x12c}, {EventType: 1, MessageID: 0x1, UncompressedByteSize: 0xc8, CompressedByteSize: 0x64}, }, DroppedMessageEventCount: 2, HasRemoteParent: true, } if !reflect.DeepEqual(got, want) { t.Errorf("exporting span: got %#v want %#v", got, want) } } func TestSetSpanName(t *testing.T) { want := "SpanName-1" span := startSpan(StartOptions{}) span.SetName(want) got, err := endSpan(span) if err != nil { t.Fatal(err) } if got.Name != want { t.Errorf("span.Name=%q; want %q", got.Name, want) } } func TestSetSpanNameUnsampledSpan(t *testing.T) { var nilSpanData *SpanData s := startSpan(StartOptions{Sampler: NeverSample()}) s.SetName("NoopName") sp := s.internal.(*span) if want, got := nilSpanData, sp.data; want != got { t.Errorf("span.data=%+v; want %+v", got, want) } } func TestSetSpanNameAfterSpanEnd(t *testing.T) { want := "SpanName-2" span := startSpan(StartOptions{}) span.SetName(want) got, err := endSpan(span) if err != nil { t.Fatal(err) } // updating name after span.End span.SetName("NoopName") // exported span should not be updated by previous call to SetName if got.Name != want { t.Errorf("span.Name=%q; want %q", got.Name, want) } // span should not be exported again var te testExporter RegisterExporter(&te) span.End() UnregisterExporter(&te) if len(te.spans) != 0 { t.Errorf("got exported spans %#v, wanted no spans", te.spans) } } func TestSetSpanStatus(t *testing.T) { span := startSpan(StartOptions{}) span.SetStatus(Status{Code: int32(1), Message: "request failed"}) got, err := endSpan(span) if err != nil { t.Fatal(err) } want := &SpanData{ SpanContext: SpanContext{ TraceID: tid, SpanID: SpanID{}, TraceOptions: 0x1, }, ParentSpanID: sid, Name: "span0", Status: Status{Code: 1, Message: "request failed"}, HasRemoteParent: true, } if !reflect.DeepEqual(got, want) { t.Errorf("exporting span: got %#v want %#v", got, want) } } func TestAddLink(t *testing.T) { span := startSpan(StartOptions{}) span.AddLink(Link{ TraceID: tid, SpanID: sid, Type: LinkTypeParent, Attributes: map[string]interface{}{"key5": "value5"}, }) got, err := endSpan(span) if err != nil { t.Fatal(err) } want := &SpanData{ SpanContext: SpanContext{ TraceID: tid, SpanID: SpanID{}, TraceOptions: 0x1, }, ParentSpanID: sid, Name: "span0", Links: []Link{{ TraceID: tid, SpanID: sid, Type: 2, Attributes: map[string]interface{}{"key5": "value5"}, }}, HasRemoteParent: true, } if !reflect.DeepEqual(got, want) { t.Errorf("exporting span: got %#v want %#v", got, want) } } func TestAddLinkOverLimit(t *testing.T) { cfg := Config{MaxLinksPerSpan: 1} ApplyConfig(cfg) span := startSpan(StartOptions{}) span.AddLink(Link{ TraceID: tid, SpanID: sid, Type: LinkTypeParent, Attributes: map[string]interface{}{"key4": "value4"}, }) span.AddLink(Link{ TraceID: tid, SpanID: sid, Type: LinkTypeParent, Attributes: map[string]interface{}{"key5": "value5"}, }) got, err := endSpan(span) if err != nil { t.Fatal(err) } want := &SpanData{ SpanContext: SpanContext{ TraceID: tid, SpanID: SpanID{}, TraceOptions: 0x1, }, ParentSpanID: sid, Name: "span0", Links: []Link{{ TraceID: tid, SpanID: sid, Type: 2, Attributes: 
map[string]interface{}{"key5": "value5"}, }}, DroppedLinkCount: 1, HasRemoteParent: true, } if !reflect.DeepEqual(got, want) { t.Errorf("exporting span: got %#v want %#v", got, want) } } func TestUnregisterExporter(t *testing.T) { var te testExporter RegisterExporter(&te) UnregisterExporter(&te) ctx := startSpan(StartOptions{}) endSpan(ctx) if len(te.spans) != 0 { t.Error("unregistered Exporter was called") } } func TestBucket(t *testing.T) { // make a bucket of size 5 and add 10 spans b := makeBucket(5) for i := 1; i <= 10; i++ { b.nextTime = time.Time{} // reset the time so that the next span is accepted. // add a span, with i stored in the TraceID so we can test for it later. b.add(&SpanData{SpanContext: SpanContext{TraceID: TraceID{byte(i)}}, EndTime: time.Now()}) if i <= 5 { if b.size() != i { t.Fatalf("got bucket size %d, want %d %#v\n", b.size(), i, b) } for j := 0; j < i; j++ { if b.span(j).TraceID[0] != byte(j+1) { t.Errorf("got span index %d, want %d\n", b.span(j).TraceID[0], j+1) } } } else { if b.size() != 5 { t.Fatalf("got bucket size %d, want 5\n", b.size()) } for j := 0; j < 5; j++ { want := i - 4 + j if b.span(j).TraceID[0] != byte(want) { t.Errorf("got span index %d, want %d\n", b.span(j).TraceID[0], want) } } } } // expand the bucket b.resize(20) if b.size() != 5 { t.Fatalf("after resizing upwards: got bucket size %d, want 5\n", b.size()) } for i := 0; i < 5; i++ { want := 6 + i if b.span(i).TraceID[0] != byte(want) { t.Errorf("after resizing upwards: got span index %d, want %d\n", b.span(i).TraceID[0], want) } } // shrink the bucket b.resize(3) if b.size() != 3 { t.Fatalf("after resizing downwards: got bucket size %d, want 3\n", b.size()) } for i := 0; i < 3; i++ { want := 8 + i if b.span(i).TraceID[0] != byte(want) { t.Errorf("after resizing downwards: got span index %d, want %d\n", b.span(i).TraceID[0], want) } } } type exporter map[string]*SpanData func (e exporter) ExportSpan(s *SpanData) { e[s.Name] = s } func Test_Issue328_EndSpanTwice(t *testing.T) { spans := make(exporter) RegisterExporter(&spans) defer UnregisterExporter(&spans) ctx := context.Background() ctx, span := StartSpan(ctx, "span-1", WithSampler(AlwaysSample())) span.End() span.End() UnregisterExporter(&spans) if len(spans) != 1 { t.Fatalf("expected only a single span, got %#v", spans) } } func TestStartSpanAfterEnd(t *testing.T) { spans := make(exporter) RegisterExporter(&spans) defer UnregisterExporter(&spans) ctx, span0 := StartSpan(context.Background(), "parent", WithSampler(AlwaysSample())) ctx1, span1 := StartSpan(ctx, "span-1", WithSampler(AlwaysSample())) span1.End() // Start a new span with the context containing span-1 // even though span-1 is ended, we still add this as a new child of span-1 _, span2 := StartSpan(ctx1, "span-2", WithSampler(AlwaysSample())) span2.End() span0.End() UnregisterExporter(&spans) if got, want := len(spans), 3; got != want { t.Fatalf("len(%#v) = %d; want %d", spans, got, want) } if got, want := spans["span-1"].TraceID, spans["parent"].TraceID; got != want { t.Errorf("span-1.TraceID=%q; want %q", got, want) } if got, want := spans["span-2"].TraceID, spans["parent"].TraceID; got != want { t.Errorf("span-2.TraceID=%q; want %q", got, want) } if got, want := spans["span-1"].ParentSpanID, spans["parent"].SpanID; got != want { t.Errorf("span-1.ParentSpanID=%q; want %q (parent.SpanID)", got, want) } if got, want := spans["span-2"].ParentSpanID, spans["span-1"].SpanID; got != want { t.Errorf("span-2.ParentSpanID=%q; want %q (span1.SpanID)", got, want) } } func 
TestChildSpanCount(t *testing.T) { spans := make(exporter) RegisterExporter(&spans) defer UnregisterExporter(&spans) ctx, span0 := StartSpan(context.Background(), "parent", WithSampler(AlwaysSample())) ctx1, span1 := StartSpan(ctx, "span-1", WithSampler(AlwaysSample())) _, span2 := StartSpan(ctx1, "span-2", WithSampler(AlwaysSample())) span2.End() span1.End() _, span3 := StartSpan(ctx, "span-3", WithSampler(AlwaysSample())) span3.End() span0.End() UnregisterExporter(&spans) if got, want := len(spans), 4; got != want { t.Fatalf("len(%#v) = %d; want %d", spans, got, want) } if got, want := spans["span-3"].ChildSpanCount, 0; got != want { t.Errorf("span-3.ChildSpanCount=%q; want %q", got, want) } if got, want := spans["span-2"].ChildSpanCount, 0; got != want { t.Errorf("span-2.ChildSpanCount=%q; want %q", got, want) } if got, want := spans["span-1"].ChildSpanCount, 1; got != want { t.Errorf("span-1.ChildSpanCount=%q; want %q", got, want) } if got, want := spans["parent"].ChildSpanCount, 2; got != want { t.Errorf("parent.ChildSpanCount=%q; want %q", got, want) } } func TestNilSpanEnd(t *testing.T) { var span *Span span.End() } func TestExecutionTracerTaskEnd(t *testing.T) { var n uint64 executionTracerTaskEnd := func() { atomic.AddUint64(&n, 1) } var spans []*span _, s := StartSpan(context.Background(), "foo", WithSampler(NeverSample())) sp := s.internal.(*span) sp.executionTracerTaskEnd = executionTracerTaskEnd spans = append(spans, sp) // never sample _, s = StartSpanWithRemoteParent(context.Background(), "foo", SpanContext{ TraceID: TraceID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, SpanID: SpanID{0, 1, 2, 3, 4, 5, 6, 7}, TraceOptions: 0, }) sp = s.internal.(*span) sp.executionTracerTaskEnd = executionTracerTaskEnd spans = append(spans, sp) // parent not sampled _, s = StartSpan(context.Background(), "foo", WithSampler(AlwaysSample())) sp = s.internal.(*span) sp.executionTracerTaskEnd = executionTracerTaskEnd spans = append(spans, sp) // always sample for _, span := range spans { span.End() } if got, want := n, uint64(len(spans)); got != want { t.Fatalf("Execution tracer task ended for %v spans; want %v", got, want) } } opencensus-go-0.24.0/trace/tracestate/000077500000000000000000000000001433102037600176165ustar00rootroot00000000000000opencensus-go-0.24.0/trace/tracestate/tracestate.go000066400000000000000000000111201433102037600222770ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package tracestate implements support for the Tracestate header of the // W3C TraceContext propagation format. 
package tracestate import ( "fmt" "regexp" ) const ( keyMaxSize = 256 valueMaxSize = 256 maxKeyValuePairs = 32 ) const ( keyWithoutVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` keyWithVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` keyFormat = `(` + keyWithoutVendorFormat + `)|(` + keyWithVendorFormat + `)` valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` ) var keyValidationRegExp = regexp.MustCompile(`^(` + keyFormat + `)$`) var valueValidationRegExp = regexp.MustCompile(`^(` + valueFormat + `)$`) // Tracestate represents tracing-system specific context in a list of key-value pairs. Tracestate allows different // vendors propagate additional information and inter-operate with their legacy Id formats. type Tracestate struct { entries []Entry } // Entry represents one key-value pair in a list of key-value pair of Tracestate. type Entry struct { // Key is an opaque string up to 256 characters printable. It MUST begin with a lowercase letter, // and can only contain lowercase letters a-z, digits 0-9, underscores _, dashes -, asterisks *, and // forward slashes /. Key string // Value is an opaque string up to 256 characters printable ASCII RFC0020 characters (i.e., the // range 0x20 to 0x7E) except comma , and =. Value string } // Entries returns a slice of Entry. func (ts *Tracestate) Entries() []Entry { if ts == nil { return nil } return ts.entries } func (ts *Tracestate) remove(key string) *Entry { for index, entry := range ts.entries { if entry.Key == key { ts.entries = append(ts.entries[:index], ts.entries[index+1:]...) return &entry } } return nil } func (ts *Tracestate) add(entries []Entry) error { for _, entry := range entries { ts.remove(entry.Key) } if len(ts.entries)+len(entries) > maxKeyValuePairs { return fmt.Errorf("adding %d key-value pairs to current %d pairs exceeds the limit of %d", len(entries), len(ts.entries), maxKeyValuePairs) } ts.entries = append(entries, ts.entries...) return nil } func isValid(entry Entry) bool { return keyValidationRegExp.MatchString(entry.Key) && valueValidationRegExp.MatchString(entry.Value) } func containsDuplicateKey(entries ...Entry) (string, bool) { keyMap := make(map[string]int) for _, entry := range entries { if _, ok := keyMap[entry.Key]; ok { return entry.Key, true } keyMap[entry.Key] = 1 } return "", false } func areEntriesValid(entries ...Entry) (*Entry, bool) { for _, entry := range entries { if !isValid(entry) { return &entry, false } } return nil, true } // New creates a Tracestate object from a parent and/or entries (key-value pair). // Entries from the parent are copied if present. The entries passed to this function // are inserted in front of those copied from the parent. If an entry copied from the // parent contains the same key as one of the entry in entries then the entry copied // from the parent is removed. See add func. // // An error is returned with nil Tracestate if // 1. one or more entry in entries is invalid. // 2. two or more entries in the input entries have the same key. // 3. the number of entries combined from the parent and the input entries exceeds maxKeyValuePairs. // (duplicate entry is counted only once). 
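Putting the rules above together: New copies the parent's entries, puts the caller's entries in front, and drops any copied entry whose key is being re-set. A short sketch; the vendor keys and values are invented for illustration and chosen to satisfy the key/value formats defined above.

```
package main

import (
	"fmt"

	"go.opencensus.io/trace/tracestate"
)

func main() {
	// Parent state as it might have arrived on an incoming request.
	parent, err := tracestate.New(nil, tracestate.Entry{Key: "congo", Value: "t61rcWkgMzE"})
	if err != nil {
		panic(err)
	}

	// Add this system's entry; it is inserted at the front, and any parent
	// entry with the same key would be removed from the copied state.
	ts, err := tracestate.New(parent, tracestate.Entry{Key: "rojo", Value: "00f067aa0ba902b7"})
	if err != nil {
		panic(err)
	}

	for _, e := range ts.Entries() {
		fmt.Printf("%s=%s\n", e.Key, e.Value)
	}
}
```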
func New(parent *Tracestate, entries ...Entry) (*Tracestate, error) { if parent == nil && len(entries) == 0 { return nil, nil } if entry, ok := areEntriesValid(entries...); !ok { return nil, fmt.Errorf("key-value pair {%s, %s} is invalid", entry.Key, entry.Value) } if key, duplicate := containsDuplicateKey(entries...); duplicate { return nil, fmt.Errorf("contains duplicate keys (%s)", key) } tracestate := Tracestate{} if parent != nil && len(parent.entries) > 0 { tracestate.entries = append([]Entry{}, parent.entries...) } err := tracestate.add(entries) if err != nil { return nil, err } return &tracestate, nil } opencensus-go-0.24.0/trace/tracestate/tracestate_test.go000066400000000000000000000234171433102037600233520ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package tracestate import ( "fmt" "testing" ) func checkFront(t *testing.T, tracestate *Tracestate, wantKey, testname string) { gotKey := tracestate.entries[0].Key if gotKey != wantKey { t.Errorf("test:%s: first entry in the list: got %q want %q", testname, gotKey, wantKey) } } func checkBack(t *testing.T, tracestate *Tracestate, wantKey, testname string) { gotKey := tracestate.entries[len(tracestate.entries)-1].Key if gotKey != wantKey { t.Errorf("test:%s: last entry in the list: got %q want %q", testname, gotKey, wantKey) } } func checkSize(t *testing.T, tracestate *Tracestate, wantSize int, testname string) { if gotSize := len(tracestate.entries); gotSize != wantSize { t.Errorf("test:%s: size of the list: got %q want %q", testname, gotSize, wantSize) } } func (ts *Tracestate) get(key string) (string, bool) { if ts == nil { return "", false } for _, entry := range ts.entries { if entry.Key == key { return entry.Value, true } } return "", false } func checkKeyValue(t *testing.T, tracestate *Tracestate, key, wantValue, testname string) { wantOk := true if wantValue == "" { wantOk = false } gotValue, gotOk := tracestate.get(key) if wantOk != gotOk || gotValue != wantValue { t.Errorf("test:%s: get value for key=%s failed: got %q want %q", testname, key, gotValue, wantValue) } } func checkError(t *testing.T, tracestate *Tracestate, err error, testname, msg string) { if err != nil { t.Errorf("test:%s: %s: tracestate=%v, error= %v", testname, msg, tracestate, err) } } func wantError(t *testing.T, tracestate *Tracestate, err error, testname, msg string) { if err == nil { t.Errorf("test:%s: %s: tracestate=%v, error=%v", testname, msg, tracestate, err) } } func TestCreateWithNullParent(t *testing.T) { key1, value1 := "hello", "world" testname := "TestCreateWithNullParent" entry := Entry{key1, value1} tracestate, err := New(nil, entry) checkError(t, tracestate, err, testname, "create failed from null parent") checkKeyValue(t, tracestate, key1, value1, testname) } func TestCreateFromParentWithSingleKey(t *testing.T) { key1, value1, key2, value2 := "hello", "world", "foo", "bar" testname := "TestCreateFromParentWithSingleKey" entry1 := Entry{key1, value1} entry2 := Entry{key2, 
value2} parent, _ := New(nil, entry1) tracestate, err := New(parent, entry2) checkError(t, tracestate, err, testname, "create failed from parent with single key") checkKeyValue(t, tracestate, key2, value2, testname) checkFront(t, tracestate, key2, testname) checkBack(t, tracestate, key1, testname) } func TestCreateFromParentWithDoubleKeys(t *testing.T) { key1, value1, key2, value2, key3, value3 := "hello", "world", "foo", "bar", "bar", "baz" testname := "TestCreateFromParentWithDoubleKeys" entry1 := Entry{key1, value1} entry2 := Entry{key2, value2} entry3 := Entry{key3, value3} parent, _ := New(nil, entry2, entry1) tracestate, err := New(parent, entry3) checkError(t, tracestate, err, testname, "create failed from parent with double keys") checkKeyValue(t, tracestate, key3, value3, testname) checkFront(t, tracestate, key3, testname) checkBack(t, tracestate, key1, testname) } func TestCreateFromParentWithExistingKey(t *testing.T) { key1, value1, key2, value2, key3, value3 := "hello", "world", "foo", "bar", "hello", "baz" testname := "TestCreateFromParentWithExistingKey" entry1 := Entry{key1, value1} entry2 := Entry{key2, value2} entry3 := Entry{key3, value3} parent, _ := New(nil, entry2, entry1) tracestate, err := New(parent, entry3) checkError(t, tracestate, err, testname, "create failed with an existing key") checkKeyValue(t, tracestate, key3, value3, testname) checkFront(t, tracestate, key3, testname) checkBack(t, tracestate, key2, testname) checkSize(t, tracestate, 2, testname) } func TestImplicitImmutableTracestate(t *testing.T) { key1, value1, key2, value2, key3, value3 := "hello", "world", "hello", "bar", "foo", "baz" testname := "TestImplicitImmutableTracestate" entry1 := Entry{key1, value1} entry2 := Entry{key2, value2} parent, _ := New(nil, entry1) tracestate, err := New(parent, entry2) checkError(t, tracestate, err, testname, "create failed") checkKeyValue(t, tracestate, key2, value2, testname) checkKeyValue(t, parent, key2, value1, testname) // Get and update entries. entries := tracestate.Entries() entry := Entry{key3, value3} entries = append(entries, entry) // Check Tracestate does not have key3. checkKeyValue(t, tracestate, key3, "", testname) // Check that we added the key3 in the entries tracestate, err = New(nil, entries...) 
checkError(t, tracestate, err, testname, "create failed") checkKeyValue(t, tracestate, key3, value3, testname) } func TestKeyWithValidChar(t *testing.T) { testname := "TestKeyWithValidChar" arrayRune := []rune("") for c := 'a'; c <= 'z'; c++ { arrayRune = append(arrayRune, c) } for c := '0'; c <= '9'; c++ { arrayRune = append(arrayRune, c) } arrayRune = append(arrayRune, '_') arrayRune = append(arrayRune, '-') arrayRune = append(arrayRune, '*') arrayRune = append(arrayRune, '/') key := string(arrayRune) entry := Entry{key, "world"} tracestate, err := New(nil, entry) checkError(t, tracestate, err, testname, "create failed when the key contains all valid characters") } func TestKeyWithInvalidChar(t *testing.T) { testname := "TestKeyWithInvalidChar" keys := []string{"1ab", "1ab2", "Abc", " abc", "a=b"} for _, key := range keys { entry := Entry{key, "world"} tracestate, err := New(nil, entry) wantError(t, tracestate, err, testname, fmt.Sprintf( "create did not err with invalid key=%q", key)) } } func TestNilKey(t *testing.T) { testname := "TestNilKey" entry := Entry{"", "world"} tracestate, err := New(nil, entry) wantError(t, tracestate, err, testname, "create did not err when the key is nil (\"\")") } func TestValueWithInvalidChar(t *testing.T) { testname := "TestValueWithInvalidChar" keys := []string{"A=B", "A,B", "AB "} for _, value := range keys { entry := Entry{"hello", value} tracestate, err := New(nil, entry) wantError(t, tracestate, err, testname, fmt.Sprintf("create did not err when the value is invalid (%q)", value)) } } func TestNilValue(t *testing.T) { testname := "TestNilValue" tracestate, err := New(nil, Entry{"hello", ""}) wantError(t, tracestate, err, testname, "create did not err when the value is nil (\"\")") } func TestInvalidKeyLen(t *testing.T) { testname := "TestInvalidKeyLen" arrayRune := []rune("") for i := 0; i <= keyMaxSize+1; i++ { arrayRune = append(arrayRune, 'a') } key := string(arrayRune) tracestate, err := New(nil, Entry{key, "world"}) wantError(t, tracestate, err, testname, fmt.Sprintf("create did not err when the length (%d) of the key is larger than max (%d)", len(key), keyMaxSize)) } func TestInvalidValueLen(t *testing.T) { testname := "TestInvalidValueLen" arrayRune := []rune("") for i := 0; i <= valueMaxSize+1; i++ { arrayRune = append(arrayRune, 'a') } value := string(arrayRune) tracestate, err := New(nil, Entry{"hello", value}) wantError(t, tracestate, err, testname, fmt.Sprintf("create did not err when the length (%d) of the value is larger than max (%d)", len(value), valueMaxSize)) } func TestCreateFromArrayWithOverLimitKVPairs(t *testing.T) { testname := "TestCreateFromArrayWithOverLimitKVPairs" entries := []Entry{} for i := 0; i <= maxKeyValuePairs; i++ { key := fmt.Sprintf("a%db", i) entry := Entry{key, "world"} entries = append(entries, entry) } tracestate, err := New(nil, entries...) wantError(t, tracestate, err, testname, fmt.Sprintf("create did not err when the number (%d) of key-value pairs is larger than max (%d)", len(entries), maxKeyValuePairs)) } func TestCreateFromEmptyArray(t *testing.T) { testname := "TestCreateFromEmptyArray" tracestate, err := New(nil, nil...) 
checkError(t, tracestate, err, testname, "failed to create nil tracestate") } func TestCreateFromParentWithOverLimitKVPairs(t *testing.T) { testname := "TestCreateFromParentWithOverLimitKVPairs" entries := []Entry{} for i := 0; i < maxKeyValuePairs; i++ { key := fmt.Sprintf("a%db", i) entry := Entry{key, "world"} entries = append(entries, entry) } parent, err := New(nil, entries...) checkError(t, parent, err, testname, fmt.Sprintf("create failed to add %d key-value pair", maxKeyValuePairs)) // Add one more to go over the limit key := fmt.Sprintf("a%d", maxKeyValuePairs) tracestate, err := New(parent, Entry{key, "world"}) wantError(t, tracestate, err, testname, fmt.Sprintf("create did not err when attempted to exceed the key-value pair limit of %d", maxKeyValuePairs)) } func TestCreateFromArrayWithDuplicateKeys(t *testing.T) { key1, value1, key2, value2, key3, value3 := "hello", "world", "foo", "bar", "hello", "baz" testname := "TestCreateFromArrayWithDuplicateKeys" entry1 := Entry{key1, value1} entry2 := Entry{key2, value2} entry3 := Entry{key3, value3} tracestate, err := New(nil, entry1, entry2, entry3) wantError(t, tracestate, err, testname, "create did not err when entries contained duplicate keys") } func TestEntriesWithNil(t *testing.T) { ts, err := New(nil) if err != nil { t.Fatal(err) } if got, want := len(ts.Entries()), 0; got != want { t.Errorf("zero value should have no entries, got %v; want %v", got, want) } } opencensus-go-0.24.0/zpages/000077500000000000000000000000001433102037600156525ustar00rootroot00000000000000opencensus-go-0.24.0/zpages/example_test.go000066400000000000000000000015271433102037600207000ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package zpages_test import ( "log" "net/http" "go.opencensus.io/zpages" ) func Example() { // Both /debug/tracez and /debug/rpcz will be served on the default mux. zpages.Handle(nil, "/debug") log.Fatal(http.ListenAndServe("127.0.0.1:9999", nil)) } opencensus-go-0.24.0/zpages/formatter_test.go000066400000000000000000000020501433102037600212400ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
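The zpages example above registers the handlers on the default mux by passing nil; passing a dedicated mux works the same way and keeps the diagnostic pages off the application's main listener. A sketch under that assumption; the address and prefix are arbitrary.

```
package main

import (
	"log"
	"net/http"

	"go.opencensus.io/zpages"
)

func main() {
	mux := http.NewServeMux()

	// Serves /debug/tracez and /debug/rpcz on this mux only.
	zpages.Handle(mux, "/debug")

	log.Fatal(http.ListenAndServe("127.0.0.1:8081", mux))
}
```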
// package zpages import "testing" func TestCountFormatter(t *testing.T) { tests := []struct { in uint64 want string }{ {0, " "}, {1, "1"}, {1024, "1024"}, {1e5, "100000"}, {1e6, "1.000 M "}, {1e9, "1.000 G "}, {1e8 + 2e9, "2.100 G "}, {1e12, "1.000 T "}, {1e15, "1.000 P "}, {1e18, "1.000 E "}, } for _, tt := range tests { if g, w := countFormatter(tt.in), tt.want; g != w { t.Errorf("%d got %q want %q", tt.in, g, w) } } } opencensus-go-0.24.0/zpages/internal/000077500000000000000000000000001433102037600174665ustar00rootroot00000000000000opencensus-go-0.24.0/zpages/internal/gen.go000066400000000000000000000014071433102037600205700ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package internal // import "go.opencensus.io/zpages/internal" // go get https://github.com/mjibson/esc.git //go:generate esc -pkg internal -o resources.go public/ templates/ opencensus-go-0.24.0/zpages/internal/public/000077500000000000000000000000001433102037600207445ustar00rootroot00000000000000opencensus-go-0.24.0/zpages/internal/public/opencensus.css000066400000000000000000000000001433102037600236260ustar00rootroot00000000000000opencensus-go-0.24.0/zpages/internal/resources.go000066400000000000000000000153371433102037600220400ustar00rootroot00000000000000// Code generated by "esc -pkg resources -o resources.go public/ templates/"; DO NOT EDIT. 
package internal import ( "bytes" "compress/gzip" "encoding/base64" "io/ioutil" "net/http" "os" "path" "sync" "time" ) type _escLocalFS struct{} var _escLocal _escLocalFS type _escStaticFS struct{} var _escStatic _escStaticFS type _escDirectory struct { fs http.FileSystem name string } type _escFile struct { compressed string size int64 modtime int64 local string isDir bool once sync.Once data []byte name string } func (_escLocalFS) Open(name string) (http.File, error) { f, present := _escData[path.Clean(name)] if !present { return nil, os.ErrNotExist } return os.Open(f.local) } func (_escStaticFS) prepare(name string) (*_escFile, error) { f, present := _escData[path.Clean(name)] if !present { return nil, os.ErrNotExist } var err error f.once.Do(func() { f.name = path.Base(name) if f.size == 0 { return } var gr *gzip.Reader b64 := base64.NewDecoder(base64.StdEncoding, bytes.NewBufferString(f.compressed)) gr, err = gzip.NewReader(b64) if err != nil { return } f.data, err = ioutil.ReadAll(gr) }) if err != nil { return nil, err } return f, nil } func (fs _escStaticFS) Open(name string) (http.File, error) { f, err := fs.prepare(name) if err != nil { return nil, err } return f.File() } func (dir _escDirectory) Open(name string) (http.File, error) { return dir.fs.Open(dir.name + name) } func (f *_escFile) File() (http.File, error) { type httpFile struct { *bytes.Reader *_escFile } return &httpFile{ Reader: bytes.NewReader(f.data), _escFile: f, }, nil } func (f *_escFile) Close() error { return nil } func (f *_escFile) Readdir(count int) ([]os.FileInfo, error) { return nil, nil } func (f *_escFile) Stat() (os.FileInfo, error) { return f, nil } func (f *_escFile) Name() string { return f.name } func (f *_escFile) Size() int64 { return f.size } func (f *_escFile) Mode() os.FileMode { return 0 } func (f *_escFile) ModTime() time.Time { return time.Unix(f.modtime, 0) } func (f *_escFile) IsDir() bool { return f.isDir } func (f *_escFile) Sys() interface{} { return f } // FS returns a http.Filesystem for the embedded assets. If useLocal is true, // the filesystem's contents are instead used. func FS(useLocal bool) http.FileSystem { if useLocal { return _escLocal } return _escStatic } // Dir returns a http.Filesystem for the embedded assets on a given prefix dir. // If useLocal is true, the filesystem's contents are instead used. func Dir(useLocal bool, name string) http.FileSystem { if useLocal { return _escDirectory{fs: _escLocal, name: name} } return _escDirectory{fs: _escStatic, name: name} } // FSByte returns the named file from the embedded assets. If useLocal is // true, the filesystem's contents are instead used. func FSByte(useLocal bool, name string) ([]byte, error) { if useLocal { f, err := _escLocal.Open(name) if err != nil { return nil, err } b, err := ioutil.ReadAll(f) _ = f.Close() return b, err } f, err := _escStatic.prepare(name) if err != nil { return nil, err } return f.data, nil } // FSMustByte is the same as FSByte, but panics if name is not present. func FSMustByte(useLocal bool, name string) []byte { b, err := FSByte(useLocal, name) if err != nil { panic(err) } return b } // FSString is the string version of FSByte. func FSString(useLocal bool, name string) (string, error) { b, err := FSByte(useLocal, name) return string(b), err } // FSMustString is the string version of FSMustByte. 
func FSMustString(useLocal bool, name string) string { return string(FSMustByte(useLocal, name)) } var _escData = map[string]*_escFile{ "/public/opencensus.css": { local: "public/opencensus.css", size: 0, modtime: 1519153040, compressed: ` H4sIAAAAAAAC/wEAAP//AAAAAAAAAAA= `, }, "/templates/footer.html": { local: "templates/footer.html", size: 16, modtime: 1519153248, compressed: ` H4sIAAAAAAAC/7LRT8pPqbTjstHPKMnNseMCBAAA//8ATCBFEAAAAA== `, }, "/templates/header.html": { local: "templates/header.html", size: 523, modtime: 1519164535, compressed: ` H4sIAAAAAAAC/5TRv07rMBQG8D1P4ev1qvat7oKQEwZgYEAwdGF0nZP4UP+JfE6oqqrvjkyKBGIpky0f +6fP+syfu6fbzcvzvfAcQ9eYuohg09hKSLIzHmzfNUIIYSKwFc7bQsCtnHlYXcnziJEDdMej2tTN6WT0 crJMA6adKBBaST4XdjMLdDlJ4QsMrdR6v9+rPEFykGgmhVkP9q1eUeiy1D8ZPgQgD8CfxjRvAzr9BXFE F730zBNdaz3kxKTGnMcAdkJSLkddM9wMNmI4tI+WoaANfx9cTiR/QbvcgxqBYx/q39bqv/qn45lTmHoc 82rCtFMR00fwM06u4MSihwGKoOIuJSvzSrIzehG6xuilSLPN/aHWvP7Wll93zXsAAAD//6iqQ1ULAgAA `, }, "/templates/rpcz.html": { local: "templates/rpcz.html", size: 2626, modtime: 1519164559, compressed: ` H4sIAAAAAAAC/+yW3WrbMBTH7/0UwmUjYyxJU3o1W1C6sQ4WNrq+gCwdfzBFMtJx9+Hl3cex3DhNCrOz XfbGxFZ+5/8D+Ry5bZ0wBbD5VxT4wdmm9tttlNQ8QZFpYFkhrbYuPQMAyHP2vVJYpufL5QueoGNCV4VJ JRgExxNUPMmtQearX5C+XvG2nb+rHEisrNlukwUt8mRB/1ugowuF8GRR8+ggMD7L8/wSIGa5ExtIM/uD SdDa10JWpkiX3V0tlKK7FY8ixhgjp6ECAFwqiHm3FJZLCi2DKnnsLzGphfdprM9jJi0lmfSCX9vG4FTo 6r5gWiAY+ZPNNv7VVP5WILCZq+ViOvvR1A2y2bfsBPZzg6fD752zzndU2Aza47H70r9KGnLka8DSql38 S5P5+u3x9Vgr1HBVUSJfV2bel3i8cOOefn5ncf6c+Zz5XzKfaADyGLrlYn9UvlnxB52DERlFw4Q2oval RRrQDyX3zBVPMhq4oXlo2mZHjXvcyqrXjzv/mAp0A29dmQbht6TfVGscdWMbN5W5syj0I2ik59V98SmM 2F5240elDlynO5kKwjtspO3tl2sa6r2qEwijYnusM50KBdE9aqRqd4DsySqBYnT2Du6UT0OD+AE7Uj6c YKfaD/R0/YH9F/9wiE5uv4BN7L8A/a0BwxxqWzCKPg37b7bdgz8BAAD//6NjPmJCCgAA `, }, "/templates/summary.html": { local: "templates/summary.html", size: 1619, modtime: 1519164559, compressed: ` H4sIAAAAAAAC/6yVPW/bMBCG9/yKg2p4qu2kW12JRQtkCzok3YoOlHSWBdMngaSc2iz/e8EP+Stqi8Re DIo63t3zvjwr1TwXCEpvBWZJ3sgS5US1vKipmsNtwm4AAFItwyI8lFA0QrWcsjvgoq4oE7jQLM3ZU8sJ vvE1prOcpTNdnhxjY8pV+yn8/j5+8KFDiZMCSaNMXPLHjqim6i2pB5v/OFDjgWukYgtPfN0KVFerNcRz L2Ujhyuls17xv0t/pcbelsYYyalCmEbBvnbFCrVzXlmb6uU/wX8YM7X2Z0ReMmOQSmuviRIENGbEYZ7B 9LvkBap7KtumJm2teyNqWin/9sGt/GaAGsnmuaYSf733Sx/z2DyHkAmMiK/RbzreuFkvADdIh7NOBrkf LF6sKtl0VM7hHSImjlko9EGBHyZRAUdvTMzdD8b/9IgtRKijVC/k57CUuMgSp421n3dOOgeUGePBrB3v 9LbF7NY1Of1S6HrjG+HsUMr1ft7wIXIfdUb1aoa9Ib0bGy66IH28d07ACxjvxjvV5X5pzCj65rhDpSPs /o6e0J9Pge+G+dv98tClYlxs6IcDbPDW/wGpE8cGfB2Iiij9kHnIdOY/JezmTwAAAP//Dz6TJ1MGAAA= `, }, "/templates/traces.html": { local: "templates/traces.html", size: 420, modtime: 1519164578, compressed: ` H4sIAAAAAAAC/4yQsU70MBCEez/FKtIv3RW/w6WgOIw7kGgoDiRqO14gwnGM1xEgs++OnKMA5Qq2ssYz I82nolZW30UT4NaMuIdSZH0wg2qtVm3UQkVd1XlkhgO+zkiZvj8SavHwjAFO35U3kdDBhrDfiv9/PFFK MuEJQR6mN2IuJaYh5Edo/nXn1MBmCA7fQV4P6B3B2ZYZfnh23dqzO3p+i12tlp85mR4HxyxKweCYVbvs UjYt25UFyh8eL5t+8lPaWz/jRaPva+zGVUowogkEZMbo0UE6MpKiIlinTf9yMh6mvKpYMH8FAAD//yQs JUakAQAA `, }, "/": { isDir: true, local: "", }, "/public": { isDir: true, local: "public", }, "/templates": { isDir: true, local: "templates", }, } opencensus-go-0.24.0/zpages/internal/templates/000077500000000000000000000000001433102037600214645ustar00rootroot00000000000000opencensus-go-0.24.0/zpages/internal/templates/footer.html000066400000000000000000000000201433102037600236400ustar00rootroot00000000000000 opencensus-go-0.24.0/zpages/internal/templates/header.html000066400000000000000000000010131433102037600235750ustar00rootroot00000000000000 {{.Title}}

{{.Title}}

opencensus-go-0.24.0/zpages/internal/templates/rpcz.html000066400000000000000000000051021433102037600233260ustar00rootroot00000000000000{{range .StatGroups}}

{{.Direction}}

{{range .Snapshots}} {{end}}
Count Avg latency (ms) Rate (rpc/s) Input (kb/s) Output (kb/s) Errors
Method     Min.Hr.Tot.     Min.Hr.Tot.     Min.Hr.Tot.     Min.Hr.Tot.     Min.Hr.Tot.     Min.Hr.Tot.
 
{{.Method}} {{.CountMinute|count}} {{.CountHour|count}} {{.CountTotal|count}} {{.AvgLatencyMinute|ms}} {{.AvgLatencyHour|ms}} {{.AvgLatencyTotal|ms}} {{.RPCRateMinute|rate}} {{.RPCRateHour|rate}} {{.RPCRateTotal|rate}} {{.InputRateMinute|datarate}} {{.InputRateHour|datarate}} {{.InputRateTotal|datarate}} {{.OutputRateMinute|datarate}} {{.OutputRateHour|datarate}} {{.OutputRateTotal|datarate}} {{.ErrorsMinute|count}} {{.ErrorsHour|count}} {{.ErrorsTotal|count}}
{{end}} opencensus-go-0.24.0/zpages/internal/templates/summary.html000066400000000000000000000031231433102037600240460ustar00rootroot00000000000000 {{range .LatencyBucketNames}}{{end}} {{$a := .TracesEndpoint}} {{$links := .Links}} {{range $rowindex, $row := .Rows}} {{- $name := .Name}} {{- if even $rowindex}}{{else}}{{end -}} {{- if $links -}} {{- else -}} {{- end -}} {{- if $links -}} {{range $index, $value := .Latency}}{{end}} {{- else -}} {{range .Latency}}{{end}} {{- end -}} {{- if $links -}} {{- else -}} {{- end -}} {{end}}
Span Name   |  Running   |   Latency Samples   |   Error Samples
  |     |  [{{.}}]  |  
{{.Name}}  |  {{.Active}}{{.Active}}  |  {{$value}}{{.}}  |  {{.Errors}}{{.Errors}}
opencensus-go-0.24.0/zpages/internal/templates/traces.html000066400000000000000000000006441433102037600236370ustar00rootroot00000000000000

Span Name: {{.Name}}

{{.Num}} Requests

When                       Elapsed (sec)
----------------------------------------
{{range .Rows}}{{printf "%26s" (index .Fields 0)}} {{printf "%12s" (index .Fields 1)}} {{index .Fields 2}}{{.|traceid}}
{{end}}

TraceId means sampled request. TraceId means not sampled request.

opencensus-go-0.24.0/zpages/rpcz.go000066400000000000000000000210641433102037600171620ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package zpages import ( "fmt" "io" "log" "math" "net/http" "sort" "sync" "text/tabwriter" "time" "go.opencensus.io/plugin/ocgrpc" "go.opencensus.io/stats/view" ) const bytesPerKb = 1024 var ( programStartTime = time.Now() mu sync.Mutex // protects snaps snaps = make(map[methodKey]*statSnapshot) // viewType lists the views we are interested in for RPC stats. // A view's map value indicates whether that view contains data for received // RPCs. viewType = map[*view.View]bool{ ocgrpc.ClientCompletedRPCsView: false, ocgrpc.ClientSentBytesPerRPCView: false, ocgrpc.ClientSentMessagesPerRPCView: false, ocgrpc.ClientReceivedBytesPerRPCView: false, ocgrpc.ClientReceivedMessagesPerRPCView: false, ocgrpc.ClientRoundtripLatencyView: false, ocgrpc.ServerCompletedRPCsView: true, ocgrpc.ServerReceivedBytesPerRPCView: true, ocgrpc.ServerReceivedMessagesPerRPCView: true, ocgrpc.ServerSentBytesPerRPCView: true, ocgrpc.ServerSentMessagesPerRPCView: true, ocgrpc.ServerLatencyView: true, } ) func registerRPCViews() { views := make([]*view.View, 0, len(viewType)) for v := range viewType { views = append(views, v) } if err := view.Register(views...); err != nil { log.Printf("error subscribing to views: %v", err) } view.RegisterExporter(snapExporter{}) } func rpczHandler(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/html; charset=utf-8") WriteHTMLRpczPage(w) } // WriteHTMLRpczPage writes an HTML document to w containing per-method RPC stats. func WriteHTMLRpczPage(w io.Writer) { if err := headerTemplate.Execute(w, headerData{Title: "RPC Stats"}); err != nil { log.Printf("zpages: executing template: %v", err) } WriteHTMLRpczSummary(w) if err := footerTemplate.Execute(w, nil); err != nil { log.Printf("zpages: executing template: %v", err) } } // WriteHTMLRpczSummary writes HTML to w containing per-method RPC stats. // // It includes neither a header nor footer, so you can embed this data in other pages. func WriteHTMLRpczSummary(w io.Writer) { mu.Lock() if err := statsTemplate.Execute(w, getStatsPage()); err != nil { log.Printf("zpages: executing template: %v", err) } mu.Unlock() } // WriteTextRpczPage writes formatted text to w containing per-method RPC stats. 
func WriteTextRpczPage(w io.Writer) { mu.Lock() defer mu.Unlock() page := getStatsPage() for i, sg := range page.StatGroups { switch i { case 0: fmt.Fprint(w, "Sent:\n") case 1: fmt.Fprint(w, "\nReceived:\n") } tw := tabwriter.NewWriter(w, 6, 8, 1, ' ', 0) fmt.Fprint(tw, "Method\tCount\t\t\tAvgLat\t\t\tMaxLat\t\t\tRate\t\t\tIn (MiB/s)\t\t\tOut (MiB/s)\t\t\tErrors\t\t\n") fmt.Fprint(tw, "\tMin\tHr\tTot\tMin\tHr\tTot\tMin\tHr\tTot\tMin\tHr\tTot\tMin\tHr\tTot\tMin\tHr\tTot\tMin\tHr\tTot\n") for _, s := range sg.Snapshots { fmt.Fprintf(tw, "%s\t%d\t%d\t%d\t%v\t%v\t%v\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%d\t%d\t%d\n", s.Method, s.CountMinute, s.CountHour, s.CountTotal, s.AvgLatencyMinute, s.AvgLatencyHour, s.AvgLatencyTotal, s.RPCRateMinute, s.RPCRateHour, s.RPCRateTotal, s.InputRateMinute/bytesPerKb, s.InputRateHour/bytesPerKb, s.InputRateTotal/bytesPerKb, s.OutputRateMinute/bytesPerKb, s.OutputRateHour/bytesPerKb, s.OutputRateTotal/bytesPerKb, s.ErrorsMinute, s.ErrorsHour, s.ErrorsTotal) } tw.Flush() } } // headerData contains data for the header template. type headerData struct { Title string } // statsPage aggregates stats on the page for 'sent' and 'received' categories type statsPage struct { StatGroups []*statGroup } // statGroup aggregates snapshots for a directional category type statGroup struct { Direction string Snapshots []*statSnapshot } func (s *statGroup) Len() int { return len(s.Snapshots) } func (s *statGroup) Swap(i, j int) { s.Snapshots[i], s.Snapshots[j] = s.Snapshots[j], s.Snapshots[i] } func (s *statGroup) Less(i, j int) bool { return s.Snapshots[i].Method < s.Snapshots[j].Method } // statSnapshot holds the data items that are presented in a single row of RPC // stat information. type statSnapshot struct { // TODO: compute hour/minute values from cumulative Method string Received bool CountMinute uint64 CountHour uint64 CountTotal uint64 AvgLatencyMinute time.Duration AvgLatencyHour time.Duration AvgLatencyTotal time.Duration RPCRateMinute float64 RPCRateHour float64 RPCRateTotal float64 InputRateMinute float64 InputRateHour float64 InputRateTotal float64 OutputRateMinute float64 OutputRateHour float64 OutputRateTotal float64 ErrorsMinute uint64 ErrorsHour uint64 ErrorsTotal uint64 } type methodKey struct { method string received bool } type snapExporter struct{} func (s snapExporter) ExportView(vd *view.Data) { received, ok := viewType[vd.View] if !ok { return } if len(vd.Rows) == 0 { return } ageSec := float64(time.Since(programStartTime)) / float64(time.Second) computeRate := func(maxSec, x float64) float64 { dur := ageSec if maxSec > 0 && dur > maxSec { dur = maxSec } return x / dur } convertTime := func(ms float64) time.Duration { if math.IsInf(ms, 0) || math.IsNaN(ms) { return 0 } return time.Duration(float64(time.Millisecond) * ms) } haveResetErrors := make(map[string]struct{}) mu.Lock() defer mu.Unlock() for _, row := range vd.Rows { var method string for _, tag := range row.Tags { if tag.Key == ocgrpc.KeyClientMethod || tag.Key == ocgrpc.KeyServerMethod { method = tag.Value break } } key := methodKey{method: method, received: received} s := snaps[key] if s == nil { s = &statSnapshot{Method: method, Received: received} snaps[key] = s } var ( sum float64 count float64 ) switch v := row.Data.(type) { case *view.CountData: sum = float64(v.Value) count = float64(v.Value) case *view.DistributionData: sum = v.Sum() count = float64(v.Count) case *view.SumData: sum = v.Value count = v.Value } // Update field of s corresponding to the view. 
switch vd.View { case ocgrpc.ClientCompletedRPCsView: if _, ok := haveResetErrors[method]; !ok { haveResetErrors[method] = struct{}{} s.ErrorsTotal = 0 } for _, tag := range row.Tags { if tag.Key == ocgrpc.KeyClientStatus && tag.Value != "OK" { s.ErrorsTotal += uint64(count) } } case ocgrpc.ClientRoundtripLatencyView: s.AvgLatencyTotal = convertTime(sum / count) case ocgrpc.ClientSentBytesPerRPCView: s.OutputRateTotal = computeRate(0, sum) case ocgrpc.ClientReceivedBytesPerRPCView: s.InputRateTotal = computeRate(0, sum) case ocgrpc.ClientSentMessagesPerRPCView: s.CountTotal = uint64(count) s.RPCRateTotal = computeRate(0, count) case ocgrpc.ClientReceivedMessagesPerRPCView: // currently unused case ocgrpc.ServerCompletedRPCsView: if _, ok := haveResetErrors[method]; !ok { haveResetErrors[method] = struct{}{} s.ErrorsTotal = 0 } for _, tag := range row.Tags { if tag.Key == ocgrpc.KeyServerStatus && tag.Value != "OK" { s.ErrorsTotal += uint64(count) } } case ocgrpc.ServerLatencyView: s.AvgLatencyTotal = convertTime(sum / count) case ocgrpc.ServerSentBytesPerRPCView: s.OutputRateTotal = computeRate(0, sum) case ocgrpc.ServerReceivedMessagesPerRPCView: s.CountTotal = uint64(count) s.RPCRateTotal = computeRate(0, count) case ocgrpc.ServerSentMessagesPerRPCView: // currently unused } } } func getStatsPage() *statsPage { sentStats := statGroup{Direction: "Sent"} receivedStats := statGroup{Direction: "Received"} for key, sg := range snaps { if key.received { receivedStats.Snapshots = append(receivedStats.Snapshots, sg) } else { sentStats.Snapshots = append(sentStats.Snapshots, sg) } } sort.Sort(&sentStats) sort.Sort(&receivedStats) return &statsPage{ StatGroups: []*statGroup{&sentStats, &receivedStats}, } } opencensus-go-0.24.0/zpages/rpcz_test.go000066400000000000000000000025501433102037600202200ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package zpages import ( "context" "testing" "time" "go.opencensus.io/internal/testpb" "go.opencensus.io/stats/view" ) func TestRpcz(t *testing.T) { client, cleanup := testpb.NewTestClient(t) defer cleanup() _, err := client.Single(context.Background(), &testpb.FooRequest{}) if err != nil { t.Fatal(err) } view.SetReportingPeriod(time.Millisecond) time.Sleep(2 * time.Millisecond) view.SetReportingPeriod(time.Second) mu.Lock() defer mu.Unlock() if len(snaps) == 0 { t.Fatal("Expected len(snaps) > 0") } snapshot, ok := snaps[methodKey{"testpb.Foo/Single", false}] if !ok { t.Fatal("Expected method stats not recorded") } if got, want := snapshot.CountTotal, uint64(1); got != want { t.Errorf("snapshot.CountTotal = %d; want %d", got, want) } } opencensus-go-0.24.0/zpages/templates.go000066400000000000000000000060201433102037600201750ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package zpages import ( "fmt" "html/template" "io/ioutil" "log" "strconv" "time" "go.opencensus.io/trace" "go.opencensus.io/zpages/internal" ) var ( fs = internal.FS(false) templateFunctions = template.FuncMap{ "count": countFormatter, "ms": msFormatter, "rate": rateFormatter, "datarate": dataRateFormatter, "even": even, "traceid": traceIDFormatter, } headerTemplate = parseTemplate("header") summaryTableTemplate = parseTemplate("summary") statsTemplate = parseTemplate("rpcz") tracesTableTemplate = parseTemplate("traces") footerTemplate = parseTemplate("footer") ) func parseTemplate(name string) *template.Template { f, err := fs.Open("/templates/" + name + ".html") if err != nil { log.Panicf("%v: %v", name, err) } defer f.Close() text, err := ioutil.ReadAll(f) if err != nil { log.Panicf("%v: %v", name, err) } return template.Must(template.New(name).Funcs(templateFunctions).Parse(string(text))) } func countFormatter(num uint64) string { if num <= 0 { return " " } var floatVal float64 var suffix string if num >= 1e18 { floatVal = float64(num) / 1e18 suffix = " E " } else if num >= 1e15 { floatVal = float64(num) / 1e15 suffix = " P " } else if num >= 1e12 { floatVal = float64(num) / 1e12 suffix = " T " } else if num >= 1e9 { floatVal = float64(num) / 1e9 suffix = " G " } else if num >= 1e6 { floatVal = float64(num) / 1e6 suffix = " M " } if floatVal != 0 { return fmt.Sprintf("%1.3f%s", floatVal, suffix) } return fmt.Sprint(num) } func msFormatter(d time.Duration) string { if d == 0 { return "0" } if d < 10*time.Millisecond { return fmt.Sprintf("%.3f", float64(d)*1e-6) } return strconv.Itoa(int(d / time.Millisecond)) } func rateFormatter(r float64) string { return fmt.Sprintf("%.3f", r) } func dataRateFormatter(b float64) string { return fmt.Sprintf("%.3f", b/1e6) } func traceIDFormatter(r traceRow) template.HTML { sc := r.SpanContext if sc == (trace.SpanContext{}) { return "" } col := "black" if sc.TraceOptions.IsSampled() { col = "blue" } if r.ParentSpanID != (trace.SpanID{}) { return template.HTML(fmt.Sprintf(`trace_id: %s span_id: %s parent_span_id: %s`, col, sc.TraceID, sc.SpanID, r.ParentSpanID)) } return template.HTML(fmt.Sprintf(`trace_id: %s span_id: %s`, col, sc.TraceID, sc.SpanID)) } func even(x int) bool { return x%2 == 0 } opencensus-go-0.24.0/zpages/templates_test.go000066400000000000000000000067021433102037600212430ustar00rootroot00000000000000// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package zpages import ( "bytes" "html/template" "testing" ) const tmplBody = ` {{.Method}} {{.CountMinute|count}} {{.CountHour|count}} {{.CountTotal|count}} {{.AvgLatencyMinute|ms}} {{.AvgLatencyHour|ms}} {{.AvgLatencyTotal|ms}} {{.RPCRateMinute|rate}} {{.RPCRateHour|rate}} {{.RPCRateTotal|rate}} {{.InputRateMinute|datarate}} {{.InputRateHour|datarate}} {{.InputRateTotal|datarate}} {{.OutputRateMinute|datarate}} {{.OutputRateHour|datarate}} {{.OutputRateTotal|datarate}} {{.ErrorsMinute|count}} {{.ErrorsHour|count}} {{.ErrorsTotal|count}} ` var tmpl = template.Must(template.New("countTest").Funcs(templateFunctions).Parse(tmplBody)) func TestTemplateFuncs(t *testing.T) { buf := new(bytes.Buffer) sshot := &statSnapshot{ Method: "Foo", CountMinute: 1e9, CountHour: 5000, CountTotal: 1e12, AvgLatencyMinute: 10000, AvgLatencyHour: 1000, AvgLatencyTotal: 20000, RPCRateMinute: 2000, RPCRateHour: 5000, RPCRateTotal: 75000, InputRateMinute: 75000, InputRateHour: 75000, InputRateTotal: 75000, OutputRateMinute: 75000, OutputRateHour: 75000, OutputRateTotal: 75000, ErrorsMinute: 120000000, ErrorsHour: 75000000, ErrorsTotal: 7500000, } if err := tmpl.Execute(buf, sshot); err != nil { t.Fatalf("Failed to execute template: %v", err) } want := ` Foo 1.000 G 5000 1.000 T 0.010 0.001 0.020 2000.000 5000.000 75000.000 0.075 0.075 0.075 0.075 0.075 0.075 120.000 M 75.000 M 7.500 M ` if g, w := buf.String(), want; g != w { t.Errorf("Output mismatch:\nGot:\n\t%s\nWant:\n\t%s", g, w) } } opencensus-go-0.24.0/zpages/tracez.go000066400000000000000000000274121433102037600174770ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package zpages import ( "fmt" "io" "log" "net/http" "sort" "strconv" "strings" "text/tabwriter" "time" "go.opencensus.io/internal" "go.opencensus.io/trace" ) const ( // spanNameQueryField is the header for span name. spanNameQueryField = "zspanname" // spanTypeQueryField is the header for type (running = 0, latency = 1, error = 2) to display. spanTypeQueryField = "ztype" // spanSubtypeQueryField is the header for sub-type: // * for latency based samples [0, 8] representing the latency buckets, where 0 is the first one; // * for error based samples, 0 means all, otherwise the error code; spanSubtypeQueryField = "zsubtype" // maxTraceMessageLength is the maximum length of a message in tracez output. 
maxTraceMessageLength = 1024 ) var ( defaultLatencies = [...]time.Duration{ 10 * time.Microsecond, 100 * time.Microsecond, time.Millisecond, 10 * time.Millisecond, 100 * time.Millisecond, time.Second, 10 * time.Second, 100 * time.Second, } canonicalCodes = [...]string{ "OK", "CANCELLED", "UNKNOWN", "INVALID_ARGUMENT", "DEADLINE_EXCEEDED", "NOT_FOUND", "ALREADY_EXISTS", "PERMISSION_DENIED", "RESOURCE_EXHAUSTED", "FAILED_PRECONDITION", "ABORTED", "OUT_OF_RANGE", "UNIMPLEMENTED", "INTERNAL", "UNAVAILABLE", "DATA_LOSS", "UNAUTHENTICATED", } ) func canonicalCodeString(code int32) string { if code < 0 || int(code) >= len(canonicalCodes) { return "error code " + strconv.FormatInt(int64(code), 10) } return canonicalCodes[code] } func tracezHandler(w http.ResponseWriter, r *http.Request) { r.ParseForm() w.Header().Set("Content-Type", "text/html; charset=utf-8") name := r.Form.Get(spanNameQueryField) t, _ := strconv.Atoi(r.Form.Get(spanTypeQueryField)) st, _ := strconv.Atoi(r.Form.Get(spanSubtypeQueryField)) WriteHTMLTracezPage(w, name, t, st) } // WriteHTMLTracezPage writes an HTML document to w containing locally-sampled trace spans. func WriteHTMLTracezPage(w io.Writer, spanName string, spanType, spanSubtype int) { if err := headerTemplate.Execute(w, headerData{Title: "Trace Spans"}); err != nil { log.Printf("zpages: executing template: %v", err) } WriteHTMLTracezSummary(w) WriteHTMLTracezSpans(w, spanName, spanType, spanSubtype) if err := footerTemplate.Execute(w, nil); err != nil { log.Printf("zpages: executing template: %v", err) } } // WriteHTMLTracezSummary writes HTML to w containing a summary of locally-sampled trace spans. // // It includes neither a header nor footer, so you can embed this data in other pages. func WriteHTMLTracezSummary(w io.Writer) { if err := summaryTableTemplate.Execute(w, getSummaryPageData()); err != nil { log.Printf("zpages: executing template: %v", err) } } // WriteHTMLTracezSpans writes HTML to w containing locally-sampled trace spans. // // It includes neither a header nor footer, so you can embed this data in other pages. func WriteHTMLTracezSpans(w io.Writer, spanName string, spanType, spanSubtype int) { if spanName == "" { return } if err := tracesTableTemplate.Execute(w, traceDataFromSpans(spanName, traceSpans(spanName, spanType, spanSubtype))); err != nil { log.Printf("zpages: executing template: %v", err) } } // WriteTextTracezSpans writes formatted text to w containing locally-sampled trace spans. func WriteTextTracezSpans(w io.Writer, spanName string, spanType, spanSubtype int) { spans := traceSpans(spanName, spanType, spanSubtype) data := traceDataFromSpans(spanName, spans) writeTextTraces(w, data) } // WriteTextTracezSummary writes formatted text to w containing a summary of locally-sampled trace spans. func WriteTextTracezSummary(w io.Writer) { w.Write([]byte("Locally sampled spans summary\n\n")) data := getSummaryPageData() if len(data.Rows) == 0 { return } tw := tabwriter.NewWriter(w, 8, 8, 1, ' ', 0) for i, s := range data.Header { if i != 0 { tw.Write([]byte("\t")) } tw.Write([]byte(s)) } tw.Write([]byte("\n")) put := func(x int) { if x == 0 { tw.Write([]byte(".\t")) return } fmt.Fprintf(tw, "%d\t", x) } for _, r := range data.Rows { tw.Write([]byte(r.Name)) tw.Write([]byte("\t")) put(r.Active) for _, l := range r.Latency { put(l) } put(r.Errors) tw.Write([]byte("\n")) } tw.Flush() } // traceData contains data for the trace data template. 
type traceData struct { Name string Num int Rows []traceRow } type traceRow struct { Fields [3]string trace.SpanContext ParentSpanID trace.SpanID } type events []interface{} func (e events) Len() int { return len(e) } func (e events) Less(i, j int) bool { var ti time.Time switch x := e[i].(type) { case *trace.Annotation: ti = x.Time case *trace.MessageEvent: ti = x.Time } switch x := e[j].(type) { case *trace.Annotation: return ti.Before(x.Time) case *trace.MessageEvent: return ti.Before(x.Time) } return false } func (e events) Swap(i, j int) { e[i], e[j] = e[j], e[i] } func traceRows(s *trace.SpanData) []traceRow { start := s.StartTime lasty, lastm, lastd := start.Date() wholeTime := func(t time.Time) string { return t.Format("2006/01/02-15:04:05") + fmt.Sprintf(".%06d", t.Nanosecond()/1000) } formatTime := func(t time.Time) string { y, m, d := t.Date() if y == lasty && m == lastm && d == lastd { return t.Format(" 15:04:05") + fmt.Sprintf(".%06d", t.Nanosecond()/1000) } lasty, lastm, lastd = y, m, d return wholeTime(t) } lastTime := start formatElapsed := func(t time.Time) string { d := t.Sub(lastTime) lastTime = t u := int64(d / 1000) // There are five cases for duration printing: // -1234567890s // -1234.123456 // .123456 // 12345.123456 // 12345678901s switch { case u < -9999999999: return fmt.Sprintf("%11ds", u/1e6) case u < 0: sec := u / 1e6 u -= sec * 1e6 return fmt.Sprintf("%5d.%06d", sec, -u) case u < 1e6: return fmt.Sprintf(" .%6d", u) case u <= 99999999999: sec := u / 1e6 u -= sec * 1e6 return fmt.Sprintf("%5d.%06d", sec, u) default: return fmt.Sprintf("%11ds", u/1e6) } } firstRow := traceRow{Fields: [3]string{wholeTime(start), "", ""}, SpanContext: s.SpanContext, ParentSpanID: s.ParentSpanID} if s.EndTime.IsZero() { firstRow.Fields[1] = " " } else { firstRow.Fields[1] = formatElapsed(s.EndTime) lastTime = start } out := []traceRow{firstRow} formatAttributes := func(a map[string]interface{}) string { if len(a) == 0 { return "" } var keys []string for key := range a { keys = append(keys, key) } sort.Strings(keys) var s []string for _, key := range keys { val := a[key] switch val.(type) { case string: s = append(s, fmt.Sprintf("%s=%q", key, val)) default: s = append(s, fmt.Sprintf("%s=%v", key, val)) } } return "Attributes:{" + strings.Join(s, ", ") + "}" } if s.Status != (trace.Status{}) { msg := fmt.Sprintf("Status{canonicalCode=%s, description=%q}", canonicalCodeString(s.Status.Code), s.Status.Message) out = append(out, traceRow{Fields: [3]string{"", "", msg}}) } if len(s.Attributes) != 0 { out = append(out, traceRow{Fields: [3]string{"", "", formatAttributes(s.Attributes)}}) } var es events for i := range s.Annotations { es = append(es, &s.Annotations[i]) } for i := range s.MessageEvents { es = append(es, &s.MessageEvents[i]) } sort.Sort(es) for _, e := range es { switch e := e.(type) { case *trace.Annotation: msg := e.Message if len(e.Attributes) != 0 { msg = msg + " " + formatAttributes(e.Attributes) } row := traceRow{Fields: [3]string{ formatTime(e.Time), formatElapsed(e.Time), msg, }} out = append(out, row) case *trace.MessageEvent: row := traceRow{Fields: [3]string{formatTime(e.Time), formatElapsed(e.Time)}} switch e.EventType { case trace.MessageEventTypeSent: row.Fields[2] = fmt.Sprintf("sent message [%d bytes, %d compressed bytes]", e.UncompressedByteSize, e.CompressedByteSize) case trace.MessageEventTypeRecv: row.Fields[2] = fmt.Sprintf("received message [%d bytes, %d compressed bytes]", e.UncompressedByteSize, e.CompressedByteSize) } out = append(out, row) } } for i 
:= range out { if len(out[i].Fields[2]) > maxTraceMessageLength { out[i].Fields[2] = out[i].Fields[2][:maxTraceMessageLength] } } return out } func traceSpans(spanName string, spanType, spanSubtype int) []*trace.SpanData { internalTrace := internal.Trace.(interface { ReportActiveSpans(name string) []*trace.SpanData ReportSpansByError(name string, code int32) []*trace.SpanData ReportSpansByLatency(name string, minLatency, maxLatency time.Duration) []*trace.SpanData }) var spans []*trace.SpanData switch spanType { case 0: // active spans = internalTrace.ReportActiveSpans(spanName) case 1: // latency var min, max time.Duration n := len(defaultLatencies) if spanSubtype == 0 { max = defaultLatencies[0] } else if spanSubtype == n { min, max = defaultLatencies[spanSubtype-1], (1<<63)-1 } else if 0 < spanSubtype && spanSubtype < n { min, max = defaultLatencies[spanSubtype-1], defaultLatencies[spanSubtype] } spans = internalTrace.ReportSpansByLatency(spanName, min, max) case 2: // error spans = internalTrace.ReportSpansByError(spanName, 0) } return spans } func traceDataFromSpans(name string, spans []*trace.SpanData) traceData { data := traceData{ Name: name, Num: len(spans), } for _, s := range spans { data.Rows = append(data.Rows, traceRows(s)...) } return data } func writeTextTraces(w io.Writer, data traceData) { tw := tabwriter.NewWriter(w, 1, 8, 1, ' ', 0) fmt.Fprint(tw, "When\tElapsed(s)\tType\n") for _, r := range data.Rows { tw.Write([]byte(r.Fields[0])) tw.Write([]byte("\t")) tw.Write([]byte(r.Fields[1])) tw.Write([]byte("\t")) tw.Write([]byte(r.Fields[2])) if sc := r.SpanContext; sc != (trace.SpanContext{}) { fmt.Fprintf(tw, "trace_id: %s span_id: %s", sc.TraceID, sc.SpanID) if r.ParentSpanID != (trace.SpanID{}) { fmt.Fprintf(tw, " parent_span_id: %s", r.ParentSpanID) } } tw.Write([]byte("\n")) } tw.Flush() } type summaryPageData struct { Header []string LatencyBucketNames []string Links bool TracesEndpoint string Rows []summaryPageRow } type summaryPageRow struct { Name string Active int Latency []int Errors int } func getSummaryPageData() summaryPageData { data := summaryPageData{ Links: true, TracesEndpoint: "tracez", } internalTrace := internal.Trace.(interface { ReportSpansPerMethod() map[string]internal.PerMethodSummary }) for name, s := range internalTrace.ReportSpansPerMethod() { if len(data.Header) == 0 { data.Header = []string{"Name", "Active"} for _, b := range s.LatencyBuckets { l := b.MinLatency s := fmt.Sprintf(">%v", l) if l == 100*time.Second { s = ">100s" } data.Header = append(data.Header, s) data.LatencyBucketNames = append(data.LatencyBucketNames, s) } data.Header = append(data.Header, "Errors") } row := summaryPageRow{Name: name, Active: s.Active} for _, l := range s.LatencyBuckets { row.Latency = append(row.Latency, l.Size) } for _, e := range s.ErrorBuckets { row.Errors += e.Size } data.Rows = append(data.Rows, row) } sort.Slice(data.Rows, func(i, j int) bool { return data.Rows[i].Name < data.Rows[j].Name }) return data } opencensus-go-0.24.0/zpages/zpages.go000066400000000000000000000041111433102037600174670ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Package zpages implements a collection of HTML pages that display RPC stats // and trace data, and also functions to write that same data in plain text to // an io.Writer. // // Users can also embed the HTML for stats and traces in custom status pages. // // zpages are currently work in progress and cannot display minutely and // hourly stats correctly. // // # Performance // // Installing the zpages has a performance overhead because additional traces // and stats will be collected in-process. In most cases, we expect this // overhead will not be significant but it depends on many factors, including // how many spans your process creates and how richly annotated they are. package zpages // import "go.opencensus.io/zpages" import ( "net/http" "path" "sync" "go.opencensus.io/internal" ) // TODO(ramonza): Remove Handler to make initialization lazy. // Handler is deprecated: Use Handle. var Handler http.Handler func init() { mux := http.NewServeMux() Handle(mux, "/") Handler = mux } // Handle adds the z-pages to the given ServeMux rooted at pathPrefix. func Handle(mux *http.ServeMux, pathPrefix string) { enable() if mux == nil { mux = http.DefaultServeMux } mux.HandleFunc(path.Join(pathPrefix, "rpcz"), rpczHandler) mux.HandleFunc(path.Join(pathPrefix, "tracez"), tracezHandler) mux.Handle(path.Join(pathPrefix, "public/"), http.FileServer(fs)) } var enableOnce sync.Once func enable() { enableOnce.Do(func() { internal.LocalSpanStoreEnabled = true registerRPCViews() }) } opencensus-go-0.24.0/zpages/zpages_test.go000066400000000000000000000110301433102037600205260ustar00rootroot00000000000000// Copyright 2017, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License.
// package zpages import ( "bytes" "reflect" "testing" "time" "fmt" "net/http" "net/http/httptest" "go.opencensus.io/trace" ) var ( tid = trace.TraceID{1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 4, 8, 16, 32, 64, 128} sid = trace.SpanID{1, 2, 4, 8, 16, 32, 64, 128} sid2 = trace.SpanID{0, 3, 5, 9, 17, 33, 65, 129} ) func TestTraceRows(t *testing.T) { now := time.Now() later := now.Add(2 * time.Second) data := traceDataFromSpans("foo", []*trace.SpanData{{ SpanContext: trace.SpanContext{TraceID: tid, SpanID: sid}, ParentSpanID: sid2, Name: "foo", StartTime: now, EndTime: later, Attributes: map[string]interface{}{"stringkey": "stringvalue", "intkey": 42, "boolkey": true}, Annotations: []trace.Annotation{ {Time: now.Add(time.Millisecond), Message: "hello, world", Attributes: map[string]interface{}{"foo": "bar"}}, {Time: now.Add(1500 * time.Millisecond), Message: "hello, world"}, }, MessageEvents: []trace.MessageEvent{ {Time: now, EventType: 2, MessageID: 0x3, UncompressedByteSize: 0x190, CompressedByteSize: 0x12c}, {Time: later, EventType: 1, MessageID: 0x1, UncompressedByteSize: 0xc8, CompressedByteSize: 0x64}, }, Status: trace.Status{ Code: 1, Message: "d'oh!", }, }}) fakeTime := "2006/01/02-15:04:05.123456" for i := range data.Rows { data.Rows[i].Fields[0] = fakeTime } if want := (traceData{ Name: "foo", Num: 1, Rows: []traceRow{ {Fields: [3]string{fakeTime, " 2.000000", ""}, SpanContext: trace.SpanContext{TraceID: tid, SpanID: sid}, ParentSpanID: sid2}, {Fields: [3]string{fakeTime, "", `Status{canonicalCode=CANCELLED, description="d'oh!"}`}}, {Fields: [3]string{fakeTime, "", `Attributes:{boolkey=true, intkey=42, stringkey="stringvalue"}`}}, {Fields: [3]string{fakeTime, " . 0", "received message [400 bytes, 300 compressed bytes]"}}, {Fields: [3]string{fakeTime, " . 1000", `hello, world Attributes:{foo="bar"}`}}, {Fields: [3]string{fakeTime, " 1.499000", "hello, world"}}, {Fields: [3]string{fakeTime, " .500000", "sent message [200 bytes, 100 compressed bytes]"}}}}); !reflect.DeepEqual(data, want) { t.Errorf("traceRows: got %v want %v\n", data, want) } var buf bytes.Buffer writeTextTraces(&buf, data) if want := `When Elapsed(s) Type 2006/01/02-15:04:05.123456 2.000000 trace_id: 01020304050607080102040810204080 span_id: 0102040810204080 parent_span_id: 0003050911214181 2006/01/02-15:04:05.123456 Status{canonicalCode=CANCELLED, description="d'oh!"} 2006/01/02-15:04:05.123456 Attributes:{boolkey=true, intkey=42, stringkey="stringvalue"} 2006/01/02-15:04:05.123456 . 0 received message [400 bytes, 300 compressed bytes] 2006/01/02-15:04:05.123456 . 
1000 hello, world Attributes:{foo="bar"} 2006/01/02-15:04:05.123456 1.499000 hello, world 2006/01/02-15:04:05.123456 .500000 sent message [200 bytes, 100 compressed bytes] `; buf.String() != want { t.Errorf("writeTextTraces: got %q want %q\n", buf.String(), want) } } func TestGetZPages(t *testing.T) { mux := http.NewServeMux() Handle(mux, "/debug") server := httptest.NewServer(mux) defer server.Close() tests := []string{"/debug/rpcz", "/debug/tracez"} for _, tt := range tests { t.Run(fmt.Sprintf("GET %s", tt), func(t *testing.T) { res, err := http.Get(server.URL + tt) if err != nil { t.Error(err) return } if got, want := res.StatusCode, http.StatusOK; got != want { t.Errorf("res.StatusCode = %d; want %d", got, want) } }) } } func TestGetZPages_default(t *testing.T) { server := httptest.NewServer(Handler) defer server.Close() tests := []string{"/rpcz", "/tracez"} for _, tt := range tests { t.Run(fmt.Sprintf("GET %s", tt), func(t *testing.T) { res, err := http.Get(server.URL + tt) if err != nil { t.Error(err) return } if got, want := res.StatusCode, http.StatusOK; got != want { t.Errorf("res.StatusCode = %d; want %d", got, want) } }) } }
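// The package documentation in zpages.go and the doc comments on WriteHTMLRpczSummary and
// WriteHTMLTracezSummary note that the header-less summaries can be embedded in custom status
// pages. The snippet below is a minimal sketch of doing that: the "/statusz" route and the
// surrounding HTML strings are assumptions made for the example; only zpages.Handle,
// zpages.WriteHTMLRpczSummary and zpages.WriteHTMLTracezSummary come from the package itself.
package main

import (
	"io"
	"log"
	"net/http"

	"go.opencensus.io/zpages"
)

func main() {
	mux := http.NewServeMux()

	// Full z-pages (tracez, rpcz and static assets) under /debug.
	zpages.Handle(mux, "/debug")

	// A custom status page that embeds only the summary tables.
	mux.HandleFunc("/statusz", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/html; charset=utf-8")
		io.WriteString(w, "<html><body><h1>statusz</h1>")
		zpages.WriteHTMLRpczSummary(w)   // per-method RPC stats, no header/footer
		zpages.WriteHTMLTracezSummary(w) // locally-sampled span summary, no header/footer
		io.WriteString(w, "</body></html>")
	})

	log.Fatal(http.ListenAndServe("127.0.0.1:9999", mux))
}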