pax_global_header00006660000000000000000000000064140574107200014512gustar00rootroot0000000000000052 comment=8184d76b3b0bd3b01ed903690431ccb6826bf3e0 client_golang-1.11.0/000077500000000000000000000000001405741072000143775ustar00rootroot00000000000000client_golang-1.11.0/.circleci/000077500000000000000000000000001405741072000162325ustar00rootroot00000000000000client_golang-1.11.0/.circleci/config.yml000066400000000000000000000034371405741072000202310ustar00rootroot00000000000000version: 2.1 orbs: go: circleci/go@0.2.0 prometheus: prometheus/prometheus@0.11.0 jobs: test: parameters: go_version: type: string run_style_and_unused: type: boolean default: false run_lint: type: boolean default: false use_gomod_cache: type: boolean default: true docker: - image: circleci/golang:<< parameters.go_version >> working_directory: /go/src/github.com/prometheus/client_golang steps: - checkout - when: condition: << parameters.use_gomod_cache >> steps: - go/load-cache: key: v1-go<< parameters.go_version >> - run: make check_license test - when: condition: << parameters.run_lint >> steps: - run: make lint - when: condition: << parameters.run_style_and_unused >> steps: - run: make style unused - when: condition: << parameters.use_gomod_cache >> steps: - go/save-cache: key: v1-go<< parameters.go_version >> - store_test_results: path: test-results workflows: version: 2 client_golang: jobs: # Refer to README.md for the currently supported versions. - test: name: go-1-13 go_version: "1.13" run_lint: true - test: name: go-1-14 go_version: "1.14" run_lint: true - test: name: go-1-15 go_version: "1.15" run_lint: true - test: name: go-1-16 go_version: "1.16" run_lint: true # Style and unused/missing packages are only checked against # the latest supported Go version. run_style_and_unused: true client_golang-1.11.0/.github/000077500000000000000000000000001405741072000157375ustar00rootroot00000000000000client_golang-1.11.0/.github/ISSUE_TEMPLATE.md000066400000000000000000000024171405741072000204500ustar00rootroot00000000000000 client_golang-1.11.0/.gitignore000066400000000000000000000007541405741072000163750ustar00rootroot00000000000000# Examples examples/simple/simple examples/random/random # Typical backup/temporary files of editors *~ *# # Never include any accidentally created vendor dirs. # This is a library! vendor/ # The remainder of this file is taken from # https://github.com/github/gitignore/blob/master/Go.gitignore # Binaries for programs and plugins *.exe *.exe~ *.dll *.so *.dylib # Test binary, built with `go test -c` *.test # Output of the go coverage tool, specifically when used with LiteIDE *.out client_golang-1.11.0/.golangci.yml000066400000000000000000000002061405741072000167610ustar00rootroot00000000000000# Run only staticcheck for now. Additional linters will be enabled one-by-one. linters: enable: - staticcheck disable-all: true client_golang-1.11.0/CHANGELOG.md000066400000000000000000000364671405741072000162300ustar00rootroot00000000000000## 1.11.0 / 2021-06-07 * [ENHANCEMENT] API client: Add newer fields to Rules API #855 * [ENHANCEMENT] API client: Add missing fields to Targets API #856 * [FEATURE] API client: Add exemplars API support #861 * [CHANGE] Add new collectors package #862 ## 1.10.0 / 2021-03-18 * [CHANGE] Minimum required Go version is now 1.13. * [CHANGE] API client: Add matchers to `LabelNames` and `LabesValues`. #828 * [FEATURE] API client: Add buildinfo call. #841 * [BUGFIX] Fix build on riscv64. 
#833 ## 1.9.0 / 2020-12-17 * [FEATURE] `NewPidFileFn` helper to create process collectors for processes whose PID is read from a file. #804 * [BUGFIX] promhttp: Prevent endless loop in `InstrumentHandler...` middlewares with invalid metric or label names. #823 ## 1.8.0 / 2020-10-15 * [CHANGE] API client: Use `time.Time` rather than `string` for timestamps in `RuntimeinfoResult`. #777 * [FEATURE] Export `MetricVec` to facilitate implementation of vectors of custom `Metric` types. #803 * [FEATURE API client: Support `/status/tsdb` endpoint. #773 * [ENHANCEMENT] API client: Enable GET fallback on status code 501. #802 * [ENHANCEMENT] Remove `Metric` references after reslicing to free up more memory. #784 ## 1.7.1 / 2020-06-23 * [BUGFIX] API client: Actually propagate start/end parameters of `LabelNames` and `LabelValues`. #771 ## 1.7.0 / 2020-06-17 * [CHANGE] API client: Add start/end parameters to `LabelNames` and `LabelValues`. #767 * [FEATURE] testutil: Add `GatherAndCount` and enable filtering in `CollectAndCount` #753 * [FEATURE] API client: Add support for `status` and `runtimeinfo` endpoints. #755 * [ENHANCEMENT] Wrapping `nil` with a `WrapRegistererWith...` function creates a no-op `Registerer`. #764 * [ENHANCEMENT] promlint: Allow Kelvin as a base unit for cases like color temperature. #761 * [BUGFIX] push: Properly handle empty job and label values. #752 ## 1.6.0 / 2020-04-28 * [FEATURE] testutil: Add lint checks for metrics, including a sub-package `promlint` to expose the linter engine for external usage. #739 #743 * [ENHANCEMENT] API client: Improve error messages. #731 * [BUGFIX] process collector: Fix `process_resident_memory_bytes` on 32bit MS Windows. #734 ## 1.5.1 / 2020-03-14 * [BUGFIX] promhttp: Remove another superfluous `WriteHeader` call. #726 ## 1.5.0 / 2020-03-03 * [FEATURE] promauto: Add a factory to allow automatic registration with a local registry. #713 * [FEATURE] promauto: Add `NewUntypedFunc`. #713 * [FEATURE] API client: Support new metadata endpoint. #718 ## 1.4.1 / 2020-02-07 * [BUGFIX] Fix timestamp of exemplars in `CounterVec`. #710 ## 1.4.0 / 2020-01-27 * [CHANGE] Go collector: Improve doc string for `go_gc_duration_seconds`. #702 * [FEATURE] Support a subset of OpenMetrics, including exemplars. Needs opt-in via `promhttp.HandlerOpts`. **EXPERIMENTAL** #706 * [FEATURE] Add `testutil.CollectAndCount`. #703 ## 1.3.0 / 2019-12-21 * [FEATURE] Support tags in Graphite bridge. #668 * [BUGFIX] API client: Actually return Prometheus warnings. #699 ## 1.2.1 / 2019-10-17 * [BUGFIX] Fix regression in the implementation of `Registerer.Unregister`. #663 ## 1.2.0 / 2019-10-15 * [FEATURE] Support pushing to Pushgateway v0.10+. #652 * [ENHANCEMENT] Improve hashing to make a spurious `AlreadyRegisteredError` less likely to occur. #657 * [ENHANCEMENT] API client: Add godoc examples. #630 * [BUGFIX] promhttp: Correctly call WriteHeader in HTTP middleware. #634 ## 1.1.0 / 2019-08-01 * [CHANGE] API client: Format time as UTC rather than RFC3339Nano. #617 * [CHANGE] API client: Add warnings to `LabelValues` and `LabelNames` calls. #609 * [FEATURE] Push: Support base64 encoding in grouping key. #624 * [FEATURE] Push: Add Delete method to Pusher. #613 ## 1.0.0 / 2019-06-15 _This release removes all previously deprecated features, resulting in the breaking changes listed below. As this is v1.0.0, semantic versioning applies from now on, with the exception of the API client and parts marked explicitly as experimental._ * [CHANGE] Remove objectives from the default `Summary`. 
(Objectives have to be set explicitly in the `SummaryOpts`.) #600 * [CHANGE] Remove all HTTP related feature in the `prometheus` package. (Use the `promhttp` package instead.) #600 * [CHANGE] Remove `push.FromGatherer`, `push.AddFromGatherer`, `push.Collectors`. (Use `push.New` instead.) #600 * [CHANGE] API client: Pass warnings through on non-error responses. #599 * [CHANGE] API client: Add warnings to `Series` call. #603 * [FEATURE] Make process collector work on Microsoft Windows. **EXPERIMENTAL** #596 * [FEATURE] API client: Add `/labels` call. #604 * [BUGFIX] Make `AlreadyRegisteredError` usable for wrapped registries. #607 ## 0.9.4 / 2019-06-07 * [CHANGE] API client: Switch to alert values as strings. #585 * [FEATURE] Add a collector for Go module build information. #595 * [FEATURE] promhttp: Add an counter for internal errors during HTTP exposition. #594 * [FEATURE] API client: Support target metadata API. #590 * [FEATURE] API client: Support storage warnings. #562 * [ENHANCEMENT] API client: Improve performance handling JSON. #570 * [BUGFIX] Reduce test flakiness. #573 ## 0.9.3 / 2019-05-16 * [CHANGE] Required Go version is now 1.9+. #561 * [FEATURE] API client: Add POST with get fallback for Query/QueryRange. #557 * [FEATURE] API client: Add alerts endpoint. #552 * [FEATURE] API client: Add rules endpoint. #508 * [FEATURE] push: Add option to pick metrics format. #540 * [ENHANCEMENT] Limit time the Go collector may take to collect memstats, returning results from the previous collection in case of a timeout. #568 * [ENHANCEMENT] Pusher now requires only a thin interface instead of a full `http.Client`, facilitating mocking and custom HTTP client implementation. #559 * [ENHANCEMENT] Memory usage improvement for histograms and summaries without objectives. #536 * [ENHANCEMENT] Summaries without objectives are now lock-free. #521 * [BUGFIX] promhttp: `InstrumentRoundTripperTrace` now takes into account a pre-set context. #582 * [BUGFIX] `TestCounterAddLarge` now works on all platforms. #567 * [BUGFIX] Fix `promhttp` examples. #535 #544 * [BUGFIX] API client: Wait for done before writing to shared response body. #532 * [BUGFIX] API client: Deal with discovered labels properly. #529 ## 0.9.2 / 2018-12-06 * [FEATURE] Support for Go modules. #501 * [FEATURE] `Timer.ObserveDuration` returns observed duration. #509 * [ENHANCEMENT] Improved doc comments and error messages. #504 * [BUGFIX] Fix race condition during metrics gathering. #512 * [BUGFIX] Fix testutil metric comparison for Histograms and empty labels. #494 #498 ## 0.9.1 / 2018-11-03 * [FEATURE] Add `WriteToTextfile` function to facilitate the creation of *.prom files for the textfile collector of the node exporter. #489 * [ENHANCEMENT] More descriptive error messages for inconsistent label cardinality. #487 * [ENHANCEMENT] Exposition: Use a GZIP encoder pool to avoid allocations in high-frequency scrape scenarios. #366 * [ENHANCEMENT] Exposition: Streaming serving of metrics data while encoding. #482 * [ENHANCEMENT] API client: Add a way to return the body of a 5xx response. #479 ## 0.9.0 / 2018-10-15 * [CHANGE] Go1.6 is no longer supported. * [CHANGE] More refinements of the `Registry` consistency checks: Duplicated labels are now detected, but inconsistent label dimensions are now allowed. Collisions with the “magic” metric and label names in Summaries and Histograms are detected now. #108 #417 #471 * [CHANGE] Changed `ProcessCollector` constructor. 
#219 * [CHANGE] Changed Go counter `go_memstats_heap_released_bytes_total` to gauge `go_memstats_heap_released_bytes`. #229 * [CHANGE] Unexported `LabelPairSorter`. #453 * [CHANGE] Removed the `Untyped` metric from direct instrumentation. #340 * [CHANGE] Unexported `MetricVec`. #319 * [CHANGE] Removed deprecated `Set` method from `Counter` #247 * [CHANGE] Removed deprecated `RegisterOrGet` and `MustRegisterOrGet`. #247 * [CHANGE] API client: Introduced versioned packages. * [FEATURE] A `Registerer` can be wrapped with prefixes and labels. #357 * [FEATURE] “Describe by collect” helper function. #239 * [FEATURE] Added package `testutil`. #58 * [FEATURE] Timestamp can be explicitly set for const metrics. #187 * [FEATURE] “Unchecked” collectors are possible now without cheating. #47 * [FEATURE] Pushing to the Pushgateway reworked in package `push` to support many new features. (The old functions are still usable but deprecated.) #372 #341 * [FEATURE] Configurable connection limit for scrapes. #179 * [FEATURE] New HTTP middlewares to instrument `http.Handler` and `http.RoundTripper`. The old middlewares and the pre-instrumented `/metrics` handler are (strongly) deprecated. #316 #57 #101 #224 * [FEATURE] “Currying” for metric vectors. #320 * [FEATURE] A `Summary` can be created without quantiles. #118 * [FEATURE] Added a `Timer` helper type. #231 * [FEATURE] Added a Graphite bridge. #197 * [FEATURE] Help strings are now optional. #460 * [FEATURE] Added `process_virtual_memory_max_bytes` metric. #438 #440 * [FEATURE] Added `go_gc_cpu_fraction` and `go_threads` metrics. #281 #277 * [FEATURE] Added `promauto` package with auto-registering metrics. #385 #393 * [FEATURE] Add `SetToCurrentTime` method to `Gauge`. #259 * [FEATURE] API client: Add AlertManager, Status, and Target methods. #402 * [FEATURE] API client: Add admin methods. #398 * [FEATURE] API client: Support series API. #361 * [FEATURE] API client: Support querying label values. * [ENHANCEMENT] Smarter creation of goroutines during scraping. Solves memory usage spikes in certain situations. #369 * [ENHANCEMENT] Counters are now faster if dealing with integers only. #367 * [ENHANCEMENT] Improved label validation. #274 #335 * [BUGFIX] Creating a const metric with an invalid `Desc` returns an error. #460 * [BUGFIX] Histogram observations don't race any longer with exposition. #275 * [BUGFIX] Fixed goroutine leaks. #236 #472 * [BUGFIX] Fixed an error message for exponential histogram buckets. #467 * [BUGFIX] Fixed data race writing to the metric map. #401 * [BUGFIX] API client: Decode JSON on a 4xx respons but do not on 204 responses. #476 #414 ## 0.8.0 / 2016-08-17 * [CHANGE] Registry is doing more consistency checks. This might break existing setups that used to export inconsistent metrics. * [CHANGE] Pushing to Pushgateway moved to package `push` and changed to allow arbitrary grouping. * [CHANGE] Removed `SelfCollector`. * [CHANGE] Removed `PanicOnCollectError` and `EnableCollectChecks` methods. * [CHANGE] Moved packages to the prometheus/common repo: `text`, `model`, `extraction`. * [CHANGE] Deprecated a number of functions. * [FEATURE] Allow custom registries. Added `Registerer` and `Gatherer` interfaces. * [FEATURE] Separated HTTP exposition, allowing custom HTTP handlers (package `promhttp`) and enabling the creation of other exposition mechanisms. * [FEATURE] `MustRegister` is variadic now, allowing registration of many collectors in one call. * [FEATURE] Added HTTP API v1 package. * [ENHANCEMENT] Numerous documentation improvements. 
* [ENHANCEMENT] Improved metric sorting. * [ENHANCEMENT] Inlined fnv64a hashing for improved performance. * [ENHANCEMENT] Several test improvements. * [BUGFIX] Handle collisions in MetricVec. ## 0.7.0 / 2015-07-27 * [CHANGE] Rename ExporterLabelPrefix to ExportedLabelPrefix. * [BUGFIX] Closed gaps in metric consistency check. * [BUGFIX] Validate LabelName/LabelSet on JSON unmarshaling. * [ENHANCEMENT] Document the possibility to create "empty" metrics in a metric vector. * [ENHANCEMENT] Fix and clarify various doc comments and the README.md. * [ENHANCEMENT] (Kind of) solve "The Proxy Problem" of http.InstrumentHandler. * [ENHANCEMENT] Change responseWriterDelegator.written to int64. ## 0.6.0 / 2015-06-01 * [CHANGE] Rename process_goroutines to go_goroutines. * [ENHANCEMENT] Validate label names during YAML decoding. * [ENHANCEMENT] Add LabelName regular expression. * [BUGFIX] Ensure alignment of struct members for 32-bit systems. ## 0.5.0 / 2015-05-06 * [BUGFIX] Removed a weakness in the fingerprinting aka signature code. This makes fingerprinting slower and more allocation-heavy, but the weakness was too severe to be tolerated. * [CHANGE] As a result of the above, Metric.Fingerprint is now returning a different fingerprint. To keep the same fingerprint, the new method Metric.FastFingerprint was introduced, which will be used by the Prometheus server for storage purposes (implying that a collision detection has to be added, too). * [ENHANCEMENT] The Metric.Equal and Metric.Before do not depend on fingerprinting anymore, removing the possibility of an undetected fingerprint collision. * [FEATURE] The Go collector in the exposition library includes garbage collection stats. * [FEATURE] The exposition library allows to create constant "throw-away" summaries and histograms. * [CHANGE] A number of new reserved labels and prefixes. ## 0.4.0 / 2015-04-08 * [CHANGE] Return NaN when Summaries have no observations yet. * [BUGFIX] Properly handle Summary decay upon Write(). * [BUGFIX] Fix the documentation link to the consumption library. * [FEATURE] Allow the metric family injection hook to merge with existing metric families. * [ENHANCEMENT] Removed cgo dependency and conditional compilation of procfs. * [MAINTENANCE] Adjusted to changes in matttproud/golang_protobuf_extensions. ## 0.3.2 / 2015-03-11 * [BUGFIX] Fixed the receiver type of COWMetric.Set(). This method is only used by the Prometheus server internally. * [CLEANUP] Added licenses of vendored code left out by godep. ## 0.3.1 / 2015-03-04 * [ENHANCEMENT] Switched fingerprinting functions from own free list to sync.Pool. * [CHANGE] Makefile uses Go 1.4.2 now (only relevant for examples and tests). ## 0.3.0 / 2015-03-03 * [CHANGE] Changed the fingerprinting for metrics. THIS WILL INVALIDATE ALL PERSISTED FINGERPRINTS. IF YOU COMPILE THE PROMETHEUS SERVER WITH THIS VERSION, YOU HAVE TO WIPE THE PREVIOUSLY CREATED STORAGE. * [CHANGE] LabelValuesToSignature removed. (Nobody had used it, and it was arguably broken.) * [CHANGE] Vendored dependencies. Those are only used by the Makefile. If client_golang is used as a library, the vendoring will stay out of your way. * [BUGFIX] Remove a weakness in the fingerprinting for metrics. (This made the fingerprinting change above necessary.) * [FEATURE] Added new fingerprinting functions SignatureForLabels and SignatureWithoutLabels to be used by the Prometheus server. These functions require fewer allocations than the ones currently used by the server. 
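The 0.8.0 and 0.9.0 entries above introduced the building blocks most users touch today: custom registries via the `Registerer`/`Gatherer` interfaces, HTTP exposition through the separate `promhttp` package, and the `Timer` helper. A minimal, illustrative sketch of those pieces working together (the metric names, handler path, and port are made up for the example and are not taken from this repository):

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Custom registry instead of the global default one (0.8.0).
	reg := prometheus.NewRegistry()

	requests := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "myapp_requests_total",
		Help: "Total number of handled requests.",
	})
	latency := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "myapp_request_duration_seconds",
		Help: "Request latency distribution.",
	})
	reg.MustRegister(requests, latency)

	http.HandleFunc("/work", func(w http.ResponseWriter, r *http.Request) {
		// Timer helper (0.9.0): observes the handler duration on return.
		timer := prometheus.NewTimer(latency)
		defer timer.ObserveDuration()
		requests.Inc()
	})

	// HTTP exposition lives in the separate promhttp package (0.8.0).
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	http.ListenAndServe(":8080", nil)
}
```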
## 0.2.0 / 2015-02-23 * [FEATURE] Introduce new Histagram metric type. * [CHANGE] Ignore process collector errors for now (better error handling pending). * [CHANGE] Use clear error interface for process pidFn. * [BUGFIX] Fix Go download links for several archs and OSes. * [ENHANCEMENT] Massively improve Gauge and Counter performance. * [ENHANCEMENT] Catch illegal label names for summaries in histograms. * [ENHANCEMENT] Reduce allocations during fingerprinting. * [ENHANCEMENT] Remove cgo dependency. procfs package will only be included if both cgo is available and the build is for an OS with procfs. * [CLEANUP] Clean up code style issues. * [CLEANUP] Mark slow test as such and exclude them from travis. * [CLEANUP] Update protobuf library package name. * [CLEANUP] Updated vendoring of beorn7/perks. ## 0.1.0 / 2015-02-02 * [CLEANUP] Introduced semantic versioning and changelog. From now on, changes will be reported in this file. client_golang-1.11.0/CODE_OF_CONDUCT.md000066400000000000000000000002331405741072000171740ustar00rootroot00000000000000## Prometheus Community Code of Conduct Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). client_golang-1.11.0/CONTRIBUTING.md000066400000000000000000000016661405741072000166410ustar00rootroot00000000000000# Contributing Prometheus uses GitHub to manage reviews of pull requests. * If you have a trivial fix or improvement, go ahead and create a pull request, addressing (with `@...`) the maintainer of this repository (see [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. * If you plan to do something more involved, first discuss your ideas on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). This will avoid unnecessary work and surely give you and us a good deal of inspiration. * Relevant coding style guidelines are the [Go Code Review Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) and the _Formatting and style_ section of Peter Bourgon's [Go: Best Practices for Production Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). * Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works) client_golang-1.11.0/Dockerfile000066400000000000000000000020331405741072000163670ustar00rootroot00000000000000# This Dockerfile builds an image for a client_golang example. # # Use as (from the root for the client_golang repository): # docker build -f examples/$name/Dockerfile -t prometheus/golang-example-$name . # Builder image, where we build the example. FROM golang:1 AS builder WORKDIR /go/src/github.com/prometheus/client_golang COPY . . WORKDIR /go/src/github.com/prometheus/client_golang/prometheus RUN go get -d WORKDIR /go/src/github.com/prometheus/client_golang/examples/random RUN CGO_ENABLED=0 GOOS=linux go build -a -tags netgo -ldflags '-w' WORKDIR /go/src/github.com/prometheus/client_golang/examples/simple RUN CGO_ENABLED=0 GOOS=linux go build -a -tags netgo -ldflags '-w' # Final image. FROM quay.io/prometheus/busybox:latest LABEL maintainer="The Prometheus Authors " COPY --from=builder /go/src/github.com/prometheus/client_golang/examples/random \ /go/src/github.com/prometheus/client_golang/examples/simple ./ EXPOSE 8080 CMD ["echo", "Please run an example. 
Either /random or /simple"] client_golang-1.11.0/LICENSE000066400000000000000000000261351405741072000154130ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. 
Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. client_golang-1.11.0/MAINTAINERS.md000066400000000000000000000001441405741072000164720ustar00rootroot00000000000000* Bartłomiej Płotka @bwplotka * Kemal Akkoyun @kakkoyun client_golang-1.11.0/Makefile000066400000000000000000000012711405741072000160400ustar00rootroot00000000000000# Copyright 2018 The Prometheus Authors # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. include Makefile.common .PHONY: test test: deps common-test .PHONY: test-short test-short: deps common-test-short client_golang-1.11.0/Makefile.common000066400000000000000000000234651405741072000173400ustar00rootroot00000000000000# Copyright 2018 The Prometheus Authors # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # A common Makefile that includes rules to be reused in different prometheus projects. # !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! # Example usage : # Create the main Makefile in the root project directory. # include Makefile.common # customTarget: # @echo ">> Running customTarget" # # Ensure GOBIN is not set during build so that promu is installed to the correct path unexport GOBIN GO ?= go GOFMT ?= $(GO)fmt FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) GOOPTS ?= GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) GO_VERSION ?= $(shell $(GO) version) GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') GOVENDOR := GO111MODULE := ifeq (, $(PRE_GO_111)) ifneq (,$(wildcard go.mod)) # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). GO111MODULE := on ifneq (,$(wildcard vendor)) # Always use the local vendor/ directory to satisfy the dependencies. GOOPTS := $(GOOPTS) -mod=vendor endif endif else ifneq (,$(wildcard go.mod)) ifneq (,$(wildcard vendor)) $(warning This repository requires Go >= 1.11 because of Go modules) $(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)') endif else # This repository isn't using Go modules (yet). 
GOVENDOR := $(FIRST_GOPATH)/bin/govendor endif endif PROMU := $(FIRST_GOPATH)/bin/promu pkgs = ./... ifeq (arm, $(GOHOSTARCH)) GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) else GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) endif GOTEST := $(GO) test GOTEST_DIR := ifneq ($(CIRCLE_JOB),) ifneq ($(shell which gotestsum),) GOTEST_DIR := test-results GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- endif endif PROMU_VERSION ?= 0.12.0 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= GOLANGCI_LINT_VERSION ?= v1.39.0 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint endif endif PREFIX ?= $(shell pwd) BIN_DIR ?= $(shell pwd) DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) DOCKERFILE_PATH ?= ./Dockerfile DOCKERBUILD_CONTEXT ?= ./ DOCKER_REPO ?= prom DOCKER_ARCHS ?= amd64 BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) ifeq ($(GOHOSTARCH),amd64) ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) # Only supported on amd64 test-flags := -race endif endif # This rule is used to forward a target like "build" to "common-build". This # allows a new "build" target to be defined in a Makefile which includes this # one and override "common-build" without override warnings. %: common-% ; .PHONY: common-all common-all: precheck style check_license lint unused build test .PHONY: common-style common-style: @echo ">> checking code style" @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \ if [ -n "$${fmtRes}" ]; then \ echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ echo "Please ensure you are using $$($(GO) version) for formatting code."; \ exit 1; \ fi .PHONY: common-check_license common-check_license: @echo ">> checking license header" @licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \ awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ done); \ if [ -n "$${licRes}" ]; then \ echo "license header checking failed:"; echo "$${licRes}"; \ exit 1; \ fi .PHONY: common-deps common-deps: @echo ">> getting dependencies" ifdef GO111MODULE GO111MODULE=$(GO111MODULE) $(GO) mod download else $(GO) get $(GOOPTS) -t ./... 
endif .PHONY: update-go-deps update-go-deps: @echo ">> updating Go dependencies" @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ $(GO) get $$m; \ done GO111MODULE=$(GO111MODULE) $(GO) mod tidy ifneq (,$(wildcard vendor)) GO111MODULE=$(GO111MODULE) $(GO) mod vendor endif .PHONY: common-test-short common-test-short: $(GOTEST_DIR) @echo ">> running short tests" GO111MODULE=$(GO111MODULE) $(GOTEST) -short $(GOOPTS) $(pkgs) .PHONY: common-test common-test: $(GOTEST_DIR) @echo ">> running all tests" GO111MODULE=$(GO111MODULE) $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) $(GOTEST_DIR): @mkdir -p $@ .PHONY: common-format common-format: @echo ">> formatting code" GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs) .PHONY: common-vet common-vet: @echo ">> vetting code" GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) .PHONY: common-lint common-lint: $(GOLANGCI_LINT) ifdef GOLANGCI_LINT @echo ">> running golangci-lint" ifdef GO111MODULE # 'go list' needs to be executed before staticcheck to prepopulate the modules cache. # Otherwise staticcheck might fail randomly for some reason not yet explained. GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) else $(GOLANGCI_LINT) run $(pkgs) endif endif # For backward-compatibility. .PHONY: common-staticcheck common-staticcheck: lint .PHONY: common-unused common-unused: $(GOVENDOR) ifdef GOVENDOR @echo ">> running check for unused packages" @$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages' else ifdef GO111MODULE @echo ">> running check for unused/missing packages in go.mod" GO111MODULE=$(GO111MODULE) $(GO) mod tidy ifeq (,$(wildcard vendor)) @git diff --exit-code -- go.sum go.mod else @echo ">> running check for unused packages in vendor/" GO111MODULE=$(GO111MODULE) $(GO) mod vendor @git diff --exit-code -- go.sum go.mod vendor/ endif endif endif .PHONY: common-build common-build: promu @echo ">> building binaries" GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) .PHONY: common-tarball common-tarball: promu @echo ">> building release tarball" $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) .PHONY: common-docker $(BUILD_DOCKER_ARCHS) common-docker: $(BUILD_DOCKER_ARCHS) $(BUILD_DOCKER_ARCHS): common-docker-%: docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ -f $(DOCKERFILE_PATH) \ --build-arg ARCH="$*" \ --build-arg OS="linux" \ $(DOCKERBUILD_CONTEXT) .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) common-docker-publish: $(PUBLISH_DOCKER_ARCHS) $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) .PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) common-docker-tag-latest: $(TAG_DOCKER_ARCHS) $(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" .PHONY: common-docker-manifest common-docker-manifest: DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach 
ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG)) DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" .PHONY: promu promu: $(PROMU) $(PROMU): $(eval PROMU_TMP := $(shell mktemp -d)) curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP) mkdir -p $(FIRST_GOPATH)/bin cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu rm -r $(PROMU_TMP) .PHONY: proto proto: @echo ">> generating code from proto files" @./scripts/genproto.sh ifdef GOLANGCI_LINT $(GOLANGCI_LINT): mkdir -p $(FIRST_GOPATH)/bin curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \ | sed -e '/install -d/d' \ | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) endif ifdef GOVENDOR .PHONY: $(GOVENDOR) $(GOVENDOR): GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor endif .PHONY: precheck precheck:: define PRECHECK_COMMAND_template = precheck:: $(1)_precheck PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) .PHONY: $(1)_precheck $(1)_precheck: @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \ exit 1; \ fi endef client_golang-1.11.0/NOTICE000066400000000000000000000015121405741072000153020ustar00rootroot00000000000000Prometheus instrumentation library for Go applications Copyright 2012-2015 The Prometheus Authors This product includes software developed at SoundCloud Ltd. (http://soundcloud.com/). The following components are included in this product: perks - a fork of https://github.com/bmizerany/perks https://github.com/beorn7/perks Copyright 2013-2015 Blake Mizerany, Björn Rabenstein See https://github.com/beorn7/perks/blob/master/README.md for license details. Go support for Protocol Buffers - Google's data interchange format http://github.com/golang/protobuf/ Copyright 2010 The Go Authors See source code for license details. Support for streaming Protocol Buffer messages for the Go language (golang). https://github.com/matttproud/golang_protobuf_extensions Copyright 2013 Matt T. Proud Licensed under the Apache License, Version 2.0 client_golang-1.11.0/README.md000066400000000000000000000067561405741072000156740ustar00rootroot00000000000000# Prometheus Go client library [![Build Status](https://travis-ci.org/prometheus/client_golang.svg?branch=master)](https://travis-ci.org/prometheus/client_golang) [![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/client_golang)](https://goreportcard.com/report/github.com/prometheus/client_golang) [![go-doc](https://godoc.org/github.com/prometheus/client_golang?status.svg)](https://godoc.org/github.com/prometheus/client_golang) This is the [Go](http://golang.org) client library for [Prometheus](http://prometheus.io). It has two separate parts, one for instrumenting application code, and one for creating clients that talk to the Prometheus HTTP API. __This library requires Go1.13 or later.__ ## Important note about releases and stability This repository generally follows [Semantic Versioning](https://semver.org/). However, the API client in prometheus/client_golang/api/… is still considered experimental. Breaking changes of the API client will _not_ trigger a new major release. The same is true for selected other new features explicitly marked as **EXPERIMENTAL** in CHANGELOG.md. 
Features that require breaking changes in the stable parts of the repository are being batched up and tracked in the [v2 milestone](https://github.com/prometheus/client_golang/milestone/2). The v2 development happens in a [separate branch](https://github.com/prometheus/client_golang/tree/dev-v2) for the time being. v2 releases off that branch will happen once sufficient stability is reached. In view of the widespread use of this repository, v1 and v2 will coexist for a while to enable a convenient transition. ## Instrumenting applications [![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/prometheus)](http://gocover.io/github.com/prometheus/client_golang/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus) The [`prometheus` directory](https://github.com/prometheus/client_golang/tree/master/prometheus) contains the instrumentation library. See the [guide](https://prometheus.io/docs/guides/go-application/) on the Prometheus website to learn more about instrumenting applications. The [`examples` directory](https://github.com/prometheus/client_golang/tree/master/examples) contains simple examples of instrumented code. ## Client for the Prometheus HTTP API [![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/api/prometheus/v1)](http://gocover.io/github.com/prometheus/client_golang/api/prometheus/v1) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/api/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/api) The [`api/prometheus` directory](https://github.com/prometheus/client_golang/tree/master/api/prometheus) contains the client for the [Prometheus HTTP API](http://prometheus.io/docs/querying/api/). It allows you to write Go applications that query time series data from a Prometheus server. It is still in alpha stage. ## Where is `model`, `extraction`, and `text`? The `model` packages has been moved to [`prometheus/common/model`](https://github.com/prometheus/common/tree/master/model). The `extraction` and `text` packages are now contained in [`prometheus/common/expfmt`](https://github.com/prometheus/common/tree/master/expfmt). ## Contributing and community See the [contributing guidelines](CONTRIBUTING.md) and the [Community section](http://prometheus.io/community/) of the homepage. client_golang-1.11.0/SECURITY.md000066400000000000000000000002521405741072000161670ustar00rootroot00000000000000# Reporting a security issue The Prometheus security policy, including how to report vulnerabilities, can be found here: https://prometheus.io/docs/operating/security/ client_golang-1.11.0/VERSION000066400000000000000000000000071405741072000154440ustar00rootroot000000000000001.11.0 client_golang-1.11.0/api/000077500000000000000000000000001405741072000151505ustar00rootroot00000000000000client_golang-1.11.0/api/client.go000066400000000000000000000055711405741072000167650ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. // Package api provides clients for the HTTP APIs. package api import ( "context" "io/ioutil" "net" "net/http" "net/url" "path" "strings" "time" ) // DefaultRoundTripper is used if no RoundTripper is set in Config. var DefaultRoundTripper http.RoundTripper = &http.Transport{ Proxy: http.ProxyFromEnvironment, DialContext: (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, }).DialContext, TLSHandshakeTimeout: 10 * time.Second, } // Config defines configuration parameters for a new client. type Config struct { // The address of the Prometheus to connect to. Address string // RoundTripper is used by the Client to drive HTTP requests. If not // provided, DefaultRoundTripper will be used. RoundTripper http.RoundTripper } func (cfg *Config) roundTripper() http.RoundTripper { if cfg.RoundTripper == nil { return DefaultRoundTripper } return cfg.RoundTripper } // Client is the interface for an API client. type Client interface { URL(ep string, args map[string]string) *url.URL Do(context.Context, *http.Request) (*http.Response, []byte, error) } // NewClient returns a new Client. // // It is safe to use the returned Client from multiple goroutines. func NewClient(cfg Config) (Client, error) { u, err := url.Parse(cfg.Address) if err != nil { return nil, err } u.Path = strings.TrimRight(u.Path, "/") return &httpClient{ endpoint: u, client: http.Client{Transport: cfg.roundTripper()}, }, nil } type httpClient struct { endpoint *url.URL client http.Client } func (c *httpClient) URL(ep string, args map[string]string) *url.URL { p := path.Join(c.endpoint.Path, ep) for arg, val := range args { arg = ":" + arg p = strings.Replace(p, arg, val, -1) } u := *c.endpoint u.Path = p return &u } func (c *httpClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) { if ctx != nil { req = req.WithContext(ctx) } resp, err := c.client.Do(req) defer func() { if resp != nil { resp.Body.Close() } }() if err != nil { return nil, nil, err } var body []byte done := make(chan struct{}) go func() { body, err = ioutil.ReadAll(resp.Body) close(done) }() select { case <-ctx.Done(): <-done err = resp.Body.Close() if err == nil { err = ctx.Err() } case <-done: } return resp, body, err } client_golang-1.11.0/api/client_test.go000066400000000000000000000052461405741072000200230ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
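// The api.Client implemented above is intentionally small: URL expands an
// endpoint template (":name"-style parameters) and Do executes a request with
// context cancellation while reading the response body. A minimal, illustrative
// sketch of using it directly (the address is a placeholder; most callers will
// prefer the typed v1 package further below in this repository):
//
//	client, err := api.NewClient(api.Config{Address: "http://localhost:9090"})
//	if err != nil {
//		log.Fatal(err)
//	}
//	u := client.URL("/api/v1/label/:name/values", map[string]string{"name": "job"})
//	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
//	if err != nil {
//		log.Fatal(err)
//	}
//	_, body, err := client.Do(context.Background(), req)
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(string(body))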
package api import ( "net/http" "net/url" "testing" ) func TestConfig(t *testing.T) { c := Config{} if c.roundTripper() != DefaultRoundTripper { t.Fatalf("expected default roundtripper for nil RoundTripper field") } } func TestClientURL(t *testing.T) { tests := []struct { address string endpoint string args map[string]string expected string }{ { address: "http://localhost:9090", endpoint: "/test", expected: "http://localhost:9090/test", }, { address: "http://localhost", endpoint: "/test", expected: "http://localhost/test", }, { address: "http://localhost:9090", endpoint: "test", expected: "http://localhost:9090/test", }, { address: "http://localhost:9090/prefix", endpoint: "/test", expected: "http://localhost:9090/prefix/test", }, { address: "https://localhost:9090/", endpoint: "/test/", expected: "https://localhost:9090/test", }, { address: "http://localhost:9090", endpoint: "/test/:param", args: map[string]string{ "param": "content", }, expected: "http://localhost:9090/test/content", }, { address: "http://localhost:9090", endpoint: "/test/:param/more/:param", args: map[string]string{ "param": "content", }, expected: "http://localhost:9090/test/content/more/content", }, { address: "http://localhost:9090", endpoint: "/test/:param/more/:foo", args: map[string]string{ "param": "content", "foo": "bar", }, expected: "http://localhost:9090/test/content/more/bar", }, { address: "http://localhost:9090", endpoint: "/test/:param", args: map[string]string{ "nonexistent": "content", }, expected: "http://localhost:9090/test/:param", }, } for _, test := range tests { ep, err := url.Parse(test.address) if err != nil { t.Fatal(err) } hclient := &httpClient{ endpoint: ep, client: http.Client{Transport: DefaultRoundTripper}, } u := hclient.URL(test.endpoint, test.args) if u.String() != test.expected { t.Errorf("unexpected result: got %s, want %s", u, test.expected) continue } } } client_golang-1.11.0/api/prometheus/000077500000000000000000000000001405741072000173435ustar00rootroot00000000000000client_golang-1.11.0/api/prometheus/v1/000077500000000000000000000000001405741072000176715ustar00rootroot00000000000000client_golang-1.11.0/api/prometheus/v1/api.go000066400000000000000000001001251405741072000207700ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// Package v1 provides bindings to the Prometheus HTTP API v1: // http://prometheus.io/docs/querying/api/ package v1 import ( "context" "errors" "fmt" "math" "net/http" "net/url" "strconv" "strings" "time" "unsafe" json "github.com/json-iterator/go" "github.com/prometheus/common/model" "github.com/prometheus/client_golang/api" ) func init() { json.RegisterTypeEncoderFunc("model.SamplePair", marshalPointJSON, marshalPointJSONIsEmpty) json.RegisterTypeDecoderFunc("model.SamplePair", unMarshalPointJSON) } func unMarshalPointJSON(ptr unsafe.Pointer, iter *json.Iterator) { p := (*model.SamplePair)(ptr) if !iter.ReadArray() { iter.ReportError("unmarshal model.SamplePair", "SamplePair must be [timestamp, value]") return } t := iter.ReadNumber() if err := p.Timestamp.UnmarshalJSON([]byte(t)); err != nil { iter.ReportError("unmarshal model.SamplePair", err.Error()) return } if !iter.ReadArray() { iter.ReportError("unmarshal model.SamplePair", "SamplePair missing value") return } f, err := strconv.ParseFloat(iter.ReadString(), 64) if err != nil { iter.ReportError("unmarshal model.SamplePair", err.Error()) return } p.Value = model.SampleValue(f) if iter.ReadArray() { iter.ReportError("unmarshal model.SamplePair", "SamplePair has too many values, must be [timestamp, value]") return } } func marshalPointJSON(ptr unsafe.Pointer, stream *json.Stream) { p := *((*model.SamplePair)(ptr)) stream.WriteArrayStart() // Write out the timestamp as a float divided by 1000. // This is ~3x faster than converting to a float. t := int64(p.Timestamp) if t < 0 { stream.WriteRaw(`-`) t = -t } stream.WriteInt64(t / 1000) fraction := t % 1000 if fraction != 0 { stream.WriteRaw(`.`) if fraction < 100 { stream.WriteRaw(`0`) } if fraction < 10 { stream.WriteRaw(`0`) } stream.WriteInt64(fraction) } stream.WriteMore() stream.WriteRaw(`"`) // Taken from https://github.com/json-iterator/go/blob/master/stream_float.go#L71 as a workaround // to https://github.com/json-iterator/go/issues/365 (jsoniter, to follow json standard, doesn't allow inf/nan) buf := stream.Buffer() abs := math.Abs(float64(p.Value)) fmt := byte('f') // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. if abs != 0 { if abs < 1e-6 || abs >= 1e21 { fmt = 'e' } } buf = strconv.AppendFloat(buf, float64(p.Value), fmt, -1, 64) stream.SetBuffer(buf) stream.WriteRaw(`"`) stream.WriteArrayEnd() } func marshalPointJSONIsEmpty(ptr unsafe.Pointer) bool { return false } const ( apiPrefix = "/api/v1" epAlerts = apiPrefix + "/alerts" epAlertManagers = apiPrefix + "/alertmanagers" epQuery = apiPrefix + "/query" epQueryRange = apiPrefix + "/query_range" epQueryExemplars = apiPrefix + "/query_exemplars" epLabels = apiPrefix + "/labels" epLabelValues = apiPrefix + "/label/:name/values" epSeries = apiPrefix + "/series" epTargets = apiPrefix + "/targets" epTargetsMetadata = apiPrefix + "/targets/metadata" epMetadata = apiPrefix + "/metadata" epRules = apiPrefix + "/rules" epSnapshot = apiPrefix + "/admin/tsdb/snapshot" epDeleteSeries = apiPrefix + "/admin/tsdb/delete_series" epCleanTombstones = apiPrefix + "/admin/tsdb/clean_tombstones" epConfig = apiPrefix + "/status/config" epFlags = apiPrefix + "/status/flags" epBuildinfo = apiPrefix + "/status/buildinfo" epRuntimeinfo = apiPrefix + "/status/runtimeinfo" epTSDB = apiPrefix + "/status/tsdb" ) // AlertState models the state of an alert. type AlertState string // ErrorType models the different API error types. 
type ErrorType string // HealthStatus models the health status of a scrape target. type HealthStatus string // RuleType models the type of a rule. type RuleType string // RuleHealth models the health status of a rule. type RuleHealth string // MetricType models the type of a metric. type MetricType string const ( // Possible values for AlertState. AlertStateFiring AlertState = "firing" AlertStateInactive AlertState = "inactive" AlertStatePending AlertState = "pending" // Possible values for ErrorType. ErrBadData ErrorType = "bad_data" ErrTimeout ErrorType = "timeout" ErrCanceled ErrorType = "canceled" ErrExec ErrorType = "execution" ErrBadResponse ErrorType = "bad_response" ErrServer ErrorType = "server_error" ErrClient ErrorType = "client_error" // Possible values for HealthStatus. HealthGood HealthStatus = "up" HealthUnknown HealthStatus = "unknown" HealthBad HealthStatus = "down" // Possible values for RuleType. RuleTypeRecording RuleType = "recording" RuleTypeAlerting RuleType = "alerting" // Possible values for RuleHealth. RuleHealthGood = "ok" RuleHealthUnknown = "unknown" RuleHealthBad = "err" // Possible values for MetricType MetricTypeCounter MetricType = "counter" MetricTypeGauge MetricType = "gauge" MetricTypeHistogram MetricType = "histogram" MetricTypeGaugeHistogram MetricType = "gaugehistogram" MetricTypeSummary MetricType = "summary" MetricTypeInfo MetricType = "info" MetricTypeStateset MetricType = "stateset" MetricTypeUnknown MetricType = "unknown" ) // Error is an error returned by the API. type Error struct { Type ErrorType Msg string Detail string } func (e *Error) Error() string { return fmt.Sprintf("%s: %s", e.Type, e.Msg) } // Range represents a sliced time range. type Range struct { // The boundaries of the time range. Start, End time.Time // The maximum time between two slices within the boundaries. Step time.Duration } // API provides bindings for Prometheus's v1 API. type API interface { // Alerts returns a list of all active alerts. Alerts(ctx context.Context) (AlertsResult, error) // AlertManagers returns an overview of the current state of the Prometheus alert manager discovery. AlertManagers(ctx context.Context) (AlertManagersResult, error) // CleanTombstones removes the deleted data from disk and cleans up the existing tombstones. CleanTombstones(ctx context.Context) error // Config returns the current Prometheus configuration. Config(ctx context.Context) (ConfigResult, error) // DeleteSeries deletes data for a selection of series in a time range. DeleteSeries(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error // Flags returns the flag values that Prometheus was launched with. Flags(ctx context.Context) (FlagsResult, error) // LabelNames returns the unique label names present in the block in sorted order by given time range and matchers. LabelNames(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]string, Warnings, error) // LabelValues performs a query for the values of the given label, time range and matchers. LabelValues(ctx context.Context, label string, matches []string, startTime time.Time, endTime time.Time) (model.LabelValues, Warnings, error) // Query performs a query for the given time. Query(ctx context.Context, query string, ts time.Time) (model.Value, Warnings, error) // QueryRange performs a query for the given range. 
QueryRange(ctx context.Context, query string, r Range) (model.Value, Warnings, error) // QueryExemplars performs a query for exemplars by the given query and time range. QueryExemplars(ctx context.Context, query string, startTime time.Time, endTime time.Time) ([]ExemplarQueryResult, error) // Buildinfo returns various build information properties about the Prometheus server Buildinfo(ctx context.Context) (BuildinfoResult, error) // Runtimeinfo returns the various runtime information properties about the Prometheus server. Runtimeinfo(ctx context.Context) (RuntimeinfoResult, error) // Series finds series by label matchers. Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]model.LabelSet, Warnings, error) // Snapshot creates a snapshot of all current data into snapshots/- // under the TSDB's data directory and returns the directory as response. Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error) // Rules returns a list of alerting and recording rules that are currently loaded. Rules(ctx context.Context) (RulesResult, error) // Targets returns an overview of the current state of the Prometheus target discovery. Targets(ctx context.Context) (TargetsResult, error) // TargetsMetadata returns metadata about metrics currently scraped by the target. TargetsMetadata(ctx context.Context, matchTarget string, metric string, limit string) ([]MetricMetadata, error) // Metadata returns metadata about metrics currently scraped by the metric name. Metadata(ctx context.Context, metric string, limit string) (map[string][]Metadata, error) // TSDB returns the cardinality statistics. TSDB(ctx context.Context) (TSDBResult, error) } // AlertsResult contains the result from querying the alerts endpoint. type AlertsResult struct { Alerts []Alert `json:"alerts"` } // AlertManagersResult contains the result from querying the alertmanagers endpoint. type AlertManagersResult struct { Active []AlertManager `json:"activeAlertManagers"` Dropped []AlertManager `json:"droppedAlertManagers"` } // AlertManager models a configured Alert Manager. type AlertManager struct { URL string `json:"url"` } // ConfigResult contains the result from querying the config endpoint. type ConfigResult struct { YAML string `json:"yaml"` } // FlagsResult contains the result from querying the flag endpoint. type FlagsResult map[string]string // BuildinfoResult contains the results from querying the buildinfo endpoint. type BuildinfoResult struct { Version string `json:"version"` Revision string `json:"revision"` Branch string `json:"branch"` BuildUser string `json:"buildUser"` BuildDate string `json:"buildDate"` GoVersion string `json:"goVersion"` } // RuntimeinfoResult contains the result from querying the runtimeinfo endpoint. type RuntimeinfoResult struct { StartTime time.Time `json:"startTime"` CWD string `json:"CWD"` ReloadConfigSuccess bool `json:"reloadConfigSuccess"` LastConfigTime time.Time `json:"lastConfigTime"` ChunkCount int `json:"chunkCount"` TimeSeriesCount int `json:"timeSeriesCount"` CorruptionCount int `json:"corruptionCount"` GoroutineCount int `json:"goroutineCount"` GOMAXPROCS int `json:"GOMAXPROCS"` GOGC string `json:"GOGC"` GODEBUG string `json:"GODEBUG"` StorageRetention string `json:"storageRetention"` } // SnapshotResult contains the result from querying the snapshot endpoint. type SnapshotResult struct { Name string `json:"name"` } // RulesResult contains the result from querying the rules endpoint. 
type RulesResult struct { Groups []RuleGroup `json:"groups"` } // RuleGroup models a rule group that contains a set of recording and alerting rules. type RuleGroup struct { Name string `json:"name"` File string `json:"file"` Interval float64 `json:"interval"` Rules Rules `json:"rules"` } // Recording and alerting rules are stored in the same slice to preserve the order // that rules are returned in by the API. // // Rule types can be determined using a type switch: // switch v := rule.(type) { // case RecordingRule: // fmt.Print("got a recording rule") // case AlertingRule: // fmt.Print("got a alerting rule") // default: // fmt.Printf("unknown rule type %s", v) // } type Rules []interface{} // AlertingRule models a alerting rule. type AlertingRule struct { Name string `json:"name"` Query string `json:"query"` Duration float64 `json:"duration"` Labels model.LabelSet `json:"labels"` Annotations model.LabelSet `json:"annotations"` Alerts []*Alert `json:"alerts"` Health RuleHealth `json:"health"` LastError string `json:"lastError,omitempty"` EvaluationTime float64 `json:"evaluationTime"` LastEvaluation time.Time `json:"lastEvaluation"` State string `json:"state"` } // RecordingRule models a recording rule. type RecordingRule struct { Name string `json:"name"` Query string `json:"query"` Labels model.LabelSet `json:"labels,omitempty"` Health RuleHealth `json:"health"` LastError string `json:"lastError,omitempty"` EvaluationTime float64 `json:"evaluationTime"` LastEvaluation time.Time `json:"lastEvaluation"` } // Alert models an active alert. type Alert struct { ActiveAt time.Time `json:"activeAt"` Annotations model.LabelSet Labels model.LabelSet State AlertState Value string } // TargetsResult contains the result from querying the targets endpoint. type TargetsResult struct { Active []ActiveTarget `json:"activeTargets"` Dropped []DroppedTarget `json:"droppedTargets"` } // ActiveTarget models an active Prometheus scrape target. type ActiveTarget struct { DiscoveredLabels map[string]string `json:"discoveredLabels"` Labels model.LabelSet `json:"labels"` ScrapePool string `json:"scrapePool"` ScrapeURL string `json:"scrapeUrl"` GlobalURL string `json:"globalUrl"` LastError string `json:"lastError"` LastScrape time.Time `json:"lastScrape"` LastScrapeDuration float64 `json:"lastScrapeDuration"` Health HealthStatus `json:"health"` } // DroppedTarget models a dropped Prometheus scrape target. type DroppedTarget struct { DiscoveredLabels map[string]string `json:"discoveredLabels"` } // MetricMetadata models the metadata of a metric with its scrape target and name. type MetricMetadata struct { Target map[string]string `json:"target"` Metric string `json:"metric,omitempty"` Type MetricType `json:"type"` Help string `json:"help"` Unit string `json:"unit"` } // Metadata models the metadata of a metric. type Metadata struct { Type MetricType `json:"type"` Help string `json:"help"` Unit string `json:"unit"` } // queryResult contains result data for a query. type queryResult struct { Type model.ValueType `json:"resultType"` Result interface{} `json:"result"` // The decoded value. v model.Value } // TSDBResult contains the result from querying the tsdb endpoint. type TSDBResult struct { SeriesCountByMetricName []Stat `json:"seriesCountByMetricName"` LabelValueCountByLabelName []Stat `json:"labelValueCountByLabelName"` MemoryInBytesByLabelName []Stat `json:"memoryInBytesByLabelName"` SeriesCountByLabelValuePair []Stat `json:"seriesCountByLabelValuePair"` } // Stat models information about statistic value. 
type Stat struct { Name string `json:"name"` Value uint64 `json:"value"` } func (rg *RuleGroup) UnmarshalJSON(b []byte) error { v := struct { Name string `json:"name"` File string `json:"file"` Interval float64 `json:"interval"` Rules []json.RawMessage `json:"rules"` }{} if err := json.Unmarshal(b, &v); err != nil { return err } rg.Name = v.Name rg.File = v.File rg.Interval = v.Interval for _, rule := range v.Rules { alertingRule := AlertingRule{} if err := json.Unmarshal(rule, &alertingRule); err == nil { rg.Rules = append(rg.Rules, alertingRule) continue } recordingRule := RecordingRule{} if err := json.Unmarshal(rule, &recordingRule); err == nil { rg.Rules = append(rg.Rules, recordingRule) continue } return errors.New("failed to decode JSON into an alerting or recording rule") } return nil } func (r *AlertingRule) UnmarshalJSON(b []byte) error { v := struct { Type string `json:"type"` }{} if err := json.Unmarshal(b, &v); err != nil { return err } if v.Type == "" { return errors.New("type field not present in rule") } if v.Type != string(RuleTypeAlerting) { return fmt.Errorf("expected rule of type %s but got %s", string(RuleTypeAlerting), v.Type) } rule := struct { Name string `json:"name"` Query string `json:"query"` Duration float64 `json:"duration"` Labels model.LabelSet `json:"labels"` Annotations model.LabelSet `json:"annotations"` Alerts []*Alert `json:"alerts"` Health RuleHealth `json:"health"` LastError string `json:"lastError,omitempty"` EvaluationTime float64 `json:"evaluationTime"` LastEvaluation time.Time `json:"lastEvaluation"` State string `json:"state"` }{} if err := json.Unmarshal(b, &rule); err != nil { return err } r.Health = rule.Health r.Annotations = rule.Annotations r.Name = rule.Name r.Query = rule.Query r.Alerts = rule.Alerts r.Duration = rule.Duration r.Labels = rule.Labels r.LastError = rule.LastError r.EvaluationTime = rule.EvaluationTime r.LastEvaluation = rule.LastEvaluation r.State = rule.State return nil } func (r *RecordingRule) UnmarshalJSON(b []byte) error { v := struct { Type string `json:"type"` }{} if err := json.Unmarshal(b, &v); err != nil { return err } if v.Type == "" { return errors.New("type field not present in rule") } if v.Type != string(RuleTypeRecording) { return fmt.Errorf("expected rule of type %s but got %s", string(RuleTypeRecording), v.Type) } rule := struct { Name string `json:"name"` Query string `json:"query"` Labels model.LabelSet `json:"labels,omitempty"` Health RuleHealth `json:"health"` LastError string `json:"lastError,omitempty"` EvaluationTime float64 `json:"evaluationTime"` LastEvaluation time.Time `json:"lastEvaluation"` }{} if err := json.Unmarshal(b, &rule); err != nil { return err } r.Health = rule.Health r.Labels = rule.Labels r.Name = rule.Name r.LastError = rule.LastError r.Query = rule.Query r.EvaluationTime = rule.EvaluationTime r.LastEvaluation = rule.LastEvaluation return nil } func (qr *queryResult) UnmarshalJSON(b []byte) error { v := struct { Type model.ValueType `json:"resultType"` Result json.RawMessage `json:"result"` }{} err := json.Unmarshal(b, &v) if err != nil { return err } switch v.Type { case model.ValScalar: var sv model.Scalar err = json.Unmarshal(v.Result, &sv) qr.v = &sv case model.ValVector: var vv model.Vector err = json.Unmarshal(v.Result, &vv) qr.v = vv case model.ValMatrix: var mv model.Matrix err = json.Unmarshal(v.Result, &mv) qr.v = mv default: err = fmt.Errorf("unexpected value type %q", v.Type) } return err } // Exemplar is additional information associated with a time series. 
type Exemplar struct { Labels model.LabelSet `json:"labels"` Value model.SampleValue `json:"value"` Timestamp model.Time `json:"timestamp"` } type ExemplarQueryResult struct { SeriesLabels model.LabelSet `json:"seriesLabels"` Exemplars []Exemplar `json:"exemplars"` } // NewAPI returns a new API for the client. // // It is safe to use the returned API from multiple goroutines. func NewAPI(c api.Client) API { return &httpAPI{ client: &apiClientImpl{ client: c, }, } } type httpAPI struct { client apiClient } func (h *httpAPI) Alerts(ctx context.Context) (AlertsResult, error) { u := h.client.URL(epAlerts, nil) req, err := http.NewRequest(http.MethodGet, u.String(), nil) if err != nil { return AlertsResult{}, err } _, body, _, err := h.client.Do(ctx, req) if err != nil { return AlertsResult{}, err } var res AlertsResult return res, json.Unmarshal(body, &res) } func (h *httpAPI) AlertManagers(ctx context.Context) (AlertManagersResult, error) { u := h.client.URL(epAlertManagers, nil) req, err := http.NewRequest(http.MethodGet, u.String(), nil) if err != nil { return AlertManagersResult{}, err } _, body, _, err := h.client.Do(ctx, req) if err != nil { return AlertManagersResult{}, err } var res AlertManagersResult return res, json.Unmarshal(body, &res) } func (h *httpAPI) CleanTombstones(ctx context.Context) error { u := h.client.URL(epCleanTombstones, nil) req, err := http.NewRequest(http.MethodPost, u.String(), nil) if err != nil { return err } _, _, _, err = h.client.Do(ctx, req) return err } func (h *httpAPI) Config(ctx context.Context) (ConfigResult, error) { u := h.client.URL(epConfig, nil) req, err := http.NewRequest(http.MethodGet, u.String(), nil) if err != nil { return ConfigResult{}, err } _, body, _, err := h.client.Do(ctx, req) if err != nil { return ConfigResult{}, err } var res ConfigResult return res, json.Unmarshal(body, &res) } func (h *httpAPI) DeleteSeries(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error { u := h.client.URL(epDeleteSeries, nil) q := u.Query() for _, m := range matches { q.Add("match[]", m) } q.Set("start", formatTime(startTime)) q.Set("end", formatTime(endTime)) u.RawQuery = q.Encode() req, err := http.NewRequest(http.MethodPost, u.String(), nil) if err != nil { return err } _, _, _, err = h.client.Do(ctx, req) return err } func (h *httpAPI) Flags(ctx context.Context) (FlagsResult, error) { u := h.client.URL(epFlags, nil) req, err := http.NewRequest(http.MethodGet, u.String(), nil) if err != nil { return FlagsResult{}, err } _, body, _, err := h.client.Do(ctx, req) if err != nil { return FlagsResult{}, err } var res FlagsResult return res, json.Unmarshal(body, &res) } func (h *httpAPI) Buildinfo(ctx context.Context) (BuildinfoResult, error) { u := h.client.URL(epBuildinfo, nil) req, err := http.NewRequest(http.MethodGet, u.String(), nil) if err != nil { return BuildinfoResult{}, err } _, body, _, err := h.client.Do(ctx, req) if err != nil { return BuildinfoResult{}, err } var res BuildinfoResult return res, json.Unmarshal(body, &res) } func (h *httpAPI) Runtimeinfo(ctx context.Context) (RuntimeinfoResult, error) { u := h.client.URL(epRuntimeinfo, nil) req, err := http.NewRequest(http.MethodGet, u.String(), nil) if err != nil { return RuntimeinfoResult{}, err } _, body, _, err := h.client.Do(ctx, req) if err != nil { return RuntimeinfoResult{}, err } var res RuntimeinfoResult return res, json.Unmarshal(body, &res) } func (h *httpAPI) LabelNames(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) 
([]string, Warnings, error) { u := h.client.URL(epLabels, nil) q := u.Query() q.Set("start", formatTime(startTime)) q.Set("end", formatTime(endTime)) for _, m := range matches { q.Add("match[]", m) } u.RawQuery = q.Encode() req, err := http.NewRequest(http.MethodGet, u.String(), nil) if err != nil { return nil, nil, err } _, body, w, err := h.client.Do(ctx, req) if err != nil { return nil, w, err } var labelNames []string return labelNames, w, json.Unmarshal(body, &labelNames) } func (h *httpAPI) LabelValues(ctx context.Context, label string, matches []string, startTime time.Time, endTime time.Time) (model.LabelValues, Warnings, error) { u := h.client.URL(epLabelValues, map[string]string{"name": label}) q := u.Query() q.Set("start", formatTime(startTime)) q.Set("end", formatTime(endTime)) for _, m := range matches { q.Add("match[]", m) } u.RawQuery = q.Encode() req, err := http.NewRequest(http.MethodGet, u.String(), nil) if err != nil { return nil, nil, err } _, body, w, err := h.client.Do(ctx, req) if err != nil { return nil, w, err } var labelValues model.LabelValues return labelValues, w, json.Unmarshal(body, &labelValues) } func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time) (model.Value, Warnings, error) { u := h.client.URL(epQuery, nil) q := u.Query() q.Set("query", query) if !ts.IsZero() { q.Set("time", formatTime(ts)) } _, body, warnings, err := h.client.DoGetFallback(ctx, u, q) if err != nil { return nil, warnings, err } var qres queryResult return model.Value(qres.v), warnings, json.Unmarshal(body, &qres) } func (h *httpAPI) QueryRange(ctx context.Context, query string, r Range) (model.Value, Warnings, error) { u := h.client.URL(epQueryRange, nil) q := u.Query() q.Set("query", query) q.Set("start", formatTime(r.Start)) q.Set("end", formatTime(r.End)) q.Set("step", strconv.FormatFloat(r.Step.Seconds(), 'f', -1, 64)) _, body, warnings, err := h.client.DoGetFallback(ctx, u, q) if err != nil { return nil, warnings, err } var qres queryResult return model.Value(qres.v), warnings, json.Unmarshal(body, &qres) } func (h *httpAPI) Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]model.LabelSet, Warnings, error) { u := h.client.URL(epSeries, nil) q := u.Query() for _, m := range matches { q.Add("match[]", m) } q.Set("start", formatTime(startTime)) q.Set("end", formatTime(endTime)) u.RawQuery = q.Encode() req, err := http.NewRequest(http.MethodGet, u.String(), nil) if err != nil { return nil, nil, err } _, body, warnings, err := h.client.Do(ctx, req) if err != nil { return nil, warnings, err } var mset []model.LabelSet return mset, warnings, json.Unmarshal(body, &mset) } func (h *httpAPI) Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error) { u := h.client.URL(epSnapshot, nil) q := u.Query() q.Set("skip_head", strconv.FormatBool(skipHead)) u.RawQuery = q.Encode() req, err := http.NewRequest(http.MethodPost, u.String(), nil) if err != nil { return SnapshotResult{}, err } _, body, _, err := h.client.Do(ctx, req) if err != nil { return SnapshotResult{}, err } var res SnapshotResult return res, json.Unmarshal(body, &res) } func (h *httpAPI) Rules(ctx context.Context) (RulesResult, error) { u := h.client.URL(epRules, nil) req, err := http.NewRequest(http.MethodGet, u.String(), nil) if err != nil { return RulesResult{}, err } _, body, _, err := h.client.Do(ctx, req) if err != nil { return RulesResult{}, err } var res RulesResult return res, json.Unmarshal(body, &res) } func (h *httpAPI) Targets(ctx context.Context) 
(TargetsResult, error) { u := h.client.URL(epTargets, nil) req, err := http.NewRequest(http.MethodGet, u.String(), nil) if err != nil { return TargetsResult{}, err } _, body, _, err := h.client.Do(ctx, req) if err != nil { return TargetsResult{}, err } var res TargetsResult return res, json.Unmarshal(body, &res) } func (h *httpAPI) TargetsMetadata(ctx context.Context, matchTarget string, metric string, limit string) ([]MetricMetadata, error) { u := h.client.URL(epTargetsMetadata, nil) q := u.Query() q.Set("match_target", matchTarget) q.Set("metric", metric) q.Set("limit", limit) u.RawQuery = q.Encode() req, err := http.NewRequest(http.MethodGet, u.String(), nil) if err != nil { return nil, err } _, body, _, err := h.client.Do(ctx, req) if err != nil { return nil, err } var res []MetricMetadata return res, json.Unmarshal(body, &res) } func (h *httpAPI) Metadata(ctx context.Context, metric string, limit string) (map[string][]Metadata, error) { u := h.client.URL(epMetadata, nil) q := u.Query() q.Set("metric", metric) q.Set("limit", limit) u.RawQuery = q.Encode() req, err := http.NewRequest(http.MethodGet, u.String(), nil) if err != nil { return nil, err } _, body, _, err := h.client.Do(ctx, req) if err != nil { return nil, err } var res map[string][]Metadata return res, json.Unmarshal(body, &res) } func (h *httpAPI) TSDB(ctx context.Context) (TSDBResult, error) { u := h.client.URL(epTSDB, nil) req, err := http.NewRequest(http.MethodGet, u.String(), nil) if err != nil { return TSDBResult{}, err } _, body, _, err := h.client.Do(ctx, req) if err != nil { return TSDBResult{}, err } var res TSDBResult return res, json.Unmarshal(body, &res) } func (h *httpAPI) QueryExemplars(ctx context.Context, query string, startTime time.Time, endTime time.Time) ([]ExemplarQueryResult, error) { u := h.client.URL(epQueryExemplars, nil) q := u.Query() q.Set("query", query) q.Set("start", formatTime(startTime)) q.Set("end", formatTime(endTime)) u.RawQuery = q.Encode() req, err := http.NewRequest(http.MethodGet, u.String(), nil) if err != nil { return nil, err } _, body, _, err := h.client.Do(ctx, req) if err != nil { return nil, err } var res []ExemplarQueryResult return res, json.Unmarshal(body, &res) } // Warnings is an array of non critical errors type Warnings []string // apiClient wraps a regular client and processes successful API responses. // Successful also includes responses that errored at the API level. type apiClient interface { URL(ep string, args map[string]string) *url.URL Do(context.Context, *http.Request) (*http.Response, []byte, Warnings, error) DoGetFallback(ctx context.Context, u *url.URL, args url.Values) (*http.Response, []byte, Warnings, error) } type apiClientImpl struct { client api.Client } type apiResponse struct { Status string `json:"status"` Data json.RawMessage `json:"data"` ErrorType ErrorType `json:"errorType"` Error string `json:"error"` Warnings []string `json:"warnings,omitempty"` } func apiError(code int) bool { // These are the codes that Prometheus sends when it returns an error. 
return code == http.StatusUnprocessableEntity || code == http.StatusBadRequest } func errorTypeAndMsgFor(resp *http.Response) (ErrorType, string) { switch resp.StatusCode / 100 { case 4: return ErrClient, fmt.Sprintf("client error: %d", resp.StatusCode) case 5: return ErrServer, fmt.Sprintf("server error: %d", resp.StatusCode) } return ErrBadResponse, fmt.Sprintf("bad response code %d", resp.StatusCode) } func (h *apiClientImpl) URL(ep string, args map[string]string) *url.URL { return h.client.URL(ep, args) } func (h *apiClientImpl) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, Warnings, error) { resp, body, err := h.client.Do(ctx, req) if err != nil { return resp, body, nil, err } code := resp.StatusCode if code/100 != 2 && !apiError(code) { errorType, errorMsg := errorTypeAndMsgFor(resp) return resp, body, nil, &Error{ Type: errorType, Msg: errorMsg, Detail: string(body), } } var result apiResponse if http.StatusNoContent != code { if jsonErr := json.Unmarshal(body, &result); jsonErr != nil { return resp, body, nil, &Error{ Type: ErrBadResponse, Msg: jsonErr.Error(), } } } if apiError(code) && result.Status == "success" { err = &Error{ Type: ErrBadResponse, Msg: "inconsistent body for response code", } } if result.Status == "error" { err = &Error{ Type: result.ErrorType, Msg: result.Error, } } return resp, []byte(result.Data), result.Warnings, err } // DoGetFallback will attempt to do the request as-is, and on a 405 or 501 it // will fallback to a GET request. func (h *apiClientImpl) DoGetFallback(ctx context.Context, u *url.URL, args url.Values) (*http.Response, []byte, Warnings, error) { req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(args.Encode())) if err != nil { return nil, nil, nil, err } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") resp, body, warnings, err := h.Do(ctx, req) if resp != nil && (resp.StatusCode == http.StatusMethodNotAllowed || resp.StatusCode == http.StatusNotImplemented) { u.RawQuery = args.Encode() req, err = http.NewRequest(http.MethodGet, u.String(), nil) if err != nil { return nil, nil, warnings, err } } else { if err != nil { return resp, body, warnings, err } return resp, body, warnings, nil } return h.Do(ctx, req) } func formatTime(t time.Time) string { return strconv.FormatFloat(float64(t.Unix())+float64(t.Nanosecond())/1e9, 'f', -1, 64) } client_golang-1.11.0/api/prometheus/v1/api_bench_test.go000066400000000000000000000057221405741072000231750ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
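With the bindings above in place, the usual round trip is to wrap an api.Client in NewAPI and call Query or QueryRange. A minimal usage sketch (the address, queries, and error handling are illustrative, not prescriptive):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/prometheus/client_golang/api"
	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
)

func main() {
	client, err := api.NewClient(api.Config{Address: "http://localhost:9090"})
	if err != nil {
		fmt.Println("error creating client:", err)
		return
	}
	v1api := v1.NewAPI(client)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Instant query. Query goes through DoGetFallback, so it POSTs and
	// retries as a GET on 405/501 responses.
	result, warnings, err := v1api.Query(ctx, "up", time.Now())
	if err != nil {
		fmt.Println("query error:", err)
		return
	}
	if len(warnings) > 0 {
		fmt.Println("warnings:", warnings)
	}
	fmt.Println(result)

	// Range query over the last hour at one-minute resolution.
	r := v1.Range{Start: time.Now().Add(-time.Hour), End: time.Now(), Step: time.Minute}
	matrix, _, err := v1api.QueryRange(ctx, "rate(prometheus_http_requests_total[5m])", r)
	if err != nil {
		fmt.Println("range query error:", err)
		return
	}
	fmt.Println(matrix)
}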
package v1 import ( "encoding/json" "strconv" "testing" "time" jsoniter "github.com/json-iterator/go" "github.com/prometheus/common/model" ) func generateData(timeseries, datapoints int) model.Matrix { m := make(model.Matrix, 0) for i := 0; i < timeseries; i++ { lset := map[model.LabelName]model.LabelValue{ model.MetricNameLabel: model.LabelValue("timeseries_" + strconv.Itoa(i)), } now := model.Now() values := make([]model.SamplePair, datapoints) for x := datapoints; x > 0; x-- { values[x-1] = model.SamplePair{ // Set the time back assuming a 15s interval. Since this is used for // Marshal/Unmarshal testing the actual interval doesn't matter. Timestamp: now.Add(time.Second * -15 * time.Duration(x)), Value: model.SampleValue(float64(x)), } } ss := &model.SampleStream{ Metric: model.Metric(lset), Values: values, } m = append(m, ss) } return m } func BenchmarkSamplesJsonSerialization(b *testing.B) { for _, timeseriesCount := range []int{10, 100, 1000} { b.Run(strconv.Itoa(timeseriesCount), func(b *testing.B) { for _, datapointCount := range []int{10, 100, 1000} { b.Run(strconv.Itoa(datapointCount), func(b *testing.B) { data := generateData(timeseriesCount, datapointCount) dataBytes, err := json.Marshal(data) if err != nil { b.Fatalf("Error marshaling: %v", err) } b.Run("marshal", func(b *testing.B) { b.Run("encoding/json", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { if _, err := json.Marshal(data); err != nil { b.Fatal(err) } } }) b.Run("jsoniter", func(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { if _, err := jsoniter.Marshal(data); err != nil { b.Fatal(err) } } }) }) b.Run("unmarshal", func(b *testing.B) { b.Run("encoding/json", func(b *testing.B) { b.ReportAllocs() var m model.Matrix for i := 0; i < b.N; i++ { if err := json.Unmarshal(dataBytes, &m); err != nil { b.Fatal(err) } } }) b.Run("jsoniter", func(b *testing.B) { b.ReportAllocs() var m model.Matrix for i := 0; i < b.N; i++ { if err := jsoniter.Unmarshal(dataBytes, &m); err != nil { b.Fatal(err) } } }) }) }) } }) } } client_golang-1.11.0/api/prometheus/v1/api_test.go000066400000000000000000001272631405741072000220430ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
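The benchmarks above exist because api.go registers a custom jsoniter codec for model.SamplePair in its init function; once this package has been imported, jsoniter renders a sample pair as a compact [timestamp, "value"] array rather than an object. A small sketch of that effect (the blank import is there only to trigger the registration, and assumes jsoniter's globally registered type encoders are in effect, as the tests below rely on):

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
	"github.com/prometheus/common/model"

	_ "github.com/prometheus/client_golang/api/prometheus/v1" // registers the SamplePair codec
)

func main() {
	p := model.SamplePair{Timestamp: 1001, Value: 20}
	b, err := jsoniter.Marshal(p)
	if err != nil {
		panic(err)
	}
	// Timestamp in seconds as a float, value as a string, matching the
	// Prometheus HTTP API wire format.
	fmt.Println(string(b)) // [1.001,"20"]
}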
package v1 import ( "context" "errors" "fmt" "io/ioutil" "math" "net/http" "net/http/httptest" "net/url" "reflect" "strings" "testing" "time" json "github.com/json-iterator/go" "github.com/prometheus/common/model" ) type apiTest struct { do func() (interface{}, Warnings, error) inWarnings []string inErr error inStatusCode int inRes interface{} reqPath string reqParam url.Values reqMethod string res interface{} warnings Warnings err error } type apiTestClient struct { *testing.T curTest apiTest } func (c *apiTestClient) URL(ep string, args map[string]string) *url.URL { path := ep for k, v := range args { path = strings.Replace(path, ":"+k, v, -1) } u := &url.URL{ Host: "test:9090", Path: path, } return u } func (c *apiTestClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, Warnings, error) { test := c.curTest if req.URL.Path != test.reqPath { c.Errorf("unexpected request path: want %s, got %s", test.reqPath, req.URL.Path) } if req.Method != test.reqMethod { c.Errorf("unexpected request method: want %s, got %s", test.reqMethod, req.Method) } b, err := json.Marshal(test.inRes) if err != nil { c.Fatal(err) } resp := &http.Response{} if test.inStatusCode != 0 { resp.StatusCode = test.inStatusCode } else if test.inErr != nil { resp.StatusCode = http.StatusUnprocessableEntity } else { resp.StatusCode = http.StatusOK } return resp, b, test.inWarnings, test.inErr } func (c *apiTestClient) DoGetFallback(ctx context.Context, u *url.URL, args url.Values) (*http.Response, []byte, Warnings, error) { req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(args.Encode())) if err != nil { return nil, nil, nil, err } return c.Do(ctx, req) } func TestAPIs(t *testing.T) { testTime := time.Now() tc := &apiTestClient{ T: t, } promAPI := &httpAPI{ client: tc, } doAlertManagers := func() func() (interface{}, Warnings, error) { return func() (interface{}, Warnings, error) { v, err := promAPI.AlertManagers(context.Background()) return v, nil, err } } doCleanTombstones := func() func() (interface{}, Warnings, error) { return func() (interface{}, Warnings, error) { return nil, nil, promAPI.CleanTombstones(context.Background()) } } doConfig := func() func() (interface{}, Warnings, error) { return func() (interface{}, Warnings, error) { v, err := promAPI.Config(context.Background()) return v, nil, err } } doDeleteSeries := func(matcher string, startTime time.Time, endTime time.Time) func() (interface{}, Warnings, error) { return func() (interface{}, Warnings, error) { return nil, nil, promAPI.DeleteSeries(context.Background(), []string{matcher}, startTime, endTime) } } doFlags := func() func() (interface{}, Warnings, error) { return func() (interface{}, Warnings, error) { v, err := promAPI.Flags(context.Background()) return v, nil, err } } doBuildinfo := func() func() (interface{}, Warnings, error) { return func() (interface{}, Warnings, error) { v, err := promAPI.Buildinfo(context.Background()) return v, nil, err } } doRuntimeinfo := func() func() (interface{}, Warnings, error) { return func() (interface{}, Warnings, error) { v, err := promAPI.Runtimeinfo(context.Background()) return v, nil, err } } doLabelNames := func(matches []string) func() (interface{}, Warnings, error) { return func() (interface{}, Warnings, error) { return promAPI.LabelNames(context.Background(), matches, time.Now().Add(-100*time.Hour), time.Now()) } } doLabelValues := func(matches []string, label string) func() (interface{}, Warnings, error) { return func() (interface{}, Warnings, error) { return 
promAPI.LabelValues(context.Background(), label, matches, time.Now().Add(-100*time.Hour), time.Now()) } } doQuery := func(q string, ts time.Time) func() (interface{}, Warnings, error) { return func() (interface{}, Warnings, error) { return promAPI.Query(context.Background(), q, ts) } } doQueryRange := func(q string, rng Range) func() (interface{}, Warnings, error) { return func() (interface{}, Warnings, error) { return promAPI.QueryRange(context.Background(), q, rng) } } doSeries := func(matcher string, startTime time.Time, endTime time.Time) func() (interface{}, Warnings, error) { return func() (interface{}, Warnings, error) { return promAPI.Series(context.Background(), []string{matcher}, startTime, endTime) } } doSnapshot := func(skipHead bool) func() (interface{}, Warnings, error) { return func() (interface{}, Warnings, error) { v, err := promAPI.Snapshot(context.Background(), skipHead) return v, nil, err } } doRules := func() func() (interface{}, Warnings, error) { return func() (interface{}, Warnings, error) { v, err := promAPI.Rules(context.Background()) return v, nil, err } } doTargets := func() func() (interface{}, Warnings, error) { return func() (interface{}, Warnings, error) { v, err := promAPI.Targets(context.Background()) return v, nil, err } } doTargetsMetadata := func(matchTarget string, metric string, limit string) func() (interface{}, Warnings, error) { return func() (interface{}, Warnings, error) { v, err := promAPI.TargetsMetadata(context.Background(), matchTarget, metric, limit) return v, nil, err } } doMetadata := func(metric string, limit string) func() (interface{}, Warnings, error) { return func() (interface{}, Warnings, error) { v, err := promAPI.Metadata(context.Background(), metric, limit) return v, nil, err } } doTSDB := func() func() (interface{}, Warnings, error) { return func() (interface{}, Warnings, error) { v, err := promAPI.TSDB(context.Background()) return v, nil, err } } doQueryExemplars := func(query string, startTime time.Time, endTime time.Time) func() (interface{}, Warnings, error) { return func() (interface{}, Warnings, error) { v, err := promAPI.QueryExemplars(context.Background(), query, startTime, endTime) return v, nil, err } } queryTests := []apiTest{ { do: doQuery("2", testTime), inRes: &queryResult{ Type: model.ValScalar, Result: &model.Scalar{ Value: 2, Timestamp: model.TimeFromUnix(testTime.Unix()), }, }, reqMethod: "POST", reqPath: "/api/v1/query", reqParam: url.Values{ "query": []string{"2"}, "time": []string{testTime.Format(time.RFC3339Nano)}, }, res: &model.Scalar{ Value: 2, Timestamp: model.TimeFromUnix(testTime.Unix()), }, }, { do: doQuery("2", testTime), inErr: fmt.Errorf("some error"), reqMethod: "POST", reqPath: "/api/v1/query", reqParam: url.Values{ "query": []string{"2"}, "time": []string{testTime.Format(time.RFC3339Nano)}, }, err: fmt.Errorf("some error"), }, { do: doQuery("2", testTime), inRes: "some body", inStatusCode: 500, inErr: &Error{ Type: ErrServer, Msg: "server error: 500", Detail: "some body", }, reqMethod: "POST", reqPath: "/api/v1/query", reqParam: url.Values{ "query": []string{"2"}, "time": []string{testTime.Format(time.RFC3339Nano)}, }, err: errors.New("server_error: server error: 500"), }, { do: doQuery("2", testTime), inRes: "some body", inStatusCode: 404, inErr: &Error{ Type: ErrClient, Msg: "client error: 404", Detail: "some body", }, reqMethod: "POST", reqPath: "/api/v1/query", reqParam: url.Values{ "query": []string{"2"}, "time": []string{testTime.Format(time.RFC3339Nano)}, }, err: errors.New("client_error: 
client error: 404"), }, // Warning only. { do: doQuery("2", testTime), inWarnings: []string{"warning"}, inRes: &queryResult{ Type: model.ValScalar, Result: &model.Scalar{ Value: 2, Timestamp: model.TimeFromUnix(testTime.Unix()), }, }, reqMethod: "POST", reqPath: "/api/v1/query", reqParam: url.Values{ "query": []string{"2"}, "time": []string{testTime.Format(time.RFC3339Nano)}, }, res: &model.Scalar{ Value: 2, Timestamp: model.TimeFromUnix(testTime.Unix()), }, warnings: []string{"warning"}, }, // Warning + error. { do: doQuery("2", testTime), inWarnings: []string{"warning"}, inRes: "some body", inStatusCode: 404, inErr: &Error{ Type: ErrClient, Msg: "client error: 404", Detail: "some body", }, reqMethod: "POST", reqPath: "/api/v1/query", reqParam: url.Values{ "query": []string{"2"}, "time": []string{testTime.Format(time.RFC3339Nano)}, }, err: errors.New("client_error: client error: 404"), warnings: []string{"warning"}, }, { do: doQueryRange("2", Range{ Start: testTime.Add(-time.Minute), End: testTime, Step: time.Minute, }), inErr: fmt.Errorf("some error"), reqMethod: "POST", reqPath: "/api/v1/query_range", reqParam: url.Values{ "query": []string{"2"}, "start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)}, "end": []string{testTime.Format(time.RFC3339Nano)}, "step": []string{time.Minute.String()}, }, err: fmt.Errorf("some error"), }, { do: doLabelNames(nil), inRes: []string{"val1", "val2"}, reqMethod: "GET", reqPath: "/api/v1/labels", res: []string{"val1", "val2"}, }, { do: doLabelNames(nil), inRes: []string{"val1", "val2"}, inWarnings: []string{"a"}, reqMethod: "GET", reqPath: "/api/v1/labels", res: []string{"val1", "val2"}, warnings: []string{"a"}, }, { do: doLabelNames(nil), inErr: fmt.Errorf("some error"), reqMethod: "GET", reqPath: "/api/v1/labels", err: fmt.Errorf("some error"), }, { do: doLabelNames(nil), inErr: fmt.Errorf("some error"), inWarnings: []string{"a"}, reqMethod: "GET", reqPath: "/api/v1/labels", err: fmt.Errorf("some error"), warnings: []string{"a"}, }, { do: doLabelNames([]string{"up"}), inRes: []string{"val1", "val2"}, reqMethod: "GET", reqPath: "/api/v1/labels", reqParam: url.Values{"match[]": {"up"}}, res: []string{"val1", "val2"}, }, { do: doLabelValues(nil, "mylabel"), inRes: []string{"val1", "val2"}, reqMethod: "GET", reqPath: "/api/v1/label/mylabel/values", res: model.LabelValues{"val1", "val2"}, }, { do: doLabelValues(nil, "mylabel"), inRes: []string{"val1", "val2"}, inWarnings: []string{"a"}, reqMethod: "GET", reqPath: "/api/v1/label/mylabel/values", res: model.LabelValues{"val1", "val2"}, warnings: []string{"a"}, }, { do: doLabelValues(nil, "mylabel"), inErr: fmt.Errorf("some error"), reqMethod: "GET", reqPath: "/api/v1/label/mylabel/values", err: fmt.Errorf("some error"), }, { do: doLabelValues(nil, "mylabel"), inErr: fmt.Errorf("some error"), inWarnings: []string{"a"}, reqMethod: "GET", reqPath: "/api/v1/label/mylabel/values", err: fmt.Errorf("some error"), warnings: []string{"a"}, }, { do: doLabelValues([]string{"up"}, "mylabel"), inRes: []string{"val1", "val2"}, reqMethod: "GET", reqPath: "/api/v1/label/mylabel/values", reqParam: url.Values{"match[]": {"up"}}, res: model.LabelValues{"val1", "val2"}, }, { do: doSeries("up", testTime.Add(-time.Minute), testTime), inRes: []map[string]string{ { "__name__": "up", "job": "prometheus", "instance": "localhost:9090"}, }, reqMethod: "GET", reqPath: "/api/v1/series", reqParam: url.Values{ "match": []string{"up"}, "start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)}, "end": 
[]string{testTime.Format(time.RFC3339Nano)}, }, res: []model.LabelSet{ { "__name__": "up", "job": "prometheus", "instance": "localhost:9090", }, }, }, // Series with data + warning. { do: doSeries("up", testTime.Add(-time.Minute), testTime), inRes: []map[string]string{ { "__name__": "up", "job": "prometheus", "instance": "localhost:9090"}, }, inWarnings: []string{"a"}, reqMethod: "GET", reqPath: "/api/v1/series", reqParam: url.Values{ "match": []string{"up"}, "start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)}, "end": []string{testTime.Format(time.RFC3339Nano)}, }, res: []model.LabelSet{ { "__name__": "up", "job": "prometheus", "instance": "localhost:9090", }, }, warnings: []string{"a"}, }, { do: doSeries("up", testTime.Add(-time.Minute), testTime), inErr: fmt.Errorf("some error"), reqMethod: "GET", reqPath: "/api/v1/series", reqParam: url.Values{ "match": []string{"up"}, "start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)}, "end": []string{testTime.Format(time.RFC3339Nano)}, }, err: fmt.Errorf("some error"), }, // Series with error and warning. { do: doSeries("up", testTime.Add(-time.Minute), testTime), inErr: fmt.Errorf("some error"), inWarnings: []string{"a"}, reqMethod: "GET", reqPath: "/api/v1/series", reqParam: url.Values{ "match": []string{"up"}, "start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)}, "end": []string{testTime.Format(time.RFC3339Nano)}, }, err: fmt.Errorf("some error"), warnings: []string{"a"}, }, { do: doSnapshot(true), inRes: map[string]string{ "name": "20171210T211224Z-2be650b6d019eb54", }, reqMethod: "POST", reqPath: "/api/v1/admin/tsdb/snapshot", reqParam: url.Values{ "skip_head": []string{"true"}, }, res: SnapshotResult{ Name: "20171210T211224Z-2be650b6d019eb54", }, }, { do: doSnapshot(true), inErr: fmt.Errorf("some error"), reqMethod: "POST", reqPath: "/api/v1/admin/tsdb/snapshot", err: fmt.Errorf("some error"), }, { do: doCleanTombstones(), reqMethod: "POST", reqPath: "/api/v1/admin/tsdb/clean_tombstones", }, { do: doCleanTombstones(), inErr: fmt.Errorf("some error"), reqMethod: "POST", reqPath: "/api/v1/admin/tsdb/clean_tombstones", err: fmt.Errorf("some error"), }, { do: doDeleteSeries("up", testTime.Add(-time.Minute), testTime), inRes: []map[string]string{ { "__name__": "up", "job": "prometheus", "instance": "localhost:9090"}, }, reqMethod: "POST", reqPath: "/api/v1/admin/tsdb/delete_series", reqParam: url.Values{ "match": []string{"up"}, "start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)}, "end": []string{testTime.Format(time.RFC3339Nano)}, }, }, { do: doDeleteSeries("up", testTime.Add(-time.Minute), testTime), inErr: fmt.Errorf("some error"), reqMethod: "POST", reqPath: "/api/v1/admin/tsdb/delete_series", reqParam: url.Values{ "match": []string{"up"}, "start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)}, "end": []string{testTime.Format(time.RFC3339Nano)}, }, err: fmt.Errorf("some error"), }, { do: doConfig(), reqMethod: "GET", reqPath: "/api/v1/status/config", inRes: map[string]string{ "yaml": "", }, res: ConfigResult{ YAML: "", }, }, { do: doConfig(), reqMethod: "GET", reqPath: "/api/v1/status/config", inErr: fmt.Errorf("some error"), err: fmt.Errorf("some error"), }, { do: doFlags(), reqMethod: "GET", reqPath: "/api/v1/status/flags", inRes: map[string]string{ "alertmanager.notification-queue-capacity": "10000", "alertmanager.timeout": "10s", "log.level": "info", "query.lookback-delta": "5m", "query.max-concurrency": "20", }, res: FlagsResult{ 
"alertmanager.notification-queue-capacity": "10000", "alertmanager.timeout": "10s", "log.level": "info", "query.lookback-delta": "5m", "query.max-concurrency": "20", }, }, { do: doFlags(), reqMethod: "GET", reqPath: "/api/v1/status/flags", inErr: fmt.Errorf("some error"), err: fmt.Errorf("some error"), }, { do: doBuildinfo(), reqMethod: "GET", reqPath: "/api/v1/status/buildinfo", inErr: fmt.Errorf("some error"), err: fmt.Errorf("some error"), }, { do: doBuildinfo(), reqMethod: "GET", reqPath: "/api/v1/status/buildinfo", inRes: map[string]interface{}{ "version": "2.23.0", "revision": "26d89b4b0776fe4cd5a3656dfa520f119a375273", "branch": "HEAD", "buildUser": "root@37609b3a0a21", "buildDate": "20201126-10:56:17", "goVersion": "go1.15.5", }, res: BuildinfoResult{ Version: "2.23.0", Revision: "26d89b4b0776fe4cd5a3656dfa520f119a375273", Branch: "HEAD", BuildUser: "root@37609b3a0a21", BuildDate: "20201126-10:56:17", GoVersion: "go1.15.5", }, }, { do: doRuntimeinfo(), reqMethod: "GET", reqPath: "/api/v1/status/runtimeinfo", inErr: fmt.Errorf("some error"), err: fmt.Errorf("some error"), }, { do: doRuntimeinfo(), reqMethod: "GET", reqPath: "/api/v1/status/runtimeinfo", inRes: map[string]interface{}{ "startTime": "2020-05-18T15:52:53.4503113Z", "CWD": "/prometheus", "reloadConfigSuccess": true, "lastConfigTime": "2020-05-18T15:52:56Z", "chunkCount": 72692, "timeSeriesCount": 18476, "corruptionCount": 0, "goroutineCount": 217, "GOMAXPROCS": 2, "GOGC": "100", "GODEBUG": "allocfreetrace", "storageRetention": "1d", }, res: RuntimeinfoResult{ StartTime: time.Date(2020, 5, 18, 15, 52, 53, 450311300, time.UTC), CWD: "/prometheus", ReloadConfigSuccess: true, LastConfigTime: time.Date(2020, 5, 18, 15, 52, 56, 0, time.UTC), ChunkCount: 72692, TimeSeriesCount: 18476, CorruptionCount: 0, GoroutineCount: 217, GOMAXPROCS: 2, GOGC: "100", GODEBUG: "allocfreetrace", StorageRetention: "1d", }, }, { do: doAlertManagers(), reqMethod: "GET", reqPath: "/api/v1/alertmanagers", inRes: map[string]interface{}{ "activeAlertManagers": []map[string]string{ { "url": "http://127.0.0.1:9091/api/v1/alerts", }, }, "droppedAlertManagers": []map[string]string{ { "url": "http://127.0.0.1:9092/api/v1/alerts", }, }, }, res: AlertManagersResult{ Active: []AlertManager{ { URL: "http://127.0.0.1:9091/api/v1/alerts", }, }, Dropped: []AlertManager{ { URL: "http://127.0.0.1:9092/api/v1/alerts", }, }, }, }, { do: doAlertManagers(), reqMethod: "GET", reqPath: "/api/v1/alertmanagers", inErr: fmt.Errorf("some error"), err: fmt.Errorf("some error"), }, { do: doRules(), reqMethod: "GET", reqPath: "/api/v1/rules", inRes: map[string]interface{}{ "groups": []map[string]interface{}{ { "file": "/rules.yaml", "interval": 60, "name": "example", "rules": []map[string]interface{}{ { "alerts": []map[string]interface{}{ { "activeAt": testTime.UTC().Format(time.RFC3339Nano), "annotations": map[string]interface{}{ "summary": "High request latency", }, "labels": map[string]interface{}{ "alertname": "HighRequestLatency", "severity": "page", }, "state": "firing", "value": "1e+00", }, }, "annotations": map[string]interface{}{ "summary": "High request latency", }, "duration": 600, "health": "ok", "labels": map[string]interface{}{ "severity": "page", }, "name": "HighRequestLatency", "query": "job:request_latency_seconds:mean5m{job=\"myjob\"} > 0.5", "type": "alerting", }, { "health": "ok", "name": "job:http_inprogress_requests:sum", "query": "sum(http_inprogress_requests) by (job)", "type": "recording", }, }, }, }, }, res: RulesResult{ Groups: []RuleGroup{ { Name: 
"example", File: "/rules.yaml", Interval: 60, Rules: []interface{}{ AlertingRule{ Alerts: []*Alert{ { ActiveAt: testTime.UTC(), Annotations: model.LabelSet{ "summary": "High request latency", }, Labels: model.LabelSet{ "alertname": "HighRequestLatency", "severity": "page", }, State: AlertStateFiring, Value: "1e+00", }, }, Annotations: model.LabelSet{ "summary": "High request latency", }, Labels: model.LabelSet{ "severity": "page", }, Duration: 600, Health: RuleHealthGood, Name: "HighRequestLatency", Query: "job:request_latency_seconds:mean5m{job=\"myjob\"} > 0.5", LastError: "", }, RecordingRule{ Health: RuleHealthGood, Name: "job:http_inprogress_requests:sum", Query: "sum(http_inprogress_requests) by (job)", LastError: "", }, }, }, }, }, }, // This has the newer API elements like lastEvaluation, evaluationTime, etc. { do: doRules(), reqMethod: "GET", reqPath: "/api/v1/rules", inRes: map[string]interface{}{ "groups": []map[string]interface{}{ { "file": "/rules.yaml", "interval": 60, "name": "example", "rules": []map[string]interface{}{ { "alerts": []map[string]interface{}{ { "activeAt": testTime.UTC().Format(time.RFC3339Nano), "annotations": map[string]interface{}{ "summary": "High request latency", }, "labels": map[string]interface{}{ "alertname": "HighRequestLatency", "severity": "page", }, "state": "firing", "value": "1e+00", }, }, "annotations": map[string]interface{}{ "summary": "High request latency", }, "duration": 600, "health": "ok", "labels": map[string]interface{}{ "severity": "page", }, "name": "HighRequestLatency", "query": "job:request_latency_seconds:mean5m{job=\"myjob\"} > 0.5", "type": "alerting", "evaluationTime": 0.5, "lastEvaluation": "2020-05-18T15:52:53.4503113Z", "state": "firing", }, { "health": "ok", "name": "job:http_inprogress_requests:sum", "query": "sum(http_inprogress_requests) by (job)", "type": "recording", "evaluationTime": 0.3, "lastEvaluation": "2020-05-18T15:52:53.4503113Z", }, }, }, }, }, res: RulesResult{ Groups: []RuleGroup{ { Name: "example", File: "/rules.yaml", Interval: 60, Rules: []interface{}{ AlertingRule{ Alerts: []*Alert{ { ActiveAt: testTime.UTC(), Annotations: model.LabelSet{ "summary": "High request latency", }, Labels: model.LabelSet{ "alertname": "HighRequestLatency", "severity": "page", }, State: AlertStateFiring, Value: "1e+00", }, }, Annotations: model.LabelSet{ "summary": "High request latency", }, Labels: model.LabelSet{ "severity": "page", }, Duration: 600, Health: RuleHealthGood, Name: "HighRequestLatency", Query: "job:request_latency_seconds:mean5m{job=\"myjob\"} > 0.5", LastError: "", EvaluationTime: 0.5, LastEvaluation: time.Date(2020, 5, 18, 15, 52, 53, 450311300, time.UTC), State: "firing", }, RecordingRule{ Health: RuleHealthGood, Name: "job:http_inprogress_requests:sum", Query: "sum(http_inprogress_requests) by (job)", LastError: "", EvaluationTime: 0.3, LastEvaluation: time.Date(2020, 5, 18, 15, 52, 53, 450311300, time.UTC), }, }, }, }, }, }, { do: doRules(), reqMethod: "GET", reqPath: "/api/v1/rules", inErr: fmt.Errorf("some error"), err: fmt.Errorf("some error"), }, { do: doTargets(), reqMethod: "GET", reqPath: "/api/v1/targets", inRes: map[string]interface{}{ "activeTargets": []map[string]interface{}{ { "discoveredLabels": map[string]string{ "__address__": "127.0.0.1:9090", "__metrics_path__": "/metrics", "__scheme__": "http", "job": "prometheus", }, "labels": map[string]string{ "instance": "127.0.0.1:9090", "job": "prometheus", }, "scrapePool": "prometheus", "scrapeUrl": "http://127.0.0.1:9090", "globalUrl": 
"http://127.0.0.1:9090", "lastError": "error while scraping target", "lastScrape": testTime.UTC().Format(time.RFC3339Nano), "lastScrapeDuration": 0.001146115, "health": "up", }, }, "droppedTargets": []map[string]interface{}{ { "discoveredLabels": map[string]string{ "__address__": "127.0.0.1:9100", "__metrics_path__": "/metrics", "__scheme__": "http", "job": "node", }, }, }, }, res: TargetsResult{ Active: []ActiveTarget{ { DiscoveredLabels: map[string]string{ "__address__": "127.0.0.1:9090", "__metrics_path__": "/metrics", "__scheme__": "http", "job": "prometheus", }, Labels: model.LabelSet{ "instance": "127.0.0.1:9090", "job": "prometheus", }, ScrapePool: "prometheus", ScrapeURL: "http://127.0.0.1:9090", GlobalURL: "http://127.0.0.1:9090", LastError: "error while scraping target", LastScrape: testTime.UTC(), LastScrapeDuration: 0.001146115, Health: HealthGood, }, }, Dropped: []DroppedTarget{ { DiscoveredLabels: map[string]string{ "__address__": "127.0.0.1:9100", "__metrics_path__": "/metrics", "__scheme__": "http", "job": "node", }, }, }, }, }, { do: doTargets(), reqMethod: "GET", reqPath: "/api/v1/targets", inErr: fmt.Errorf("some error"), err: fmt.Errorf("some error"), }, { do: doTargetsMetadata("{job=\"prometheus\"}", "go_goroutines", "1"), inRes: []map[string]interface{}{ { "target": map[string]interface{}{ "instance": "127.0.0.1:9090", "job": "prometheus", }, "type": "gauge", "help": "Number of goroutines that currently exist.", "unit": "", }, }, reqMethod: "GET", reqPath: "/api/v1/targets/metadata", reqParam: url.Values{ "match_target": []string{"{job=\"prometheus\"}"}, "metric": []string{"go_goroutines"}, "limit": []string{"1"}, }, res: []MetricMetadata{ { Target: map[string]string{ "instance": "127.0.0.1:9090", "job": "prometheus", }, Type: "gauge", Help: "Number of goroutines that currently exist.", Unit: "", }, }, }, { do: doTargetsMetadata("{job=\"prometheus\"}", "go_goroutines", "1"), inErr: fmt.Errorf("some error"), reqMethod: "GET", reqPath: "/api/v1/targets/metadata", reqParam: url.Values{ "match_target": []string{"{job=\"prometheus\"}"}, "metric": []string{"go_goroutines"}, "limit": []string{"1"}, }, err: fmt.Errorf("some error"), }, { do: doMetadata("go_goroutines", "1"), inRes: map[string]interface{}{ "go_goroutines": []map[string]interface{}{ { "type": "gauge", "help": "Number of goroutines that currently exist.", "unit": "", }, }, }, reqMethod: "GET", reqPath: "/api/v1/metadata", reqParam: url.Values{ "metric": []string{"go_goroutines"}, "limit": []string{"1"}, }, res: map[string][]Metadata{ "go_goroutines": []Metadata{ { Type: "gauge", Help: "Number of goroutines that currently exist.", Unit: "", }, }, }, }, { do: doMetadata("", "1"), inErr: fmt.Errorf("some error"), reqMethod: "GET", reqPath: "/api/v1/metadata", reqParam: url.Values{ "metric": []string{""}, "limit": []string{"1"}, }, err: fmt.Errorf("some error"), }, { do: doTSDB(), reqMethod: "GET", reqPath: "/api/v1/status/tsdb", inErr: fmt.Errorf("some error"), err: fmt.Errorf("some error"), }, { do: doTSDB(), reqMethod: "GET", reqPath: "/api/v1/status/tsdb", inRes: map[string]interface{}{ "seriesCountByMetricName": []interface{}{ map[string]interface{}{ "name": "kubelet_http_requests_duration_seconds_bucket", "value": 1000, }, }, "labelValueCountByLabelName": []interface{}{ map[string]interface{}{ "name": "__name__", "value": 200, }, }, "memoryInBytesByLabelName": []interface{}{ map[string]interface{}{ "name": "id", "value": 4096, }, }, "seriesCountByLabelValuePair": []interface{}{ map[string]interface{}{ "name": 
"job=kubelet", "value": 30000, }, }, }, res: TSDBResult{ SeriesCountByMetricName: []Stat{ { Name: "kubelet_http_requests_duration_seconds_bucket", Value: 1000, }, }, LabelValueCountByLabelName: []Stat{ { Name: "__name__", Value: 200, }, }, MemoryInBytesByLabelName: []Stat{ { Name: "id", Value: 4096, }, }, SeriesCountByLabelValuePair: []Stat{ { Name: "job=kubelet", Value: 30000, }, }, }, }, { do: doQueryExemplars("tns_request_duration_seconds_bucket", testTime.Add(-1*time.Minute), testTime), reqMethod: "GET", reqPath: "/api/v1/query_exemplars", inErr: fmt.Errorf("some error"), err: fmt.Errorf("some error"), }, { do: doQueryExemplars("tns_request_duration_seconds_bucket", testTime.Add(-1*time.Minute), testTime), reqMethod: "GET", reqPath: "/api/v1/query_exemplars", inRes: []interface{}{ map[string]interface{}{ "seriesLabels": map[string]interface{}{ "__name__": "tns_request_duration_seconds_bucket", "instance": "app:80", "job": "tns/app", }, "exemplars": []interface{}{ map[string]interface{}{ "labels": map[string]interface{}{ "traceID": "19fd8c8a33975a23", }, "value": "0.003863295", "timestamp": model.TimeFromUnixNano(testTime.UnixNano()), }, map[string]interface{}{ "labels": map[string]interface{}{ "traceID": "67f743f07cc786b0", }, "value": "0.001535405", "timestamp": model.TimeFromUnixNano(testTime.UnixNano()), }, }, }, }, res: []ExemplarQueryResult{ { SeriesLabels: model.LabelSet{ "__name__": "tns_request_duration_seconds_bucket", "instance": "app:80", "job": "tns/app", }, Exemplars: []Exemplar{ { Labels: model.LabelSet{"traceID": "19fd8c8a33975a23"}, Value: 0.003863295, Timestamp: model.TimeFromUnixNano(testTime.UnixNano()), }, { Labels: model.LabelSet{"traceID": "67f743f07cc786b0"}, Value: 0.001535405, Timestamp: model.TimeFromUnixNano(testTime.UnixNano()), }, }, }, }, }, } var tests []apiTest tests = append(tests, queryTests...) 
for i, test := range tests { t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { tc.curTest = test res, warnings, err := test.do() if (test.inWarnings == nil) != (warnings == nil) && !reflect.DeepEqual(test.inWarnings, warnings) { t.Fatalf("mismatch in warnings expected=%v actual=%v", test.inWarnings, warnings) } if test.err != nil { if err == nil { t.Fatalf("expected error %q but got none", test.err) } if err.Error() != test.err.Error() { t.Errorf("unexpected error: want %s, got %s", test.err, err) } if apiErr, ok := err.(*Error); ok { if apiErr.Detail != test.inRes { t.Errorf("%q should be %q", apiErr.Detail, test.inRes) } } return } if err != nil { t.Fatalf("unexpected error: %s", err) } if !reflect.DeepEqual(res, test.res) { t.Errorf("unexpected result: want %v, got %v", test.res, res) } }) } } type testClient struct { *testing.T ch chan apiClientTest req *http.Request } type apiClientTest struct { code int response interface{} expectedBody string expectedErr *Error expectedWarnings Warnings } func (c *testClient) URL(ep string, args map[string]string) *url.URL { return nil } func (c *testClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) { if ctx == nil { c.Fatalf("context was not passed down") } if req != c.req { c.Fatalf("request was not passed down") } test := <-c.ch var b []byte var err error switch v := test.response.(type) { case string: b = []byte(v) default: b, err = json.Marshal(v) if err != nil { c.Fatal(err) } } resp := &http.Response{ StatusCode: test.code, } return resp, b, nil } func TestAPIClientDo(t *testing.T) { tests := []apiClientTest{ { code: http.StatusUnprocessableEntity, response: &apiResponse{ Status: "error", Data: json.RawMessage(`null`), ErrorType: ErrBadData, Error: "failed", }, expectedErr: &Error{ Type: ErrBadData, Msg: "failed", }, expectedBody: `null`, }, { code: http.StatusUnprocessableEntity, response: &apiResponse{ Status: "error", Data: json.RawMessage(`"test"`), ErrorType: ErrTimeout, Error: "timed out", }, expectedErr: &Error{ Type: ErrTimeout, Msg: "timed out", }, expectedBody: `test`, }, { code: http.StatusInternalServerError, response: "500 error details", expectedErr: &Error{ Type: ErrServer, Msg: "server error: 500", Detail: "500 error details", }, }, { code: http.StatusNotFound, response: "404 error details", expectedErr: &Error{ Type: ErrClient, Msg: "client error: 404", Detail: "404 error details", }, }, { code: http.StatusBadRequest, response: &apiResponse{ Status: "error", Data: json.RawMessage(`null`), ErrorType: ErrBadData, Error: "end timestamp must not be before start time", }, expectedErr: &Error{ Type: ErrBadData, Msg: "end timestamp must not be before start time", }, }, { code: http.StatusUnprocessableEntity, response: "bad json", expectedErr: &Error{ Type: ErrBadResponse, Msg: "readObjectStart: expect { or n, but found b, error found in #1 byte of ...|bad json|..., bigger context ...|bad json|...", }, }, { code: http.StatusUnprocessableEntity, response: &apiResponse{ Status: "success", Data: json.RawMessage(`"test"`), }, expectedErr: &Error{ Type: ErrBadResponse, Msg: "inconsistent body for response code", }, }, { code: http.StatusUnprocessableEntity, response: &apiResponse{ Status: "success", Data: json.RawMessage(`"test"`), ErrorType: ErrTimeout, Error: "timed out", }, expectedErr: &Error{ Type: ErrBadResponse, Msg: "inconsistent body for response code", }, }, { code: http.StatusOK, response: &apiResponse{ Status: "error", Data: json.RawMessage(`"test"`), ErrorType: ErrTimeout, Error: "timed out", 
}, expectedErr: &Error{ Type: ErrTimeout, Msg: "timed out", }, }, { code: http.StatusOK, response: &apiResponse{ Status: "error", Data: json.RawMessage(`"test"`), ErrorType: ErrTimeout, Error: "timed out", Warnings: []string{"a"}, }, expectedErr: &Error{ Type: ErrTimeout, Msg: "timed out", }, expectedWarnings: []string{"a"}, }, } tc := &testClient{ T: t, ch: make(chan apiClientTest, 1), req: &http.Request{}, } client := &apiClientImpl{ client: tc, } for i, test := range tests { t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { tc.ch <- test _, body, warnings, err := client.Do(context.Background(), tc.req) if test.expectedWarnings != nil { if !reflect.DeepEqual(test.expectedWarnings, warnings) { t.Fatalf("mismatch in warnings expected=%v actual=%v", test.expectedWarnings, warnings) } } else { if warnings != nil { t.Fatalf("unexpected warnings: %v", warnings) } } if test.expectedErr != nil { if err == nil { t.Fatal("expected error, but got none") } if test.expectedErr.Error() != err.Error() { t.Errorf("unexpected error: want %s, got %s", test.expectedErr, err) } if test.expectedErr.Detail != "" { apiErr := err.(*Error) if apiErr.Detail != test.expectedErr.Detail { t.Fatalf("expected error detail :%v, but got:%v", test.expectedErr.Detail, apiErr.Detail) } } return } if err != nil { t.Fatalf("unexpected error:%v", err) } if test.expectedBody != string(body) { t.Fatalf("expected body :%v, but got:%v", test.expectedBody, string(body)) } }) } } func TestSamplesJsonSerialization(t *testing.T) { tests := []struct { point model.SamplePair expected string }{ { point: model.SamplePair{0, 0}, expected: `[0,"0"]`, }, { point: model.SamplePair{1, 20}, expected: `[0.001,"20"]`, }, { point: model.SamplePair{10, 20}, expected: `[0.010,"20"]`, }, { point: model.SamplePair{100, 20}, expected: `[0.100,"20"]`, }, { point: model.SamplePair{1001, 20}, expected: `[1.001,"20"]`, }, { point: model.SamplePair{1010, 20}, expected: `[1.010,"20"]`, }, { point: model.SamplePair{1100, 20}, expected: `[1.100,"20"]`, }, { point: model.SamplePair{12345678123456555, 20}, expected: `[12345678123456.555,"20"]`, }, { point: model.SamplePair{-1, 20}, expected: `[-0.001,"20"]`, }, { point: model.SamplePair{0, model.SampleValue(math.NaN())}, expected: `[0,"NaN"]`, }, { point: model.SamplePair{0, model.SampleValue(math.Inf(1))}, expected: `[0,"+Inf"]`, }, { point: model.SamplePair{0, model.SampleValue(math.Inf(-1))}, expected: `[0,"-Inf"]`, }, { point: model.SamplePair{0, model.SampleValue(1.2345678e6)}, expected: `[0,"1234567.8"]`, }, { point: model.SamplePair{0, 1.2345678e-6}, expected: `[0,"0.0000012345678"]`, }, { point: model.SamplePair{0, 1.2345678e-67}, expected: `[0,"1.2345678e-67"]`, }, } for _, test := range tests { t.Run(test.expected, func(t *testing.T) { b, err := json.Marshal(test.point) if err != nil { t.Fatal(err) } if string(b) != test.expected { t.Fatalf("Mismatch marshal expected=%s actual=%s", test.expected, string(b)) } // To test Unmarshal, we Unmarshal and then re-Marshal; this way we // can do a string compare, as otherwise NaN values don't show equivalence // properly.
var sp model.SamplePair if err = json.Unmarshal(b, &sp); err != nil { t.Fatal(err) } b, err = json.Marshal(sp) if err != nil { t.Fatal(err) } if string(b) != test.expected { t.Fatalf("Mismatch marshal expected=%s actual=%s", test.expected, string(b)) } }) } } type httpTestClient struct { client http.Client } func (c *httpTestClient) URL(ep string, args map[string]string) *url.URL { return nil } func (c *httpTestClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) { resp, err := c.client.Do(req) if err != nil { return nil, nil, err } var body []byte done := make(chan struct{}) go func() { body, err = ioutil.ReadAll(resp.Body) close(done) }() select { case <-ctx.Done(): <-done err = resp.Body.Close() if err == nil { err = ctx.Err() } case <-done: } return resp, body, err } func TestDoGetFallback(t *testing.T) { v := url.Values{"a": []string{"1", "2"}} type testResponse struct { Values string Method string } // Start a local HTTP server. server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { req.ParseForm() testResp, _ := json.Marshal(&testResponse{ Values: req.Form.Encode(), Method: req.Method, }) apiResp := &apiResponse{ Data: testResp, } body, _ := json.Marshal(apiResp) if req.Method == http.MethodPost { if req.URL.Path == "/blockPost405" { http.Error(w, string(body), http.StatusMethodNotAllowed) return } } if req.Method == http.MethodPost { if req.URL.Path == "/blockPost501" { http.Error(w, string(body), http.StatusNotImplemented) return } } w.Write(body) })) // Close the server when test finishes. defer server.Close() u, err := url.Parse(server.URL) if err != nil { t.Fatal(err) } client := &httpTestClient{client: *(server.Client())} api := &apiClientImpl{ client: client, } // Do a post, and ensure that the post succeeds. _, b, _, err := api.DoGetFallback(context.TODO(), u, v) if err != nil { t.Fatalf("Error doing local request: %v", err) } resp := &testResponse{} if err := json.Unmarshal(b, resp); err != nil { t.Fatal(err) } if resp.Method != http.MethodPost { t.Fatalf("Mismatch method") } if resp.Values != v.Encode() { t.Fatalf("Mismatch in values") } // Do a fallback to a get on 405. u.Path = "/blockPost405" _, b, _, err = api.DoGetFallback(context.TODO(), u, v) if err != nil { t.Fatalf("Error doing local request: %v", err) } if err := json.Unmarshal(b, resp); err != nil { t.Fatal(err) } if resp.Method != http.MethodGet { t.Fatalf("Mismatch method") } if resp.Values != v.Encode() { t.Fatalf("Mismatch in values") } // Do a fallback to a get on 501. u.Path = "/blockPost501" _, b, _, err = api.DoGetFallback(context.TODO(), u, v) if err != nil { t.Fatalf("Error doing local request: %v", err) } if err := json.Unmarshal(b, resp); err != nil { t.Fatal(err) } if resp.Method != http.MethodGet { t.Fatalf("Mismatch method") } if resp.Values != v.Encode() { t.Fatalf("Mismatch in values") } } client_golang-1.11.0/api/prometheus/v1/example_test.go000066400000000000000000000141471405741072000227210ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. // Package v1_test provides examples making requests to Prometheus using the // Golang client. package v1_test import ( "context" "fmt" "net/http" "os" "time" "github.com/prometheus/client_golang/api" v1 "github.com/prometheus/client_golang/api/prometheus/v1" "github.com/prometheus/common/config" ) func ExampleAPI_query() { client, err := api.NewClient(api.Config{ Address: "http://demo.robustperception.io:9090", }) if err != nil { fmt.Printf("Error creating client: %v\n", err) os.Exit(1) } v1api := v1.NewAPI(client) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() result, warnings, err := v1api.Query(ctx, "up", time.Now()) if err != nil { fmt.Printf("Error querying Prometheus: %v\n", err) os.Exit(1) } if len(warnings) > 0 { fmt.Printf("Warnings: %v\n", warnings) } fmt.Printf("Result:\n%v\n", result) } func ExampleAPI_queryRange() { client, err := api.NewClient(api.Config{ Address: "http://demo.robustperception.io:9090", }) if err != nil { fmt.Printf("Error creating client: %v\n", err) os.Exit(1) } v1api := v1.NewAPI(client) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() r := v1.Range{ Start: time.Now().Add(-time.Hour), End: time.Now(), Step: time.Minute, } result, warnings, err := v1api.QueryRange(ctx, "rate(prometheus_tsdb_head_samples_appended_total[5m])", r) if err != nil { fmt.Printf("Error querying Prometheus: %v\n", err) os.Exit(1) } if len(warnings) > 0 { fmt.Printf("Warnings: %v\n", warnings) } fmt.Printf("Result:\n%v\n", result) } type userAgentRoundTripper struct { name string rt http.RoundTripper } // RoundTrip implements the http.RoundTripper interface. func (u userAgentRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { if r.UserAgent() == "" { // The specification of http.RoundTripper says that it shouldn't mutate // the request so make a copy of req.Header since this is all that is // modified. r2 := new(http.Request) *r2 = *r r2.Header = make(http.Header) for k, s := range r.Header { r2.Header[k] = s } r2.Header.Set("User-Agent", u.name) r = r2 } return u.rt.RoundTrip(r) } func ExampleAPI_queryRangeWithUserAgent() { client, err := api.NewClient(api.Config{ Address: "http://demo.robustperception.io:9090", RoundTripper: userAgentRoundTripper{name: "Client-Golang", rt: api.DefaultRoundTripper}, }) if err != nil { fmt.Printf("Error creating client: %v\n", err) os.Exit(1) } v1api := v1.NewAPI(client) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() r := v1.Range{ Start: time.Now().Add(-time.Hour), End: time.Now(), Step: time.Minute, } result, warnings, err := v1api.QueryRange(ctx, "rate(prometheus_tsdb_head_samples_appended_total[5m])", r) if err != nil { fmt.Printf("Error querying Prometheus: %v\n", err) os.Exit(1) } if len(warnings) > 0 { fmt.Printf("Warnings: %v\n", warnings) } fmt.Printf("Result:\n%v\n", result) } func ExampleAPI_queryRangeWithBasicAuth() { client, err := api.NewClient(api.Config{ Address: "http://demo.robustperception.io:9090", // We can use amazing github.com/prometheus/common/config helper! 
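// The four arguments are the username, the password, an optional path to a
// password file (left empty here), and the RoundTripper to wrap.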
RoundTripper: config.NewBasicAuthRoundTripper("me", "defintely_me", "", api.DefaultRoundTripper), }) if err != nil { fmt.Printf("Error creating client: %v\n", err) os.Exit(1) } v1api := v1.NewAPI(client) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() r := v1.Range{ Start: time.Now().Add(-time.Hour), End: time.Now(), Step: time.Minute, } result, warnings, err := v1api.QueryRange(ctx, "rate(prometheus_tsdb_head_samples_appended_total[5m])", r) if err != nil { fmt.Printf("Error querying Prometheus: %v\n", err) os.Exit(1) } if len(warnings) > 0 { fmt.Printf("Warnings: %v\n", warnings) } fmt.Printf("Result:\n%v\n", result) } func ExampleAPI_queryRangeWithAuthBearerToken() { client, err := api.NewClient(api.Config{ Address: "http://demo.robustperception.io:9090", // We can use amazing github.com/prometheus/common/config helper! RoundTripper: config.NewAuthorizationCredentialsRoundTripper("Bearer", "secret_token", api.DefaultRoundTripper), }) if err != nil { fmt.Printf("Error creating client: %v\n", err) os.Exit(1) } v1api := v1.NewAPI(client) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() r := v1.Range{ Start: time.Now().Add(-time.Hour), End: time.Now(), Step: time.Minute, } result, warnings, err := v1api.QueryRange(ctx, "rate(prometheus_tsdb_head_samples_appended_total[5m])", r) if err != nil { fmt.Printf("Error querying Prometheus: %v\n", err) os.Exit(1) } if len(warnings) > 0 { fmt.Printf("Warnings: %v\n", warnings) } fmt.Printf("Result:\n%v\n", result) } func ExampleAPI_series() { client, err := api.NewClient(api.Config{ Address: "http://demo.robustperception.io:9090", }) if err != nil { fmt.Printf("Error creating client: %v\n", err) os.Exit(1) } v1api := v1.NewAPI(client) ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() lbls, warnings, err := v1api.Series(ctx, []string{ "{__name__=~\"scrape_.+\",job=\"node\"}", "{__name__=~\"scrape_.+\",job=\"prometheus\"}", }, time.Now().Add(-time.Hour), time.Now()) if err != nil { fmt.Printf("Error querying Prometheus: %v\n", err) os.Exit(1) } if len(warnings) > 0 { fmt.Printf("Warnings: %v\n", warnings) } fmt.Println("Result:") for _, lbl := range lbls { fmt.Println(lbl) } } client_golang-1.11.0/examples/000077500000000000000000000000001405741072000162155ustar00rootroot00000000000000client_golang-1.11.0/examples/random/000077500000000000000000000000001405741072000174755ustar00rootroot00000000000000client_golang-1.11.0/examples/random/main.go000066400000000000000000000103471405741072000207550ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // A simple example exposing fictional RPC latencies with different types of // random distributions (uniform, normal, and exponential) as Prometheus // metrics. 
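//
// A rough sketch of trying the example locally (the flag name and default
// port are taken from the code below):
//
//	go run main.go --listen-address=:8080
//	curl http://localhost:8080/metrics
//
// The rpc_durations_seconds summary and rpc_durations_histogram_seconds
// histogram appear in the scrape output, with values accumulating as the
// goroutines below keep recording observations.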
package main import ( "flag" "fmt" "log" "math" "math/rand" "net/http" "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" ) var ( addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.") uniformDomain = flag.Float64("uniform.domain", 0.0002, "The domain for the uniform distribution.") normDomain = flag.Float64("normal.domain", 0.0002, "The domain for the normal distribution.") normMean = flag.Float64("normal.mean", 0.00001, "The mean for the normal distribution.") oscillationPeriod = flag.Duration("oscillation-period", 10*time.Minute, "The duration of the rate oscillation period.") ) var ( // Create a summary to track fictional interservice RPC latencies for three // distinct services with different latency distributions. These services are // differentiated via a "service" label. rpcDurations = prometheus.NewSummaryVec( prometheus.SummaryOpts{ Name: "rpc_durations_seconds", Help: "RPC latency distributions.", Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, }, []string{"service"}, ) // The same as above, but now as a histogram, and only for the normal // distribution. The buckets are targeted to the parameters of the // normal distribution, with 20 buckets centered on the mean, each // half-sigma wide. rpcDurationsHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{ Name: "rpc_durations_histogram_seconds", Help: "RPC latency distributions.", Buckets: prometheus.LinearBuckets(*normMean-5**normDomain, .5**normDomain, 20), }) ) func init() { // Register the summary and the histogram with Prometheus's default registry. prometheus.MustRegister(rpcDurations) prometheus.MustRegister(rpcDurationsHistogram) // Add Go module build info. prometheus.MustRegister(prometheus.NewBuildInfoCollector()) } func main() { flag.Parse() start := time.Now() oscillationFactor := func() float64 { return 2 + math.Sin(math.Sin(2*math.Pi*float64(time.Since(start))/float64(*oscillationPeriod))) } // Periodically record some sample latencies for the three services. go func() { for { v := rand.Float64() * *uniformDomain rpcDurations.WithLabelValues("uniform").Observe(v) time.Sleep(time.Duration(100*oscillationFactor()) * time.Millisecond) } }() go func() { for { v := (rand.NormFloat64() * *normDomain) + *normMean rpcDurations.WithLabelValues("normal").Observe(v) // Demonstrate exemplar support with a dummy ID. This // would be something like a trace ID in a real // application. Note the necessary type assertion. We // already know that rpcDurationsHistogram implements // the ExemplarObserver interface and thus don't need to // check the outcome of the type assertion. rpcDurationsHistogram.(prometheus.ExemplarObserver).ObserveWithExemplar( v, prometheus.Labels{"dummyID": fmt.Sprint(rand.Intn(100000))}, ) time.Sleep(time.Duration(75*oscillationFactor()) * time.Millisecond) } }() go func() { for { v := rand.ExpFloat64() / 1e6 rpcDurations.WithLabelValues("exponential").Observe(v) time.Sleep(time.Duration(50*oscillationFactor()) * time.Millisecond) } }() // Expose the registered metrics via HTTP. http.Handle("/metrics", promhttp.HandlerFor( prometheus.DefaultGatherer, promhttp.HandlerOpts{ // Opt into OpenMetrics to support exemplars. 
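// When a scraper negotiates the OpenMetrics format, the exemplars recorded
// via ObserveWithExemplar above are exposed alongside the histogram's bucket
// lines.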
EnableOpenMetrics: true, }, )) log.Fatal(http.ListenAndServe(*addr, nil)) } client_golang-1.11.0/examples/simple/000077500000000000000000000000001405741072000175065ustar00rootroot00000000000000client_golang-1.11.0/examples/simple/main.go000066400000000000000000000017361405741072000207700ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // A minimal example of how to include Prometheus instrumentation. package main import ( "flag" "log" "net/http" "github.com/prometheus/client_golang/prometheus/promhttp" ) var addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.") func main() { flag.Parse() http.Handle("/metrics", promhttp.Handler()) log.Fatal(http.ListenAndServe(*addr, nil)) } client_golang-1.11.0/go.mod000066400000000000000000000007241405741072000155100ustar00rootroot00000000000000module github.com/prometheus/client_golang require ( github.com/beorn7/perks v1.0.1 github.com/cespare/xxhash/v2 v2.1.1 github.com/golang/protobuf v1.4.3 github.com/json-iterator/go v1.1.11 github.com/prometheus/client_model v0.2.0 github.com/prometheus/common v0.26.0 github.com/prometheus/procfs v0.6.0 golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 golang.org/x/text v0.3.2 // indirect google.golang.org/protobuf v1.26.0-rc.1 // indirect ) go 1.13 client_golang-1.11.0/go.sum000066400000000000000000000346251405741072000155440ustar00rootroot00000000000000cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit 
v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod 
h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 h1:JWgyZ1qgdTaF3N3oxC+MdTV7qvEEgHo3otj+HB5CM7Q= 
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1 h1:7QnIQpGRHE5RnLKnESfDoxm2dTapTZua5a0kS0A+VXQ= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= client_golang-1.11.0/prometheus/000077500000000000000000000000001405741072000165725ustar00rootroot00000000000000client_golang-1.11.0/prometheus/.gitignore000066400000000000000000000000341405741072000205570ustar00rootroot00000000000000command-line-arguments.test client_golang-1.11.0/prometheus/README.md000066400000000000000000000002401405741072000200450ustar00rootroot00000000000000See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus). client_golang-1.11.0/prometheus/benchmark_test.go000066400000000000000000000105471405741072000221210ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus import ( "sync" "testing" ) func BenchmarkCounterWithLabelValues(b *testing.B) { m := NewCounterVec( CounterOpts{ Name: "benchmark_counter", Help: "A counter to benchmark it.", }, []string{"one", "two", "three"}, ) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { m.WithLabelValues("eins", "zwei", "drei").Inc() } } func BenchmarkCounterWithLabelValuesConcurrent(b *testing.B) { m := NewCounterVec( CounterOpts{ Name: "benchmark_counter", Help: "A counter to benchmark it.", }, []string{"one", "two", "three"}, ) b.ReportAllocs() b.ResetTimer() wg := sync.WaitGroup{} for i := 0; i < 10; i++ { wg.Add(1) go func() { for j := 0; j < b.N/10; j++ { m.WithLabelValues("eins", "zwei", "drei").Inc() } wg.Done() }() } wg.Wait() } func BenchmarkCounterWithMappedLabels(b *testing.B) { m := NewCounterVec( CounterOpts{ Name: "benchmark_counter", Help: "A counter to benchmark it.", }, []string{"one", "two", "three"}, ) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { m.With(Labels{"two": "zwei", "one": "eins", "three": "drei"}).Inc() } } func BenchmarkCounterWithPreparedMappedLabels(b *testing.B) { m := NewCounterVec( CounterOpts{ Name: "benchmark_counter", Help: "A counter to benchmark it.", }, []string{"one", "two", "three"}, ) b.ReportAllocs() b.ResetTimer() labels := Labels{"two": "zwei", "one": "eins", "three": "drei"} for i := 0; i < b.N; i++ { m.With(labels).Inc() } } func BenchmarkCounterNoLabels(b *testing.B) { m := NewCounter(CounterOpts{ Name: "benchmark_counter", Help: "A counter to benchmark it.", }) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { m.Inc() } } func BenchmarkGaugeWithLabelValues(b *testing.B) { m := NewGaugeVec( GaugeOpts{ Name: "benchmark_gauge", Help: "A gauge to benchmark it.", }, []string{"one", "two", "three"}, ) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { m.WithLabelValues("eins", "zwei", "drei").Set(3.1415) } } func BenchmarkGaugeNoLabels(b *testing.B) { m := NewGauge(GaugeOpts{ Name: "benchmark_gauge", Help: "A gauge to benchmark it.", }) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { m.Set(3.1415) } } func BenchmarkSummaryWithLabelValues(b *testing.B) { m := NewSummaryVec( SummaryOpts{ Name: "benchmark_summary", Help: "A summary to benchmark it.", Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, }, []string{"one", "two", "three"}, ) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415) } } func BenchmarkSummaryNoLabels(b *testing.B) { m := NewSummary(SummaryOpts{ Name: "benchmark_summary", Help: "A summary to benchmark it.", Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, }, ) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { m.Observe(3.1415) } } func BenchmarkHistogramWithLabelValues(b *testing.B) { m := NewHistogramVec( HistogramOpts{ Name: "benchmark_histogram", Help: "A histogram to benchmark it.", }, []string{"one", "two", "three"}, ) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415) } } func 
BenchmarkHistogramNoLabels(b *testing.B) { m := NewHistogram(HistogramOpts{ Name: "benchmark_histogram", Help: "A histogram to benchmark it.", }, ) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { m.Observe(3.1415) } } func BenchmarkParallelCounter(b *testing.B) { c := NewCounter(CounterOpts{ Name: "benchmark_counter", Help: "A Counter to benchmark it.", }) b.ReportAllocs() b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { c.Inc() } }) } client_golang-1.11.0/prometheus/collector.go000066400000000000000000000123021405741072000211050ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus // Collector is the interface implemented by anything that can be used by // Prometheus to collect metrics. A Collector has to be registered for // collection. See Registerer.Register. // // The stock metrics provided by this package (Gauge, Counter, Summary, // Histogram, Untyped) are also Collectors (which only ever collect one metric, // namely itself). An implementer of Collector may, however, collect multiple // metrics in a coordinated fashion and/or create metrics on the fly. Examples // for collectors already implemented in this library are the metric vectors // (i.e. collection of multiple instances of the same Metric but with different // label values) like GaugeVec or SummaryVec, and the ExpvarCollector. type Collector interface { // Describe sends the super-set of all possible descriptors of metrics // collected by this Collector to the provided channel and returns once // the last descriptor has been sent. The sent descriptors fulfill the // consistency and uniqueness requirements described in the Desc // documentation. // // It is valid if one and the same Collector sends duplicate // descriptors. Those duplicates are simply ignored. However, two // different Collectors must not send duplicate descriptors. // // Sending no descriptor at all marks the Collector as “unchecked”, // i.e. no checks will be performed at registration time, and the // Collector may yield any Metric it sees fit in its Collect method. // // This method idempotently sends the same descriptors throughout the // lifetime of the Collector. It may be called concurrently and // therefore must be implemented in a concurrency safe way. // // If a Collector encounters an error while executing this method, it // must send an invalid descriptor (created with NewInvalidDesc) to // signal the error to the registry. Describe(chan<- *Desc) // Collect is called by the Prometheus registry when collecting // metrics. The implementation sends each collected metric via the // provided channel and returns once the last metric has been sent. The // descriptor of each sent metric is one of those returned by Describe // (unless the Collector is unchecked, see above). Returned metrics that // share the same descriptor must differ in their variable label // values. 
// // This method may be called concurrently and must therefore be // implemented in a concurrency safe way. Blocking occurs at the expense // of total performance of rendering all registered metrics. Ideally, // Collector implementations support concurrent readers. Collect(chan<- Metric) } // DescribeByCollect is a helper to implement the Describe method of a custom // Collector. It collects the metrics from the provided Collector and sends // their descriptors to the provided channel. // // If a Collector collects the same metrics throughout its lifetime, its // Describe method can simply be implemented as: // // func (c customCollector) Describe(ch chan<- *Desc) { // DescribeByCollect(c, ch) // } // // However, this will not work if the metrics collected change dynamically over // the lifetime of the Collector in a way that their combined set of descriptors // changes as well. The shortcut implementation will then violate the contract // of the Describe method. If a Collector sometimes collects no metrics at all // (for example vectors like CounterVec, GaugeVec, etc., which only collect // metrics after a metric with a fully specified label set has been accessed), // it might even get registered as an unchecked Collector (cf. the Register // method of the Registerer interface). Hence, only use this shortcut // implementation of Describe if you are certain to fulfill the contract. // // The Collector example demonstrates a use of DescribeByCollect. func DescribeByCollect(c Collector, descs chan<- *Desc) { metrics := make(chan Metric) go func() { c.Collect(metrics) close(metrics) }() for m := range metrics { descs <- m.Desc() } } // selfCollector implements Collector for a single Metric so that the Metric // collects itself. Add it as an anonymous field to a struct that implements // Metric, and call init with the Metric itself as an argument. type selfCollector struct { self Metric } // init provides the selfCollector with a reference to the metric it is supposed // to collect. It is usually called within the factory function to create a // metric. See example. func (c *selfCollector) init(self Metric) { c.self = self } // Describe implements Collector. func (c *selfCollector) Describe(ch chan<- *Desc) { ch <- c.self.Desc() } // Collect implements Collector. func (c *selfCollector) Collect(ch chan<- Metric) { ch <- c.self } client_golang-1.11.0/prometheus/collector_test.go000066400000000000000000000035441405741072000221540ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
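//
// For orientation, an illustrative sketch (not part of the library) of a
// minimal custom Collector following the contract documented in collector.go.
// The type name and the length callback are hypothetical; the metric is
// created on the fly with NewDesc and MustNewConstMetric:
//
//	type queueLengthCollector struct {
//		desc   *Desc
//		length func() float64 // supplied by the application
//	}
//
//	func newQueueLengthCollector(length func() float64) queueLengthCollector {
//		return queueLengthCollector{
//			desc: NewDesc(
//				"example_queue_length",
//				"Current length of the example queue.",
//				nil, nil,
//			),
//			length: length,
//		}
//	}
//
//	func (c queueLengthCollector) Describe(ch chan<- *Desc) {
//		ch <- c.desc
//	}
//
//	func (c queueLengthCollector) Collect(ch chan<- Metric) {
//		ch <- MustNewConstMetric(c.desc, GaugeValue, c.length())
//	}
//
// Registered via MustRegister, such a collector is invoked on every scrape
// and reports the current value without keeping any state between scrapes.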
package prometheus import "testing" type collectorDescribedByCollect struct { cnt Counter gge Gauge } func (c collectorDescribedByCollect) Collect(ch chan<- Metric) { ch <- c.cnt ch <- c.gge } func (c collectorDescribedByCollect) Describe(ch chan<- *Desc) { DescribeByCollect(c, ch) } func TestDescribeByCollect(t *testing.T) { goodCollector := collectorDescribedByCollect{ cnt: NewCounter(CounterOpts{Name: "c1", Help: "help c1"}), gge: NewGauge(GaugeOpts{Name: "g1", Help: "help g1"}), } collidingCollector := collectorDescribedByCollect{ cnt: NewCounter(CounterOpts{Name: "c2", Help: "help c2"}), gge: NewGauge(GaugeOpts{Name: "g1", Help: "help g1"}), } inconsistentCollector := collectorDescribedByCollect{ cnt: NewCounter(CounterOpts{Name: "c3", Help: "help c3"}), gge: NewGauge(GaugeOpts{Name: "c3", Help: "help inconsistent"}), } reg := NewPedanticRegistry() if err := reg.Register(goodCollector); err != nil { t.Error("registration failed:", err) } if err := reg.Register(collidingCollector); err == nil { t.Error("registration unexpectedly succeeded") } if err := reg.Register(inconsistentCollector); err == nil { t.Error("registration unexpectedly succeeded") } if _, err := reg.Gather(); err != nil { t.Error("gathering failed:", err) } } client_golang-1.11.0/prometheus/collectors/000077500000000000000000000000001405741072000207435ustar00rootroot00000000000000client_golang-1.11.0/prometheus/collectors/collectors.go000066400000000000000000000013461405741072000234470ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package collectors provides implementations of prometheus.Collector to // conveniently collect process and Go-related metrics. package collectors client_golang-1.11.0/prometheus/collectors/dbstats_collector.go000066400000000000000000000105121405741072000250030ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collectors import ( "database/sql" "github.com/prometheus/client_golang/prometheus" ) type dbStatsCollector struct { db *sql.DB maxOpenConnections *prometheus.Desc openConnections *prometheus.Desc inUseConnections *prometheus.Desc idleConnections *prometheus.Desc waitCount *prometheus.Desc waitDuration *prometheus.Desc maxIdleClosed *prometheus.Desc maxIdleTimeClosed *prometheus.Desc maxLifetimeClosed *prometheus.Desc } // NewDBStatsCollector returns a collector that exports metrics about the given *sql.DB. 
// See https://golang.org/pkg/database/sql/#DBStats for more information on stats. func NewDBStatsCollector(db *sql.DB, dbName string) prometheus.Collector { fqName := func(name string) string { return "go_sql_" + name } return &dbStatsCollector{ db: db, maxOpenConnections: prometheus.NewDesc( fqName("max_open_connections"), "Maximum number of open connections to the database.", nil, prometheus.Labels{"db_name": dbName}, ), openConnections: prometheus.NewDesc( fqName("open_connections"), "The number of established connections both in use and idle.", nil, prometheus.Labels{"db_name": dbName}, ), inUseConnections: prometheus.NewDesc( fqName("in_use_connections"), "The number of connections currently in use.", nil, prometheus.Labels{"db_name": dbName}, ), idleConnections: prometheus.NewDesc( fqName("idle_connections"), "The number of idle connections.", nil, prometheus.Labels{"db_name": dbName}, ), waitCount: prometheus.NewDesc( fqName("wait_count_total"), "The total number of connections waited for.", nil, prometheus.Labels{"db_name": dbName}, ), waitDuration: prometheus.NewDesc( fqName("wait_duration_seconds_total"), "The total time blocked waiting for a new connection.", nil, prometheus.Labels{"db_name": dbName}, ), maxIdleClosed: prometheus.NewDesc( fqName("max_idle_closed_total"), "The total number of connections closed due to SetMaxIdleConns.", nil, prometheus.Labels{"db_name": dbName}, ), maxIdleTimeClosed: prometheus.NewDesc( fqName("max_idle_time_closed_total"), "The total number of connections closed due to SetConnMaxIdleTime.", nil, prometheus.Labels{"db_name": dbName}, ), maxLifetimeClosed: prometheus.NewDesc( fqName("max_lifetime_closed_total"), "The total number of connections closed due to SetConnMaxLifetime.", nil, prometheus.Labels{"db_name": dbName}, ), } } // Describe implements Collector. func (c *dbStatsCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.maxOpenConnections ch <- c.openConnections ch <- c.inUseConnections ch <- c.idleConnections ch <- c.waitCount ch <- c.waitDuration ch <- c.maxIdleClosed ch <- c.maxLifetimeClosed c.describeNewInGo115(ch) } // Collect implements Collector. func (c *dbStatsCollector) Collect(ch chan<- prometheus.Metric) { stats := c.db.Stats() ch <- prometheus.MustNewConstMetric(c.maxOpenConnections, prometheus.GaugeValue, float64(stats.MaxOpenConnections)) ch <- prometheus.MustNewConstMetric(c.openConnections, prometheus.GaugeValue, float64(stats.OpenConnections)) ch <- prometheus.MustNewConstMetric(c.inUseConnections, prometheus.GaugeValue, float64(stats.InUse)) ch <- prometheus.MustNewConstMetric(c.idleConnections, prometheus.GaugeValue, float64(stats.Idle)) ch <- prometheus.MustNewConstMetric(c.waitCount, prometheus.CounterValue, float64(stats.WaitCount)) ch <- prometheus.MustNewConstMetric(c.waitDuration, prometheus.CounterValue, stats.WaitDuration.Seconds()) ch <- prometheus.MustNewConstMetric(c.maxIdleClosed, prometheus.CounterValue, float64(stats.MaxIdleClosed)) ch <- prometheus.MustNewConstMetric(c.maxLifetimeClosed, prometheus.CounterValue, float64(stats.MaxLifetimeClosed)) c.collectNewInGo115(ch, stats) } client_golang-1.11.0/prometheus/collectors/dbstats_collector_go115.go000066400000000000000000000020061405741072000257160ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // +build go1.15 package collectors import ( "database/sql" "github.com/prometheus/client_golang/prometheus" ) func (c *dbStatsCollector) describeNewInGo115(ch chan<- *prometheus.Desc) { ch <- c.maxIdleTimeClosed } func (c *dbStatsCollector) collectNewInGo115(ch chan<- prometheus.Metric, stats sql.DBStats) { ch <- prometheus.MustNewConstMetric(c.maxIdleTimeClosed, prometheus.CounterValue, float64(stats.MaxIdleTimeClosed)) } client_golang-1.11.0/prometheus/collectors/dbstats_collector_pre_go115.go000066400000000000000000000015651405741072000265750ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // +build !go1.15 package collectors import ( "database/sql" "github.com/prometheus/client_golang/prometheus" ) func (c *dbStatsCollector) describeNewInGo115(ch chan<- *prometheus.Desc) {} func (c *dbStatsCollector) collectNewInGo115(ch chan<- prometheus.Metric, stats sql.DBStats) {} client_golang-1.11.0/prometheus/collectors/dbstats_collector_test.go000066400000000000000000000047171405741072000260540ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
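//
// A hedged usage sketch for the DBStats collector above (the driver name,
// the dsn variable, and the HTTP wiring are placeholder assumptions, not
// part of this package):
//
//	db, err := sql.Open("mysql", dsn) // any database/sql driver works
//	if err != nil {
//		log.Fatal(err)
//	}
//	prometheus.MustRegister(collectors.NewDBStatsCollector(db, "orders"))
//	http.Handle("/metrics", promhttp.Handler())
//
// Each registered database then reports the go_sql_* gauges and counters
// declared in dbstats_collector.go, distinguished by the db_name label.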
package collectors import ( "database/sql" "runtime" "testing" "github.com/prometheus/client_golang/prometheus" ) func TestDBStatsCollector(t *testing.T) { reg := prometheus.NewRegistry() { db := new(sql.DB) if err := reg.Register(NewDBStatsCollector(db, "db_A")); err != nil { t.Fatal(err) } } { db := new(sql.DB) if err := reg.Register(NewDBStatsCollector(db, "db_B")); err != nil { t.Fatal(err) } } mfs, err := reg.Gather() if err != nil { t.Fatal(err) } names := []string{ "go_sql_max_open_connections", "go_sql_open_connections", "go_sql_in_use_connections", "go_sql_idle_connections", "go_sql_wait_count_total", "go_sql_wait_duration_seconds_total", "go_sql_max_idle_closed_total", "go_sql_max_lifetime_closed_total", } if runtime.Version() >= "go1.15" { names = append(names, "go_sql_max_idle_time_closed_total") } type result struct { found bool } results := make(map[string]result) for _, name := range names { results[name] = result{found: false} } for _, mf := range mfs { m := mf.GetMetric() if len(m) != 2 { t.Errorf("expected 2 metrics bug got %d", len(m)) } labelA := m[0].GetLabel()[0] if name := labelA.GetName(); name != "db_name" { t.Errorf("expected to get label \"db_name\" but got %s", name) } if value := labelA.GetValue(); value != "db_A" { t.Errorf("expected to get value \"db_A\" but got %s", value) } labelB := m[1].GetLabel()[0] if name := labelB.GetName(); name != "db_name" { t.Errorf("expected to get label \"db_name\" but got %s", name) } if value := labelB.GetValue(); value != "db_B" { t.Errorf("expected to get value \"db_B\" but got %s", value) } for _, name := range names { if name == mf.GetName() { results[name] = result{found: true} break } } } for name, result := range results { if !result.found { t.Errorf("%s not found", name) } } } client_golang-1.11.0/prometheus/collectors/expvar_collector.go000066400000000000000000000054351405741072000246540ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collectors import "github.com/prometheus/client_golang/prometheus" // NewExpvarCollector returns a newly allocated expvar Collector. // // An expvar Collector collects metrics from the expvar interface. It provides a // quick way to expose numeric values that are already exported via expvar as // Prometheus metrics. Note that the data models of expvar and Prometheus are // fundamentally different, and that the expvar Collector is inherently slower // than native Prometheus metrics. Thus, the expvar Collector is probably great // for experiments and prototying, but you should seriously consider a more // direct implementation of Prometheus metrics for monitoring production // systems. // // The exports map has the following meaning: // // The keys in the map correspond to expvar keys, i.e. for every expvar key you // want to export as Prometheus metric, you need an entry in the exports // map. The descriptor mapped to each key describes how to export the expvar // value. 
It defines the name and the help string of the Prometheus metric // proxying the expvar value. The type will always be Untyped. // // For descriptors without variable labels, the expvar value must be a number or // a bool. The number is then directly exported as the Prometheus sample // value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values // that are not numbers or bools are silently ignored. // // If the descriptor has one variable label, the expvar value must be an expvar // map. The keys in the expvar map become the various values of the one // Prometheus label. The values in the expvar map must be numbers or bools again // as above. // // For descriptors with more than one variable label, the expvar must be a // nested expvar map, i.e. where the values of the topmost map are maps again // etc. until a depth is reached that corresponds to the number of labels. The // leaves of that structure must be numbers or bools as above to serve as the // sample values. // // Anything that does not fit into the scheme above is silently ignored. func NewExpvarCollector(exports map[string]*prometheus.Desc) prometheus.Collector { //nolint:staticcheck // Ignore SA1019 until v2. return prometheus.NewExpvarCollector(exports) } client_golang-1.11.0/prometheus/collectors/go_collector.go000066400000000000000000000071531405741072000237530ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collectors import "github.com/prometheus/client_golang/prometheus" // NewGoCollector returns a collector that exports metrics about the current Go // process. This includes memory stats. To collect those, runtime.ReadMemStats // is called. This requires to “stop the world”, which usually only happens for // garbage collection (GC). Take the following implications into account when // deciding whether to use the Go collector: // // 1. The performance impact of stopping the world is the more relevant the more // frequently metrics are collected. However, with Go1.9 or later the // stop-the-world time per metrics collection is very short (~25µs) so that the // performance impact will only matter in rare cases. However, with older Go // versions, the stop-the-world duration depends on the heap size and can be // quite significant (~1.7 ms/GiB as per // https://go-review.googlesource.com/c/go/+/34937). // // 2. During an ongoing GC, nothing else can stop the world. Therefore, if the // metrics collection happens to coincide with GC, it will only complete after // GC has finished. Usually, GC is fast enough to not cause problems. However, // with a very large heap, GC might take multiple seconds, which is enough to // cause scrape timeouts in common setups. To avoid this problem, the Go // collector will use the memstats from a previous collection if // runtime.ReadMemStats takes more than 1s. 
However, if there are no previously // collected memstats, or their collection is more than 5m ago, the collection // will block until runtime.ReadMemStats succeeds. // // NOTE: The problem is solved in Go 1.15, see // https://github.com/golang/go/issues/19812 for the related Go issue. func NewGoCollector() prometheus.Collector { //nolint:staticcheck // Ignore SA1019 until v2. return prometheus.NewGoCollector() } // NewBuildInfoCollector returns a collector collecting a single metric // "go_build_info" with the constant value 1 and three labels "path", "version", // and "checksum". Their label values contain the main module path, version, and // checksum, respectively. The labels will only have meaningful values if the // binary is built with Go module support and from source code retrieved from // the source repository (rather than the local file system). This is usually // accomplished by building from outside of GOPATH, specifying the full address // of the main package, e.g. "GO111MODULE=on go run // github.com/prometheus/client_golang/examples/random". If built without Go // module support, all label values will be "unknown". If built with Go module // support but using the source code from the local file system, the "path" will // be set appropriately, but "checksum" will be empty and "version" will be // "(devel)". // // This collector uses only the build information for the main module. See // https://github.com/povilasv/prommod for an example of a collector for the // module dependencies. func NewBuildInfoCollector() prometheus.Collector { //nolint:staticcheck // Ignore SA1019 until v2. return prometheus.NewBuildInfoCollector() } client_golang-1.11.0/prometheus/collectors/process_collector.go000066400000000000000000000050411405741072000250160ustar00rootroot00000000000000// Copyright 2021 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package collectors import "github.com/prometheus/client_golang/prometheus" // ProcessCollectorOpts defines the behavior of a process metrics collector // created with NewProcessCollector. type ProcessCollectorOpts struct { // PidFn returns the PID of the process the collector collects metrics // for. It is called upon each collection. By default, the PID of the // current process is used, as determined on construction time by // calling os.Getpid(). PidFn func() (int, error) // If non-empty, each of the collected metrics is prefixed by the // provided string and an underscore ("_"). Namespace string // If true, any error encountered during collection is reported as an // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored // and the collected metrics will be incomplete. (Possibly, no metrics // will be collected at all.) While that's usually not desired, it is // appropriate for the common "mix-in" of process metrics, where process // metrics are nice to have, but failing to collect them should not // disrupt the collection of the remaining metrics. 
ReportErrors bool } // NewProcessCollector returns a collector which exports the current state of // process metrics including CPU, memory and file descriptor usage as well as // the process start time. The detailed behavior is defined by the provided // ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a // collector for the current process with an empty namespace string and no error // reporting. // // The collector only works on operating systems with a Linux-style proc // filesystem and on Microsoft Windows. On other operating systems, it will not // collect any metrics. func NewProcessCollector(opts ProcessCollectorOpts) prometheus.Collector { //nolint:staticcheck // Ignore SA1019 until v2. return prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{ PidFn: opts.PidFn, Namespace: opts.Namespace, ReportErrors: opts.ReportErrors, }) } client_golang-1.11.0/prometheus/counter.go000066400000000000000000000264041405741072000206060ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus import ( "errors" "math" "sync/atomic" "time" dto "github.com/prometheus/client_model/go" ) // Counter is a Metric that represents a single numerical value that only ever // goes up. That implies that it cannot be used to count items whose number can // also go down, e.g. the number of currently running goroutines. Those // "counters" are represented by Gauges. // // A Counter is typically used to count requests served, tasks completed, errors // occurred, etc. // // To create Counter instances, use NewCounter. type Counter interface { Metric Collector // Inc increments the counter by 1. Use Add to increment it by arbitrary // non-negative values. Inc() // Add adds the given value to the counter. It panics if the value is < // 0. Add(float64) } // ExemplarAdder is implemented by Counters that offer the option of adding a // value to the Counter together with an exemplar. Its AddWithExemplar method // works like the Add method of the Counter interface but also replaces the // currently saved exemplar (if any) with a new one, created from the provided // value, the current time as timestamp, and the provided labels. Empty Labels // will lead to a valid (label-less) exemplar. But if Labels is nil, the current // exemplar is left in place. AddWithExemplar panics if the value is < 0, if any // of the provided labels are invalid, or if the provided labels contain more // than 64 runes in total. type ExemplarAdder interface { AddWithExemplar(value float64, exemplar Labels) } // CounterOpts is an alias for Opts. See there for doc comments. type CounterOpts Opts // NewCounter creates a new Counter based on the provided CounterOpts. // // The returned implementation also implements ExemplarAdder. It is safe to // perform the corresponding type assertion. // // The returned implementation tracks the counter value in two separate // variables, a float64 and a uint64. 
The latter is used to track calls of the // Inc method and calls of the Add method with a value that can be represented // as a uint64. This allows atomic increments of the counter with optimal // performance. (It is common to have an Inc call in very hot execution paths.) // Both internal tracking values are added up in the Write method. This has to // be taken into account when it comes to precision and overflow behavior. func NewCounter(opts CounterOpts) Counter { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, nil, opts.ConstLabels, ) result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now} result.init(result) // Init self-collection. return result } type counter struct { // valBits contains the bits of the represented float64 value, while // valInt stores values that are exact integers. Both have to go first // in the struct to guarantee alignment for atomic operations. // http://golang.org/pkg/sync/atomic/#pkg-note-BUG valBits uint64 valInt uint64 selfCollector desc *Desc labelPairs []*dto.LabelPair exemplar atomic.Value // Containing nil or a *dto.Exemplar. now func() time.Time // To mock out time.Now() for testing. } func (c *counter) Desc() *Desc { return c.desc } func (c *counter) Add(v float64) { if v < 0 { panic(errors.New("counter cannot decrease in value")) } ival := uint64(v) if float64(ival) == v { atomic.AddUint64(&c.valInt, ival) return } for { oldBits := atomic.LoadUint64(&c.valBits) newBits := math.Float64bits(math.Float64frombits(oldBits) + v) if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) { return } } } func (c *counter) AddWithExemplar(v float64, e Labels) { c.Add(v) c.updateExemplar(v, e) } func (c *counter) Inc() { atomic.AddUint64(&c.valInt, 1) } func (c *counter) Write(out *dto.Metric) error { fval := math.Float64frombits(atomic.LoadUint64(&c.valBits)) ival := atomic.LoadUint64(&c.valInt) val := fval + float64(ival) var exemplar *dto.Exemplar if e := c.exemplar.Load(); e != nil { exemplar = e.(*dto.Exemplar) } return populateMetric(CounterValue, val, c.labelPairs, exemplar, out) } func (c *counter) updateExemplar(v float64, l Labels) { if l == nil { return } e, err := newExemplar(v, c.now(), l) if err != nil { panic(err) } c.exemplar.Store(e) } // CounterVec is a Collector that bundles a set of Counters that all share the // same Desc, but have different values for their variable labels. This is used // if you want to count the same thing partitioned by various dimensions // (e.g. number of HTTP requests, partitioned by response code and // method). Create instances with NewCounterVec. type CounterVec struct { *MetricVec } // NewCounterVec creates a new CounterVec based on the provided CounterOpts and // partitioned by the given label names. func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, labelNames, opts.ConstLabels, ) return &CounterVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { if len(lvs) != len(desc.variableLabels) { panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) } result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now} result.init(result) // Init self-collection. return result }), } } // GetMetricWithLabelValues returns the Counter for the given slice of label // values (same order as the variable labels in Desc). 
If that combination of // label values is accessed for the first time, a new Counter is created. // // It is possible to call this method without using the returned Counter to only // create the new Counter but leave it at its starting value 0. See also the // SummaryVec example. // // Keeping the Counter for later use is possible (and should be considered if // performance is critical), but keep in mind that Reset, DeleteLabelValues and // Delete can be used to delete the Counter from the CounterVec. In that case, // the Counter will still exist, but it will not be exported anymore, even if a // Counter with the same label values is created later. // // An error is returned if the number of label values is not the same as the // number of variable labels in Desc (minus any curried labels). // // Note that for more than one label value, this method is prone to mistakes // caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as // an alternative to avoid that type of mistake. For higher label numbers, the // latter has a much more readable (albeit more verbose) syntax, but it comes // with a performance overhead (for creating and processing the Labels map). // See also the GaugeVec example. func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) if metric != nil { return metric.(Counter), err } return nil, err } // GetMetricWith returns the Counter for the given Labels map (the label names // must match those of the variable labels in Desc). If that label map is // accessed for the first time, a new Counter is created. Implications of // creating a Counter without using it and keeping the Counter for later use are // the same as for GetMetricWithLabelValues. // // An error is returned if the number and names of the Labels are inconsistent // with those of the variable labels in Desc (minus any curried labels). // // This method is used for the same purpose as // GetMetricWithLabelValues(...string). See there for pros and cons of the two // methods. func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { metric, err := v.MetricVec.GetMetricWith(labels) if metric != nil { return metric.(Counter), err } return nil, err } // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. Not returning an // error allows shortcuts like // myVec.WithLabelValues("404", "GET").Add(42) func (v *CounterVec) WithLabelValues(lvs ...string) Counter { c, err := v.GetMetricWithLabelValues(lvs...) if err != nil { panic(err) } return c } // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like // myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) func (v *CounterVec) With(labels Labels) Counter { c, err := v.GetMetricWith(labels) if err != nil { panic(err) } return c } // CurryWith returns a vector curried with the provided labels, i.e. the // returned vector has those labels pre-set for all labeled operations performed // on it. The cardinality of the curried vector is reduced accordingly. The // order of the remaining labels stays the same (just with the curried labels // taken out of the sequence – which is relevant for the // (GetMetric)WithLabelValues methods). It is possible to curry a curried // vector, but only with labels not yet used for currying before. 
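//
// For illustration only (a sketch not taken from the original documentation;
// httpReqs is a hypothetical CounterVec with the variable labels "code" and
// "method"):
//   getReqs := httpReqs.MustCurryWith(prometheus.Labels{"method": "GET"})
//   getReqs.WithLabelValues("404").Inc() // Same series as httpReqs.WithLabelValues("404", "GET").Inc().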
// // The metrics contained in the CounterVec are shared between the curried and // uncurried vectors. They are just accessed differently. Curried and uncurried // vectors behave identically in terms of collection. Only one must be // registered with a given registry (usually the uncurried version). The Reset // method deletes all metrics, even if called on a curried vector. func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) { vec, err := v.MetricVec.CurryWith(labels) if vec != nil { return &CounterVec{vec}, err } return nil, err } // MustCurryWith works as CurryWith but panics where CurryWith would have // returned an error. func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec { vec, err := v.CurryWith(labels) if err != nil { panic(err) } return vec } // CounterFunc is a Counter whose value is determined at collect time by calling a // provided function. // // To create CounterFunc instances, use NewCounterFunc. type CounterFunc interface { Metric Collector } // NewCounterFunc creates a new CounterFunc based on the provided // CounterOpts. The value reported is determined by calling the given function // from within the Write method. Take into account that metric collection may // happen concurrently. If that results in concurrent calls to Write, like in // the case where a CounterFunc is directly registered with Prometheus, the // provided function must be concurrency-safe. The function should also honor // the contract for a Counter (values only go up, not down), but compliance will // not be checked. // // Check out the ExampleGaugeFunc examples for the similar GaugeFunc. func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { return newValueFunc(NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, nil, opts.ConstLabels, ), CounterValue, function) } client_golang-1.11.0/prometheus/counter_test.go000066400000000000000000000172251405741072000216460ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus import ( "fmt" "math" "testing" "time" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. 
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" dto "github.com/prometheus/client_model/go" ) func TestCounterAdd(t *testing.T) { counter := NewCounter(CounterOpts{ Name: "test", Help: "test help", ConstLabels: Labels{"a": "1", "b": "2"}, }).(*counter) counter.Inc() if expected, got := 0.0, math.Float64frombits(counter.valBits); expected != got { t.Errorf("Expected %f, got %f.", expected, got) } if expected, got := uint64(1), counter.valInt; expected != got { t.Errorf("Expected %d, got %d.", expected, got) } counter.Add(42) if expected, got := 0.0, math.Float64frombits(counter.valBits); expected != got { t.Errorf("Expected %f, got %f.", expected, got) } if expected, got := uint64(43), counter.valInt; expected != got { t.Errorf("Expected %d, got %d.", expected, got) } counter.Add(24.42) if expected, got := 24.42, math.Float64frombits(counter.valBits); expected != got { t.Errorf("Expected %f, got %f.", expected, got) } if expected, got := uint64(43), counter.valInt; expected != got { t.Errorf("Expected %d, got %d.", expected, got) } if expected, got := "counter cannot decrease in value", decreaseCounter(counter).Error(); expected != got { t.Errorf("Expected error %q, got %q.", expected, got) } m := &dto.Metric{} counter.Write(m) if expected, got := `label: label: counter: `, m.String(); expected != got { t.Errorf("expected %q, got %q", expected, got) } } func decreaseCounter(c *counter) (err error) { defer func() { if e := recover(); e != nil { err = e.(error) } }() c.Add(-1) return nil } func TestCounterVecGetMetricWithInvalidLabelValues(t *testing.T) { testCases := []struct { desc string labels Labels }{ { desc: "non utf8 label value", labels: Labels{"a": "\xFF"}, }, { desc: "not enough label values", labels: Labels{}, }, { desc: "too many label values", labels: Labels{"a": "1", "b": "2"}, }, } for _, test := range testCases { counterVec := NewCounterVec(CounterOpts{ Name: "test", }, []string{"a"}) labelValues := make([]string, len(test.labels)) for _, val := range test.labels { labelValues = append(labelValues, val) } expectPanic(t, func() { counterVec.WithLabelValues(labelValues...) 
}, fmt.Sprintf("WithLabelValues: expected panic because: %s", test.desc)) expectPanic(t, func() { counterVec.With(test.labels) }, fmt.Sprintf("WithLabelValues: expected panic because: %s", test.desc)) if _, err := counterVec.GetMetricWithLabelValues(labelValues...); err == nil { t.Errorf("GetMetricWithLabelValues: expected error because: %s", test.desc) } if _, err := counterVec.GetMetricWith(test.labels); err == nil { t.Errorf("GetMetricWith: expected error because: %s", test.desc) } } } func expectPanic(t *testing.T, op func(), errorMsg string) { defer func() { if err := recover(); err == nil { t.Error(errorMsg) } }() op() } func TestCounterAddInf(t *testing.T) { counter := NewCounter(CounterOpts{ Name: "test", Help: "test help", }).(*counter) counter.Inc() if expected, got := 0.0, math.Float64frombits(counter.valBits); expected != got { t.Errorf("Expected %f, got %f.", expected, got) } if expected, got := uint64(1), counter.valInt; expected != got { t.Errorf("Expected %d, got %d.", expected, got) } counter.Add(math.Inf(1)) if expected, got := math.Inf(1), math.Float64frombits(counter.valBits); expected != got { t.Errorf("valBits expected %f, got %f.", expected, got) } if expected, got := uint64(1), counter.valInt; expected != got { t.Errorf("valInts expected %d, got %d.", expected, got) } counter.Inc() if expected, got := math.Inf(1), math.Float64frombits(counter.valBits); expected != got { t.Errorf("Expected %f, got %f.", expected, got) } if expected, got := uint64(2), counter.valInt; expected != got { t.Errorf("Expected %d, got %d.", expected, got) } m := &dto.Metric{} counter.Write(m) if expected, got := `counter: `, m.String(); expected != got { t.Errorf("expected %q, got %q", expected, got) } } func TestCounterAddLarge(t *testing.T) { counter := NewCounter(CounterOpts{ Name: "test", Help: "test help", }).(*counter) // large overflows the underlying type and should therefore be stored in valBits. 
large := math.Nextafter(float64(math.MaxUint64), 1e20) counter.Add(large) if expected, got := large, math.Float64frombits(counter.valBits); expected != got { t.Errorf("valBits expected %f, got %f.", expected, got) } if expected, got := uint64(0), counter.valInt; expected != got { t.Errorf("valInts expected %d, got %d.", expected, got) } m := &dto.Metric{} counter.Write(m) if expected, got := fmt.Sprintf("counter: ", large), m.String(); expected != got { t.Errorf("expected %q, got %q", expected, got) } } func TestCounterAddSmall(t *testing.T) { counter := NewCounter(CounterOpts{ Name: "test", Help: "test help", }).(*counter) small := 0.000000000001 counter.Add(small) if expected, got := small, math.Float64frombits(counter.valBits); expected != got { t.Errorf("valBits expected %f, got %f.", expected, got) } if expected, got := uint64(0), counter.valInt; expected != got { t.Errorf("valInts expected %d, got %d.", expected, got) } m := &dto.Metric{} counter.Write(m) if expected, got := fmt.Sprintf("counter: ", small), m.String(); expected != got { t.Errorf("expected %q, got %q", expected, got) } } func TestCounterExemplar(t *testing.T) { now := time.Now() counter := NewCounter(CounterOpts{ Name: "test", Help: "test help", }).(*counter) counter.now = func() time.Time { return now } ts, err := ptypes.TimestampProto(now) if err != nil { t.Fatal(err) } expectedExemplar := &dto.Exemplar{ Label: []*dto.LabelPair{ &dto.LabelPair{Name: proto.String("foo"), Value: proto.String("bar")}, }, Value: proto.Float64(42), Timestamp: ts, } counter.AddWithExemplar(42, Labels{"foo": "bar"}) if expected, got := expectedExemplar.String(), counter.exemplar.Load().(*dto.Exemplar).String(); expected != got { t.Errorf("expected exemplar %s, got %s.", expected, got) } addExemplarWithInvalidLabel := func() (err error) { defer func() { if e := recover(); e != nil { err = e.(error) } }() // Should panic because of invalid label name. counter.AddWithExemplar(42, Labels{":o)": "smile"}) return nil } if addExemplarWithInvalidLabel() == nil { t.Error("adding exemplar with invalid label succeeded") } addExemplarWithOversizedLabels := func() (err error) { defer func() { if e := recover(); e != nil { err = e.(error) } }() // Should panic because of 65 runes. counter.AddWithExemplar(42, Labels{ "abcdefghijklmnopqrstuvwxyz": "26+16 characters", "x1234567": "8+15 characters", }) return nil } if addExemplarWithOversizedLabels() == nil { t.Error("adding exemplar with oversized labels succeeded") } } client_golang-1.11.0/prometheus/desc.go000066400000000000000000000151551405741072000200460ustar00rootroot00000000000000// Copyright 2016 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus import ( "errors" "fmt" "sort" "strings" "github.com/cespare/xxhash/v2" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. 
"github.com/golang/protobuf/proto" "github.com/prometheus/common/model" dto "github.com/prometheus/client_model/go" ) // Desc is the descriptor used by every Prometheus Metric. It is essentially // the immutable meta-data of a Metric. The normal Metric implementations // included in this package manage their Desc under the hood. Users only have to // deal with Desc if they use advanced features like the ExpvarCollector or // custom Collectors and Metrics. // // Descriptors registered with the same registry have to fulfill certain // consistency and uniqueness criteria if they share the same fully-qualified // name: They must have the same help string and the same label names (aka label // dimensions) in each, constLabels and variableLabels, but they must differ in // the values of the constLabels. // // Descriptors that share the same fully-qualified names and the same label // values of their constLabels are considered equal. // // Use NewDesc to create new Desc instances. type Desc struct { // fqName has been built from Namespace, Subsystem, and Name. fqName string // help provides some helpful information about this metric. help string // constLabelPairs contains precalculated DTO label pairs based on // the constant labels. constLabelPairs []*dto.LabelPair // variableLabels contains names of labels for which the metric // maintains variable values. variableLabels []string // id is a hash of the values of the ConstLabels and fqName. This // must be unique among all registered descriptors and can therefore be // used as an identifier of the descriptor. id uint64 // dimHash is a hash of the label names (preset and variable) and the // Help string. Each Desc with the same fqName must have the same // dimHash. dimHash uint64 // err is an error that occurred during construction. It is reported on // registration time. err error } // NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc // and will be reported on registration time. variableLabels and constLabels can // be nil if no such labels should be set. fqName must not be empty. // // variableLabels only contain the label names. Their label values are variable // and therefore not part of the Desc. (They are managed within the Metric.) // // For constLabels, the label values are constant. Therefore, they are fully // specified in the Desc. See the Collector example for a usage pattern. func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { d := &Desc{ fqName: fqName, help: help, variableLabels: variableLabels, } if !model.IsValidMetricName(model.LabelValue(fqName)) { d.err = fmt.Errorf("%q is not a valid metric name", fqName) return d } // labelValues contains the label values of const labels (in order of // their sorted label names) plus the fqName (at position 0). labelValues := make([]string, 1, len(constLabels)+1) labelValues[0] = fqName labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) labelNameSet := map[string]struct{}{} // First add only the const label names and sort them... for labelName := range constLabels { if !checkLabelName(labelName) { d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) return d } labelNames = append(labelNames, labelName) labelNameSet[labelName] = struct{}{} } sort.Strings(labelNames) // ... so that we can now add const label values in the order of their names. for _, labelName := range labelNames { labelValues = append(labelValues, constLabels[labelName]) } // Validate the const label values. 
They can't have a wrong cardinality, so // use in len(labelValues) as expectedNumberOfValues. if err := validateLabelValues(labelValues, len(labelValues)); err != nil { d.err = err return d } // Now add the variable label names, but prefix them with something that // cannot be in a regular label name. That prevents matching the label // dimension with a different mix between preset and variable labels. for _, labelName := range variableLabels { if !checkLabelName(labelName) { d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) return d } labelNames = append(labelNames, "$"+labelName) labelNameSet[labelName] = struct{}{} } if len(labelNames) != len(labelNameSet) { d.err = errors.New("duplicate label names") return d } xxh := xxhash.New() for _, val := range labelValues { xxh.WriteString(val) xxh.Write(separatorByteSlice) } d.id = xxh.Sum64() // Sort labelNames so that order doesn't matter for the hash. sort.Strings(labelNames) // Now hash together (in this order) the help string and the sorted // label names. xxh.Reset() xxh.WriteString(help) xxh.Write(separatorByteSlice) for _, labelName := range labelNames { xxh.WriteString(labelName) xxh.Write(separatorByteSlice) } d.dimHash = xxh.Sum64() d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) for n, v := range constLabels { d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ Name: proto.String(n), Value: proto.String(v), }) } sort.Sort(labelPairSorter(d.constLabelPairs)) return d } // NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the // provided error set. If a collector returning such a descriptor is registered, // registration will fail with the provided error. NewInvalidDesc can be used by // a Collector to signal inability to describe itself. func NewInvalidDesc(err error) *Desc { return &Desc{ err: err, } } func (d *Desc) String() string { lpStrings := make([]string, 0, len(d.constLabelPairs)) for _, lp := range d.constLabelPairs { lpStrings = append( lpStrings, fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), ) } return fmt.Sprintf( "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", d.fqName, d.help, strings.Join(lpStrings, ","), d.variableLabels, ) } client_golang-1.11.0/prometheus/desc_test.go000066400000000000000000000015351405741072000211020ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus import ( "testing" ) func TestNewDescInvalidLabelValues(t *testing.T) { desc := NewDesc( "sample_label", "sample label", nil, Labels{"a": "\xFF"}, ) if desc.err == nil { t.Errorf("NewDesc: expected error because: %s", desc.err) } } client_golang-1.11.0/prometheus/doc.go000066400000000000000000000227401405741072000176730ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package prometheus is the core instrumentation package. It provides metrics // primitives to instrument code for monitoring. It also offers a registry for // metrics. Sub-packages allow to expose the registered metrics via HTTP // (package promhttp) or push them to a Pushgateway (package push). There is // also a sub-package promauto, which provides metrics constructors with // automatic registration. // // All exported functions and methods are safe to be used concurrently unless // specified otherwise. // // A Basic Example // // As a starting point, a very basic usage example: // // package main // // import ( // "log" // "net/http" // // "github.com/prometheus/client_golang/prometheus" // "github.com/prometheus/client_golang/prometheus/promhttp" // ) // // var ( // cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ // Name: "cpu_temperature_celsius", // Help: "Current temperature of the CPU.", // }) // hdFailures = prometheus.NewCounterVec( // prometheus.CounterOpts{ // Name: "hd_errors_total", // Help: "Number of hard-disk errors.", // }, // []string{"device"}, // ) // ) // // func init() { // // Metrics have to be registered to be exposed: // prometheus.MustRegister(cpuTemp) // prometheus.MustRegister(hdFailures) // } // // func main() { // cpuTemp.Set(65.3) // hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() // // // The Handler function provides a default handler to expose metrics // // via an HTTP server. "/metrics" is the usual endpoint for that. // http.Handle("/metrics", promhttp.Handler()) // log.Fatal(http.ListenAndServe(":8080", nil)) // } // // // This is a complete program that exports two metrics, a Gauge and a Counter, // the latter with a label attached to turn it into a (one-dimensional) vector. // // Metrics // // The number of exported identifiers in this package might appear a bit // overwhelming. However, in addition to the basic plumbing shown in the example // above, you only need to understand the different metric types and their // vector versions for basic usage. Furthermore, if you are not concerned with // fine-grained control of when and how to register metrics with the registry, // have a look at the promauto package, which will effectively allow you to // ignore registration altogether in simple cases. // // Above, you have already touched the Counter and the Gauge. There are two more // advanced metric types: the Summary and Histogram. A more thorough description // of those four metric types can be found in the Prometheus docs: // https://prometheus.io/docs/concepts/metric_types/ // // In addition to the fundamental metric types Gauge, Counter, Summary, and // Histogram, a very important part of the Prometheus data model is the // partitioning of samples along dimensions called labels, which results in // metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, // and HistogramVec. // // While only the fundamental metric types implement the Metric interface, both // the metrics and their vector versions implement the Collector interface. 
A // Collector manages the collection of a number of Metrics, but for convenience, // a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and // Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec, // and HistogramVec are not. // // To create instances of Metrics and their vector versions, you need a suitable // …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts. // // Custom Collectors and constant Metrics // // While you could create your own implementations of Metric, most likely you // will only ever implement the Collector interface on your own. At a first // glance, a custom Collector seems handy to bundle Metrics for common // registration (with the prime example of the different metric vectors above, // which bundle all the metrics of the same name but with different labels). // // There is a more involved use case, too: If you already have metrics // available, created outside of the Prometheus context, you don't need the // interface of the various Metric types. You essentially want to mirror the // existing numbers into Prometheus Metrics during collection. An own // implementation of the Collector interface is perfect for that. You can create // Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and // NewConstSummary (and their respective Must… versions). NewConstMetric is used // for all metric types with just a float64 as their value: Counter, Gauge, and // a special “type” called Untyped. Use the latter if you are not sure if the // mirrored metric is a Counter or a Gauge. Creation of the Metric instance // happens in the Collect method. The Describe method has to return separate // Desc instances, representative of the “throw-away” metrics to be created // later. NewDesc comes in handy to create those Desc instances. Alternatively, // you could return no Desc at all, which will mark the Collector “unchecked”. // No checks are performed at registration time, but metric consistency will // still be ensured at scrape time, i.e. any inconsistencies will lead to scrape // errors. Thus, with unchecked Collectors, the responsibility to not collect // metrics that lead to inconsistencies in the total scrape result lies with the // implementer of the Collector. While this is not a desirable state, it is // sometimes necessary. The typical use case is a situation where the exact // metrics to be returned by a Collector cannot be predicted at registration // time, but the implementer has sufficient knowledge of the whole system to // guarantee metric consistency. // // The Collector example illustrates the use case. You can also look at the // source code of the processCollector (mirroring process metrics), the // goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar // metrics) as examples that are used in this package itself. // // If you just need to call a function to get a single float value to collect as // a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting // shortcuts. // // Advanced Uses of the Registry // // While MustRegister is the by far most common way of registering a Collector, // sometimes you might want to handle the errors the registration might cause. // As suggested by the name, MustRegister panics if an error occurs. With the // Register function, the error is returned and can be handled. // // An error is returned if the registered Collector is incompatible or // inconsistent with already registered metrics. 
The registry aims for // consistency of the collected metrics according to the Prometheus data model. // Inconsistencies are ideally detected at registration time, not at collect // time. The former will usually be detected at start-up time of a program, // while the latter will only happen at scrape time, possibly not even on the // first scrape if the inconsistency only becomes relevant later. That is the // main reason why a Collector and a Metric have to describe themselves to the // registry. // // So far, everything we did operated on the so-called default registry, as it // can be found in the global DefaultRegisterer variable. With NewRegistry, you // can create a custom registry, or you can even implement the Registerer or // Gatherer interfaces yourself. The methods Register and Unregister work in the // same way on a custom registry as the global functions Register and Unregister // on the default registry. // // There are a number of uses for custom registries: You can use registries with // special properties, see NewPedanticRegistry. You can avoid global state, as // it is imposed by the DefaultRegisterer. You can use multiple registries at // the same time to expose different metrics in different ways. You can use // separate registries for testing purposes. // // Also note that the DefaultRegisterer comes registered with a Collector for Go // runtime metrics (via NewGoCollector) and a Collector for process metrics (via // NewProcessCollector). With a custom registry, you are in control and decide // yourself about the Collectors to register. // // HTTP Exposition // // The Registry implements the Gatherer interface. The caller of the Gather // method can then expose the gathered metrics in some way. Usually, the metrics // are served via HTTP on the /metrics endpoint. That's happening in the example // above. The tools to expose metrics via HTTP are in the promhttp sub-package. // // Pushing to the Pushgateway // // Function for pushing to the Pushgateway can be found in the push sub-package. // // Graphite Bridge // // Functions and examples to push metrics from a Gatherer to Graphite can be // found in the graphite sub-package. // // Other Means of Exposition // // More ways of exposing metrics can easily be added by following the approaches // of the existing implementations. package prometheus client_golang-1.11.0/prometheus/example_clustermanager_test.go000066400000000000000000000117061405741072000247140ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus_test import ( "log" "net/http" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" ) // ClusterManager is an example for a system that might have been built without // Prometheus in mind. It models a central manager of jobs running in a // cluster. 
Thus, we implement a custom Collector called // ClusterManagerCollector, which collects information from a ClusterManager // using its provided methods and turns them into Prometheus Metrics for // collection. // // An additional challenge is that multiple instances of the ClusterManager are // run within the same binary, each in charge of a different zone. We need to // make use of wrapping Registerers to be able to register each // ClusterManagerCollector instance with Prometheus. type ClusterManager struct { Zone string // Contains many more fields not listed in this example. } // ReallyExpensiveAssessmentOfTheSystemState is a mock for the data gathering a // real cluster manager would have to do. Since it may actually be really // expensive, it must only be called once per collection. This implementation, // obviously, only returns some made-up data. func (c *ClusterManager) ReallyExpensiveAssessmentOfTheSystemState() ( oomCountByHost map[string]int, ramUsageByHost map[string]float64, ) { // Just example fake data. oomCountByHost = map[string]int{ "foo.example.org": 42, "bar.example.org": 2001, } ramUsageByHost = map[string]float64{ "foo.example.org": 6.023e23, "bar.example.org": 3.14, } return } // ClusterManagerCollector implements the Collector interface. type ClusterManagerCollector struct { ClusterManager *ClusterManager } // Descriptors used by the ClusterManagerCollector below. var ( oomCountDesc = prometheus.NewDesc( "clustermanager_oom_crashes_total", "Number of OOM crashes.", []string{"host"}, nil, ) ramUsageDesc = prometheus.NewDesc( "clustermanager_ram_usage_bytes", "RAM usage as reported to the cluster manager.", []string{"host"}, nil, ) ) // Describe is implemented with DescribeByCollect. That's possible because the // Collect method will always return the same two metrics with the same two // descriptors. func (cc ClusterManagerCollector) Describe(ch chan<- *prometheus.Desc) { prometheus.DescribeByCollect(cc, ch) } // Collect first triggers the ReallyExpensiveAssessmentOfTheSystemState. Then it // creates constant metrics for each host on the fly based on the returned data. // // Note that Collect could be called concurrently, so we depend on // ReallyExpensiveAssessmentOfTheSystemState to be concurrency-safe. func (cc ClusterManagerCollector) Collect(ch chan<- prometheus.Metric) { oomCountByHost, ramUsageByHost := cc.ClusterManager.ReallyExpensiveAssessmentOfTheSystemState() for host, oomCount := range oomCountByHost { ch <- prometheus.MustNewConstMetric( oomCountDesc, prometheus.CounterValue, float64(oomCount), host, ) } for host, ramUsage := range ramUsageByHost { ch <- prometheus.MustNewConstMetric( ramUsageDesc, prometheus.GaugeValue, ramUsage, host, ) } } // NewClusterManager first creates a Prometheus-ignorant ClusterManager // instance. Then, it creates a ClusterManagerCollector for the just created // ClusterManager. Finally, it registers the ClusterManagerCollector with a // wrapping Registerer that adds the zone as a label. In this way, the metrics // collected by different ClusterManagerCollectors do not collide. func NewClusterManager(zone string, reg prometheus.Registerer) *ClusterManager { c := &ClusterManager{ Zone: zone, } cc := ClusterManagerCollector{ClusterManager: c} prometheus.WrapRegistererWith(prometheus.Labels{"zone": zone}, reg).MustRegister(cc) return c } func ExampleCollector() { // Since we are dealing with custom Collector implementations, it might // be a good idea to try it out with a pedantic registry. 
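// A pedantic registry additionally checks during collection that each
// reported Metric is consistent with its Desc and that the Desc has actually
// been registered.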
reg := prometheus.NewPedanticRegistry() // Construct cluster managers. In real code, we would assign them to // variables to then do something with them. NewClusterManager("db", reg) NewClusterManager("ca", reg) // Add the standard process and Go metrics to the custom registry. reg.MustRegister( prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}), prometheus.NewGoCollector(), ) http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{})) log.Fatal(http.ListenAndServe(":8080", nil)) } client_golang-1.11.0/prometheus/example_metricvec_test.go000066400000000000000000000105301405741072000236530ustar00rootroot00000000000000// Copyright 2020 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus_test import ( "fmt" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" dto "github.com/prometheus/client_model/go" "github.com/prometheus/client_golang/prometheus" ) // Info implements an info pseudo-metric, which is modeled as a Gauge that // always has a value of 1. In practice, you would just use a Gauge directly, // but for this example, we pretend it would be useful to have a “native” // implementation. type Info struct { desc *prometheus.Desc labelPairs []*dto.LabelPair } func (i Info) Desc() *prometheus.Desc { return i.desc } func (i Info) Write(out *dto.Metric) error { out.Label = i.labelPairs out.Gauge = &dto.Gauge{Value: proto.Float64(1)} return nil } // InfoVec is the vector version for Info. As an info metric never changes, we // wouldn't really need to wrap GetMetricWithLabelValues and GetMetricWith // because Info has no additional methods compared to the vanilla Metric that // the unwrapped MetricVec methods return. However, to demonstrate all there is // to do to fully implement a vector for a custom Metric implementation, we do // it in this example anyway. type InfoVec struct { *prometheus.MetricVec } func NewInfoVec(name, help string, labelNames []string) *InfoVec { desc := prometheus.NewDesc(name, help, labelNames, nil) return &InfoVec{ MetricVec: prometheus.NewMetricVec(desc, func(lvs ...string) prometheus.Metric { if len(lvs) != len(labelNames) { panic("inconsistent label cardinality") } return Info{desc: desc, labelPairs: prometheus.MakeLabelPairs(desc, lvs)} }), } } func (v *InfoVec) GetMetricWithLabelValues(lvs ...string) (Info, error) { metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) return metric.(Info), err } func (v *InfoVec) GetMetricWith(labels prometheus.Labels) (Info, error) { metric, err := v.MetricVec.GetMetricWith(labels) return metric.(Info), err } func (v *InfoVec) WithLabelValues(lvs ...string) Info { i, err := v.GetMetricWithLabelValues(lvs...) 
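// As with the vectors built into the prometheus package, panic on error so
// that calls can be chained.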
if err != nil { panic(err) } return i } func (v *InfoVec) With(labels prometheus.Labels) Info { i, err := v.GetMetricWith(labels) if err != nil { panic(err) } return i } func (v *InfoVec) CurryWith(labels prometheus.Labels) (*InfoVec, error) { vec, err := v.MetricVec.CurryWith(labels) if vec != nil { return &InfoVec{vec}, err } return nil, err } func (v *InfoVec) MustCurryWith(labels prometheus.Labels) *InfoVec { vec, err := v.CurryWith(labels) if err != nil { panic(err) } return vec } func ExampleMetricVec() { infoVec := NewInfoVec( "library_version_info", "Versions of the libraries used in this binary.", []string{"library", "version"}, ) infoVec.WithLabelValues("prometheus/client_golang", "1.7.1") infoVec.WithLabelValues("k8s.io/client-go", "0.18.8") // Just for demonstration, let's check the state of the InfoVec by // registering it with a custom registry and then let it collect the // metrics. reg := prometheus.NewRegistry() reg.MustRegister(infoVec) metricFamilies, err := reg.Gather() if err != nil || len(metricFamilies) != 1 { panic("unexpected behavior of custom test registry") } fmt.Println(proto.MarshalTextString(metricFamilies[0])) // Output: // name: "library_version_info" // help: "Versions of the libraries used in this binary." // type: GAUGE // metric: < // label: < // name: "library" // value: "k8s.io/client-go" // > // label: < // name: "version" // value: "0.18.8" // > // gauge: < // value: 1 // > // > // metric: < // label: < // name: "library" // value: "prometheus/client_golang" // > // label: < // name: "version" // value: "1.7.1" // > // gauge: < // value: 1 // > // > } client_golang-1.11.0/prometheus/example_timer_complex_test.go000066400000000000000000000052611405741072000245460ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus_test import ( "net/http" "github.com/prometheus/client_golang/prometheus" ) var ( // apiRequestDuration tracks the duration separate for each HTTP status // class (1xx, 2xx, ...). This creates a fair amount of time series on // the Prometheus server. Usually, you would track the duration of // serving HTTP request without partitioning by outcome. Do something // like this only if needed. Also note how only status classes are // tracked, not every single status code. The latter would create an // even larger amount of time series. Request counters partitioned by // status code are usually OK as each counter only creates one time // series. Histograms are way more expensive, so partition with care and // only where you really need separate latency tracking. Partitioning by // status class is only an example. In concrete cases, other partitions // might make more sense. 
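// ExponentialBuckets(0.1, 1.5, 5) below yields upper bounds of 0.1, 0.15,
// 0.225, 0.3375, and 0.50625 seconds, plus the implicit +Inf bucket.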
apiRequestDuration = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "api_request_duration_seconds", Help: "Histogram for the request duration of the public API, partitioned by status class.", Buckets: prometheus.ExponentialBuckets(0.1, 1.5, 5), }, []string{"status_class"}, ) ) func handler(w http.ResponseWriter, r *http.Request) { status := http.StatusOK // The ObserverFunc gets called by the deferred ObserveDuration and // decides which Histogram's Observe method is called. timer := prometheus.NewTimer(prometheus.ObserverFunc(func(v float64) { switch { case status >= 500: // Server error. apiRequestDuration.WithLabelValues("5xx").Observe(v) case status >= 400: // Client error. apiRequestDuration.WithLabelValues("4xx").Observe(v) case status >= 300: // Redirection. apiRequestDuration.WithLabelValues("3xx").Observe(v) case status >= 200: // Success. apiRequestDuration.WithLabelValues("2xx").Observe(v) default: // Informational. apiRequestDuration.WithLabelValues("1xx").Observe(v) } })) defer timer.ObserveDuration() // Handle the request. Set status accordingly. // ... } func ExampleTimer_complex() { http.HandleFunc("/api", handler) } client_golang-1.11.0/prometheus/example_timer_gauge_test.go000066400000000000000000000030671405741072000241710ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus_test import ( "os" "github.com/prometheus/client_golang/prometheus" ) var ( // If a function is called rarely (i.e. not more often than scrapes // happen) or ideally only once (like in a batch job), it can make sense // to use a Gauge for timing the function call. For timing a batch job // and pushing the result to a Pushgateway, see also the comprehensive // example in the push package. funcDuration = prometheus.NewGauge(prometheus.GaugeOpts{ Name: "example_function_duration_seconds", Help: "Duration of the last call of an example function.", }) ) func run() error { // The Set method of the Gauge is used to observe the duration. timer := prometheus.NewTimer(prometheus.ObserverFunc(funcDuration.Set)) defer timer.ObserveDuration() // Do something. Return errors as encountered. The use of 'defer' above // makes sure the function is still timed properly. return nil } func ExampleTimer_gauge() { if err := run(); err != nil { os.Exit(1) } } client_golang-1.11.0/prometheus/example_timer_test.go000066400000000000000000000025411405741072000230150ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. package prometheus_test import ( "math/rand" "time" "github.com/prometheus/client_golang/prometheus" ) var ( requestDuration = prometheus.NewHistogram(prometheus.HistogramOpts{ Name: "example_request_duration_seconds", Help: "Histogram for the runtime of a simple example function.", Buckets: prometheus.LinearBuckets(0.01, 0.01, 10), }) ) func ExampleTimer() { // timer times this example function. It uses a Histogram, but a Summary // would also work, as both implement Observer. Check out // https://prometheus.io/docs/practices/histograms/ for differences. timer := prometheus.NewTimer(requestDuration) defer timer.ObserveDuration() // Do something here that takes time. time.Sleep(time.Duration(rand.NormFloat64()*10000+50000) * time.Microsecond) } client_golang-1.11.0/prometheus/examples_test.go000066400000000000000000000554271405741072000220130ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus_test import ( "bytes" "fmt" "math" "net/http" "runtime" "strings" "time" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" "github.com/prometheus/common/expfmt" dto "github.com/prometheus/client_model/go" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" ) func ExampleGauge() { opsQueued := prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "our_company", Subsystem: "blob_storage", Name: "ops_queued", Help: "Number of blob storage operations waiting to be processed.", }) prometheus.MustRegister(opsQueued) // 10 operations queued by the goroutine managing incoming requests. opsQueued.Add(10) // A worker goroutine has picked up a waiting operation. opsQueued.Dec() // And once more... opsQueued.Dec() } func ExampleGaugeVec() { opsQueued := prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "our_company", Subsystem: "blob_storage", Name: "ops_queued", Help: "Number of blob storage operations waiting to be processed, partitioned by user and type.", }, []string{ // Which user has requested the operation? "user", // Of what type is the operation? "type", }, ) prometheus.MustRegister(opsQueued) // Increase a value using compact (but order-sensitive!) WithLabelValues(). opsQueued.WithLabelValues("bob", "put").Add(4) // Increase a value with a map using WithLabels. More verbose, but order // doesn't matter anymore. 
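	// (The map-based method is With, shown below; it accepts a
	// prometheus.Labels map, so no particular argument order is required.)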
opsQueued.With(prometheus.Labels{"type": "delete", "user": "alice"}).Inc() } func ExampleGaugeFunc_simple() { if err := prometheus.Register(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ Subsystem: "runtime", Name: "goroutines_count", Help: "Number of goroutines that currently exist.", }, func() float64 { return float64(runtime.NumGoroutine()) }, )); err == nil { fmt.Println("GaugeFunc 'goroutines_count' registered.") } // Note that the count of goroutines is a gauge (and not a counter) as // it can go up and down. // Output: // GaugeFunc 'goroutines_count' registered. } func ExampleGaugeFunc_constLabels() { // primaryDB and secondaryDB represent two example *sql.DB connections we want to instrument. var primaryDB, secondaryDB interface { Stats() struct{ OpenConnections int } } if err := prometheus.Register(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ Namespace: "mysql", Name: "connections_open", Help: "Number of mysql connections open.", ConstLabels: prometheus.Labels{"destination": "primary"}, }, func() float64 { return float64(primaryDB.Stats().OpenConnections) }, )); err == nil { fmt.Println(`GaugeFunc 'connections_open' for primary DB connection registered with labels {destination="primary"}`) } if err := prometheus.Register(prometheus.NewGaugeFunc( prometheus.GaugeOpts{ Namespace: "mysql", Name: "connections_open", Help: "Number of mysql connections open.", ConstLabels: prometheus.Labels{"destination": "secondary"}, }, func() float64 { return float64(secondaryDB.Stats().OpenConnections) }, )); err == nil { fmt.Println(`GaugeFunc 'connections_open' for secondary DB connection registered with labels {destination="secondary"}`) } // Note that we can register more than once GaugeFunc with same metric name // as long as their const labels are consistent. // Output: // GaugeFunc 'connections_open' for primary DB connection registered with labels {destination="primary"} // GaugeFunc 'connections_open' for secondary DB connection registered with labels {destination="secondary"} } func ExampleCounterVec() { httpReqs := prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "http_requests_total", Help: "How many HTTP requests processed, partitioned by status code and HTTP method.", }, []string{"code", "method"}, ) prometheus.MustRegister(httpReqs) httpReqs.WithLabelValues("404", "POST").Add(42) // If you have to access the same set of labels very frequently, it // might be good to retrieve the metric only once and keep a handle to // it. But beware of deletion of that metric, see below! m := httpReqs.WithLabelValues("200", "GET") for i := 0; i < 1000000; i++ { m.Inc() } // Delete a metric from the vector. If you have previously kept a handle // to that metric (as above), future updates via that handle will go // unseen (even if you re-create a metric with the same label set // later). httpReqs.DeleteLabelValues("200", "GET") // Same thing with the more verbose Labels syntax. httpReqs.Delete(prometheus.Labels{"method": "GET", "code": "200"}) } func ExampleRegister() { // Imagine you have a worker pool and want to count the tasks completed. taskCounter := prometheus.NewCounter(prometheus.CounterOpts{ Subsystem: "worker_pool", Name: "completed_tasks_total", Help: "Total number of tasks completed.", }) // This will register fine. if err := prometheus.Register(taskCounter); err != nil { fmt.Println(err) } else { fmt.Println("taskCounter registered.") } // Don't forget to tell the HTTP server about the Prometheus handler. // (In a real program, you still need to start the HTTP server...) 
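	// A minimal sketch of what starting that server could look like
	// (":8080" is just an arbitrary example address, and the standard
	// library log package would have to be imported):
	//
	//	go func() {
	//		log.Fatal(http.ListenAndServe(":8080", nil))
	//	}()
	//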
http.Handle("/metrics", promhttp.Handler()) // Now you can start workers and give every one of them a pointer to // taskCounter and let it increment it whenever it completes a task. taskCounter.Inc() // This has to happen somewhere in the worker code. // But wait, you want to see how individual workers perform. So you need // a vector of counters, with one element for each worker. taskCounterVec := prometheus.NewCounterVec( prometheus.CounterOpts{ Subsystem: "worker_pool", Name: "completed_tasks_total", Help: "Total number of tasks completed.", }, []string{"worker_id"}, ) // Registering will fail because we already have a metric of that name. if err := prometheus.Register(taskCounterVec); err != nil { fmt.Println("taskCounterVec not registered:", err) } else { fmt.Println("taskCounterVec registered.") } // To fix, first unregister the old taskCounter. if prometheus.Unregister(taskCounter) { fmt.Println("taskCounter unregistered.") } // Try registering taskCounterVec again. if err := prometheus.Register(taskCounterVec); err != nil { fmt.Println("taskCounterVec not registered:", err) } else { fmt.Println("taskCounterVec registered.") } // Bummer! Still doesn't work. // Prometheus will not allow you to ever export metrics with // inconsistent help strings or label names. After unregistering, the // unregistered metrics will cease to show up in the /metrics HTTP // response, but the registry still remembers that those metrics had // been exported before. For this example, we will now choose a // different name. (In a real program, you would obviously not export // the obsolete metric in the first place.) taskCounterVec = prometheus.NewCounterVec( prometheus.CounterOpts{ Subsystem: "worker_pool", Name: "completed_tasks_by_id", Help: "Total number of tasks completed.", }, []string{"worker_id"}, ) if err := prometheus.Register(taskCounterVec); err != nil { fmt.Println("taskCounterVec not registered:", err) } else { fmt.Println("taskCounterVec registered.") } // Finally it worked! // The workers have to tell taskCounterVec their id to increment the // right element in the metric vector. taskCounterVec.WithLabelValues("42").Inc() // Code from worker 42. // Each worker could also keep a reference to their own counter element // around. Pick the counter at initialization time of the worker. myCounter := taskCounterVec.WithLabelValues("42") // From worker 42 initialization code. myCounter.Inc() // Somewhere in the code of that worker. // Note that something like WithLabelValues("42", "spurious arg") would // panic (because you have provided too many label values). If you want // to get an error instead, use GetMetricWithLabelValues(...) instead. notMyCounter, err := taskCounterVec.GetMetricWithLabelValues("42", "spurious arg") if err != nil { fmt.Println("Worker initialization failed:", err) } if notMyCounter == nil { fmt.Println("notMyCounter is nil.") } // A different (and somewhat tricky) approach is to use // ConstLabels. ConstLabels are pairs of label names and label values // that never change. Each worker creates and registers an own Counter // instance where the only difference is in the value of the // ConstLabels. Those Counters can all be registered because the // different ConstLabel values guarantee that each worker will increment // a different Counter metric. 
counterOpts := prometheus.CounterOpts{ Subsystem: "worker_pool", Name: "completed_tasks", Help: "Total number of tasks completed.", ConstLabels: prometheus.Labels{"worker_id": "42"}, } taskCounterForWorker42 := prometheus.NewCounter(counterOpts) if err := prometheus.Register(taskCounterForWorker42); err != nil { fmt.Println("taskCounterVForWorker42 not registered:", err) } else { fmt.Println("taskCounterForWorker42 registered.") } // Obviously, in real code, taskCounterForWorker42 would be a member // variable of a worker struct, and the "42" would be retrieved with a // GetId() method or something. The Counter would be created and // registered in the initialization code of the worker. // For the creation of the next Counter, we can recycle // counterOpts. Just change the ConstLabels. counterOpts.ConstLabels = prometheus.Labels{"worker_id": "2001"} taskCounterForWorker2001 := prometheus.NewCounter(counterOpts) if err := prometheus.Register(taskCounterForWorker2001); err != nil { fmt.Println("taskCounterVForWorker2001 not registered:", err) } else { fmt.Println("taskCounterForWorker2001 registered.") } taskCounterForWorker2001.Inc() taskCounterForWorker42.Inc() taskCounterForWorker2001.Inc() // Yet another approach would be to turn the workers themselves into // Collectors and register them. See the Collector example for details. // Output: // taskCounter registered. // taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string // taskCounter unregistered. // taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string // taskCounterVec registered. // Worker initialization failed: inconsistent label cardinality: expected 1 label values but got 2 in []string{"42", "spurious arg"} // notMyCounter is nil. // taskCounterForWorker42 registered. // taskCounterForWorker2001 registered. } func ExampleSummary() { temps := prometheus.NewSummary(prometheus.SummaryOpts{ Name: "pond_temperature_celsius", Help: "The temperature of the frog pond.", Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, }) // Simulate some observations. for i := 0; i < 1000; i++ { temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10) } // Just for demonstration, let's check the state of the summary by // (ab)using its Write method (which is usually only used by Prometheus // internally). metric := &dto.Metric{} temps.Write(metric) fmt.Println(proto.MarshalTextString(metric)) // Output: // summary: < // sample_count: 1000 // sample_sum: 29969.50000000001 // quantile: < // quantile: 0.5 // value: 31.1 // > // quantile: < // quantile: 0.9 // value: 41.3 // > // quantile: < // quantile: 0.99 // value: 41.9 // > // > } func ExampleSummaryVec() { temps := prometheus.NewSummaryVec( prometheus.SummaryOpts{ Name: "pond_temperature_celsius", Help: "The temperature of the frog pond.", Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, }, []string{"species"}, ) // Simulate some observations. 
for i := 0; i < 1000; i++ { temps.WithLabelValues("litoria-caerulea").Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10) temps.WithLabelValues("lithobates-catesbeianus").Observe(32 + math.Floor(100*math.Cos(float64(i)*0.11))/10) } // Create a Summary without any observations. temps.WithLabelValues("leiopelma-hochstetteri") // Just for demonstration, let's check the state of the summary vector // by registering it with a custom registry and then let it collect the // metrics. reg := prometheus.NewRegistry() reg.MustRegister(temps) metricFamilies, err := reg.Gather() if err != nil || len(metricFamilies) != 1 { panic("unexpected behavior of custom test registry") } fmt.Println(proto.MarshalTextString(metricFamilies[0])) // Output: // name: "pond_temperature_celsius" // help: "The temperature of the frog pond." // type: SUMMARY // metric: < // label: < // name: "species" // value: "leiopelma-hochstetteri" // > // summary: < // sample_count: 0 // sample_sum: 0 // quantile: < // quantile: 0.5 // value: nan // > // quantile: < // quantile: 0.9 // value: nan // > // quantile: < // quantile: 0.99 // value: nan // > // > // > // metric: < // label: < // name: "species" // value: "lithobates-catesbeianus" // > // summary: < // sample_count: 1000 // sample_sum: 31956.100000000017 // quantile: < // quantile: 0.5 // value: 32.4 // > // quantile: < // quantile: 0.9 // value: 41.4 // > // quantile: < // quantile: 0.99 // value: 41.9 // > // > // > // metric: < // label: < // name: "species" // value: "litoria-caerulea" // > // summary: < // sample_count: 1000 // sample_sum: 29969.50000000001 // quantile: < // quantile: 0.5 // value: 31.1 // > // quantile: < // quantile: 0.9 // value: 41.3 // > // quantile: < // quantile: 0.99 // value: 41.9 // > // > // > } func ExampleNewConstSummary() { desc := prometheus.NewDesc( "http_request_duration_seconds", "A summary of the HTTP request durations.", []string{"code", "method"}, prometheus.Labels{"owner": "example"}, ) // Create a constant summary from values we got from a 3rd party telemetry system. s := prometheus.MustNewConstSummary( desc, 4711, 403.34, map[float64]float64{0.5: 42.3, 0.9: 323.3}, "200", "get", ) // Just for demonstration, let's check the state of the summary by // (ab)using its Write method (which is usually only used by Prometheus // internally). metric := &dto.Metric{} s.Write(metric) fmt.Println(proto.MarshalTextString(metric)) // Output: // label: < // name: "code" // value: "200" // > // label: < // name: "method" // value: "get" // > // label: < // name: "owner" // value: "example" // > // summary: < // sample_count: 4711 // sample_sum: 403.34 // quantile: < // quantile: 0.5 // value: 42.3 // > // quantile: < // quantile: 0.9 // value: 323.3 // > // > } func ExampleHistogram() { temps := prometheus.NewHistogram(prometheus.HistogramOpts{ Name: "pond_temperature_celsius", Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells. Buckets: prometheus.LinearBuckets(20, 5, 5), // 5 buckets, each 5 centigrade wide. }) // Simulate some observations. for i := 0; i < 1000; i++ { temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10) } // Just for demonstration, let's check the state of the histogram by // (ab)using its Write method (which is usually only used by Prometheus // internally). 
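	// (The prometheus.LinearBuckets(20, 5, 5) call above produces the upper
	// bounds 20, 25, 30, 35 and 40 that appear in the output below.)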
metric := &dto.Metric{} temps.Write(metric) fmt.Println(proto.MarshalTextString(metric)) // Output: // histogram: < // sample_count: 1000 // sample_sum: 29969.50000000001 // bucket: < // cumulative_count: 192 // upper_bound: 20 // > // bucket: < // cumulative_count: 366 // upper_bound: 25 // > // bucket: < // cumulative_count: 501 // upper_bound: 30 // > // bucket: < // cumulative_count: 638 // upper_bound: 35 // > // bucket: < // cumulative_count: 816 // upper_bound: 40 // > // > } func ExampleNewConstHistogram() { desc := prometheus.NewDesc( "http_request_duration_seconds", "A histogram of the HTTP request durations.", []string{"code", "method"}, prometheus.Labels{"owner": "example"}, ) // Create a constant histogram from values we got from a 3rd party telemetry system. h := prometheus.MustNewConstHistogram( desc, 4711, 403.34, map[float64]uint64{25: 121, 50: 2403, 100: 3221, 200: 4233}, "200", "get", ) // Just for demonstration, let's check the state of the histogram by // (ab)using its Write method (which is usually only used by Prometheus // internally). metric := &dto.Metric{} h.Write(metric) fmt.Println(proto.MarshalTextString(metric)) // Output: // label: < // name: "code" // value: "200" // > // label: < // name: "method" // value: "get" // > // label: < // name: "owner" // value: "example" // > // histogram: < // sample_count: 4711 // sample_sum: 403.34 // bucket: < // cumulative_count: 121 // upper_bound: 25 // > // bucket: < // cumulative_count: 2403 // upper_bound: 50 // > // bucket: < // cumulative_count: 3221 // upper_bound: 100 // > // bucket: < // cumulative_count: 4233 // upper_bound: 200 // > // > } func ExampleAlreadyRegisteredError() { reqCounter := prometheus.NewCounter(prometheus.CounterOpts{ Name: "requests_total", Help: "The total number of requests served.", }) if err := prometheus.Register(reqCounter); err != nil { if are, ok := err.(prometheus.AlreadyRegisteredError); ok { // A counter for that metric has been registered before. // Use the old counter from now on. reqCounter = are.ExistingCollector.(prometheus.Counter) } else { // Something else went wrong! panic(err) } } reqCounter.Inc() } func ExampleGatherers() { reg := prometheus.NewRegistry() temp := prometheus.NewGaugeVec( prometheus.GaugeOpts{ Name: "temperature_kelvin", Help: "Temperature in Kelvin.", }, []string{"location"}, ) reg.MustRegister(temp) temp.WithLabelValues("outside").Set(273.14) temp.WithLabelValues("inside").Set(298.44) var parser expfmt.TextParser text := ` # TYPE humidity_percent gauge # HELP humidity_percent Humidity in %. humidity_percent{location="outside"} 45.4 humidity_percent{location="inside"} 33.2 # TYPE temperature_kelvin gauge # HELP temperature_kelvin Temperature in Kelvin. temperature_kelvin{location="somewhere else"} 4.5 ` parseText := func() ([]*dto.MetricFamily, error) { parsed, err := parser.TextToMetricFamilies(strings.NewReader(text)) if err != nil { return nil, err } var result []*dto.MetricFamily for _, mf := range parsed { result = append(result, mf) } return result, nil } gatherers := prometheus.Gatherers{ reg, prometheus.GathererFunc(parseText), } gathering, err := gatherers.Gather() if err != nil { fmt.Println(err) } out := &bytes.Buffer{} for _, mf := range gathering { if _, err := expfmt.MetricFamilyToText(out, mf); err != nil { panic(err) } } fmt.Print(out.String()) fmt.Println("----------") // Note how the temperature_kelvin metric family has been merged from // different sources. 
Now try text = ` # TYPE humidity_percent gauge # HELP humidity_percent Humidity in %. humidity_percent{location="outside"} 45.4 humidity_percent{location="inside"} 33.2 # TYPE temperature_kelvin gauge # HELP temperature_kelvin Temperature in Kelvin. # Duplicate metric: temperature_kelvin{location="outside"} 265.3 # Missing location label (note that this is undesirable but valid): temperature_kelvin 4.5 ` gathering, err = gatherers.Gather() if err != nil { fmt.Println(err) } // Note that still as many metrics as possible are returned: out.Reset() for _, mf := range gathering { if _, err := expfmt.MetricFamilyToText(out, mf); err != nil { panic(err) } } fmt.Print(out.String()) // Output: // # HELP humidity_percent Humidity in %. // # TYPE humidity_percent gauge // humidity_percent{location="inside"} 33.2 // humidity_percent{location="outside"} 45.4 // # HELP temperature_kelvin Temperature in Kelvin. // # TYPE temperature_kelvin gauge // temperature_kelvin{location="inside"} 298.44 // temperature_kelvin{location="outside"} 273.14 // temperature_kelvin{location="somewhere else"} 4.5 // ---------- // collected metric "temperature_kelvin" { label: gauge: } was collected before with the same name and label values // # HELP humidity_percent Humidity in %. // # TYPE humidity_percent gauge // humidity_percent{location="inside"} 33.2 // humidity_percent{location="outside"} 45.4 // # HELP temperature_kelvin Temperature in Kelvin. // # TYPE temperature_kelvin gauge // temperature_kelvin 4.5 // temperature_kelvin{location="inside"} 298.44 // temperature_kelvin{location="outside"} 273.14 } func ExampleNewMetricWithTimestamp() { desc := prometheus.NewDesc( "temperature_kelvin", "Current temperature in Kelvin.", nil, nil, ) // Create a constant gauge from values we got from an external // temperature reporting system. Those values are reported with a slight // delay, so we want to add the timestamp of the actual measurement. temperatureReportedByExternalSystem := 298.15 timeReportedByExternalSystem := time.Date(2009, time.November, 10, 23, 0, 0, 12345678, time.UTC) s := prometheus.NewMetricWithTimestamp( timeReportedByExternalSystem, prometheus.MustNewConstMetric( desc, prometheus.GaugeValue, temperatureReportedByExternalSystem, ), ) // Just for demonstration, let's check the state of the gauge by // (ab)using its Write method (which is usually only used by Prometheus // internally). metric := &dto.Metric{} s.Write(metric) fmt.Println(proto.MarshalTextString(metric)) // Output: // gauge: < // value: 298.15 // > // timestamp_ms: 1257894000012 } client_golang-1.11.0/prometheus/expvar_collector.go000066400000000000000000000043271405741072000225020ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus import ( "encoding/json" "expvar" ) type expvarCollector struct { exports map[string]*Desc } // NewExpvarCollector is the obsolete version of collectors.NewExpvarCollector. // See there for documentation. 
// // Deprecated: Use collectors.NewExpvarCollector instead. func NewExpvarCollector(exports map[string]*Desc) Collector { return &expvarCollector{ exports: exports, } } // Describe implements Collector. func (e *expvarCollector) Describe(ch chan<- *Desc) { for _, desc := range e.exports { ch <- desc } } // Collect implements Collector. func (e *expvarCollector) Collect(ch chan<- Metric) { for name, desc := range e.exports { var m Metric expVar := expvar.Get(name) if expVar == nil { continue } var v interface{} labels := make([]string, len(desc.variableLabels)) if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { ch <- NewInvalidMetric(desc, err) continue } var processValue func(v interface{}, i int) processValue = func(v interface{}, i int) { if i >= len(labels) { copiedLabels := append(make([]string, 0, len(labels)), labels...) switch v := v.(type) { case float64: m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...) case bool: if v { m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...) } else { m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...) } default: return } ch <- m return } vm, ok := v.(map[string]interface{}) if !ok { return } for lv, val := range vm { labels[i] = lv processValue(val, i+1) } } processValue(v, 0) } } client_golang-1.11.0/prometheus/expvar_collector_test.go000066400000000000000000000064171405741072000235430ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus_test import ( "expvar" "fmt" "sort" "strings" dto "github.com/prometheus/client_model/go" "github.com/prometheus/client_golang/prometheus" ) func ExampleNewExpvarCollector() { expvarCollector := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{ "memstats": prometheus.NewDesc( "expvar_memstats", "All numeric memstats as one metric family. Not a good role-model, actually... ;-)", []string{"type"}, nil, ), "lone-int": prometheus.NewDesc( "expvar_lone_int", "Just an expvar int as an example.", nil, nil, ), "http-request-map": prometheus.NewDesc( "expvar_http_request_total", "How many http requests processed, partitioned by status code and http method.", []string{"code", "method"}, nil, ), }) prometheus.MustRegister(expvarCollector) // The Prometheus part is done here. But to show that this example is // doing anything, we have to manually export something via expvar. In // real-life use-cases, some library would already have exported via // expvar what we want to re-export as Prometheus metrics. 
expvar.NewInt("lone-int").Set(42) expvarMap := expvar.NewMap("http-request-map") var ( expvarMap1, expvarMap2 expvar.Map expvarInt11, expvarInt12, expvarInt21, expvarInt22 expvar.Int ) expvarMap1.Init() expvarMap2.Init() expvarInt11.Set(3) expvarInt12.Set(13) expvarInt21.Set(11) expvarInt22.Set(212) expvarMap1.Set("POST", &expvarInt11) expvarMap1.Set("GET", &expvarInt12) expvarMap2.Set("POST", &expvarInt21) expvarMap2.Set("GET", &expvarInt22) expvarMap.Set("404", &expvarMap1) expvarMap.Set("200", &expvarMap2) // Results in the following expvar map: // "http-request-count": {"200": {"POST": 11, "GET": 212}, "404": {"POST": 3, "GET": 13}} // Let's see what the scrape would yield, but exclude the memstats metrics. metricStrings := []string{} metric := dto.Metric{} metricChan := make(chan prometheus.Metric) go func() { expvarCollector.Collect(metricChan) close(metricChan) }() for m := range metricChan { if !strings.Contains(m.Desc().String(), "expvar_memstats") { metric.Reset() m.Write(&metric) metricStrings = append(metricStrings, metric.String()) } } sort.Strings(metricStrings) for _, s := range metricStrings { fmt.Println(strings.TrimRight(s, " ")) } // Output: // label: label: untyped: // label: label: untyped: // label: label: untyped: // label: label: untyped: // untyped: } client_golang-1.11.0/prometheus/fnv.go000066400000000000000000000022551405741072000177160ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus // Inline and byte-free variant of hash/fnv's fnv64a. const ( offset64 = 14695981039346656037 prime64 = 1099511628211 ) // hashNew initializies a new fnv64a hash value. func hashNew() uint64 { return offset64 } // hashAdd adds a string to a fnv64a hash value, returning the updated hash. func hashAdd(h uint64, s string) uint64 { for i := 0; i < len(s); i++ { h ^= uint64(s[i]) h *= prime64 } return h } // hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. func hashAddByte(h uint64, b byte) uint64 { h ^= uint64(b) h *= prime64 return h } client_golang-1.11.0/prometheus/gauge.go000066400000000000000000000234711405741072000202200ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus import ( "math" "sync/atomic" "time" dto "github.com/prometheus/client_model/go" ) // Gauge is a Metric that represents a single numerical value that can // arbitrarily go up and down. 
// // A Gauge is typically used for measured values like temperatures or current // memory usage, but also "counts" that can go up and down, like the number of // running goroutines. // // To create Gauge instances, use NewGauge. type Gauge interface { Metric Collector // Set sets the Gauge to an arbitrary value. Set(float64) // Inc increments the Gauge by 1. Use Add to increment it by arbitrary // values. Inc() // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary // values. Dec() // Add adds the given value to the Gauge. (The value can be negative, // resulting in a decrease of the Gauge.) Add(float64) // Sub subtracts the given value from the Gauge. (The value can be // negative, resulting in an increase of the Gauge.) Sub(float64) // SetToCurrentTime sets the Gauge to the current Unix time in seconds. SetToCurrentTime() } // GaugeOpts is an alias for Opts. See there for doc comments. type GaugeOpts Opts // NewGauge creates a new Gauge based on the provided GaugeOpts. // // The returned implementation is optimized for a fast Set method. If you have a // choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick // the former. For example, the Inc method of the returned Gauge is slower than // the Inc method of a Counter returned by NewCounter. This matches the typical // scenarios for Gauges and Counters, where the former tends to be Set-heavy and // the latter Inc-heavy. func NewGauge(opts GaugeOpts) Gauge { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, nil, opts.ConstLabels, ) result := &gauge{desc: desc, labelPairs: desc.constLabelPairs} result.init(result) // Init self-collection. return result } type gauge struct { // valBits contains the bits of the represented float64 value. It has // to go first in the struct to guarantee alignment for atomic // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG valBits uint64 selfCollector desc *Desc labelPairs []*dto.LabelPair } func (g *gauge) Desc() *Desc { return g.desc } func (g *gauge) Set(val float64) { atomic.StoreUint64(&g.valBits, math.Float64bits(val)) } func (g *gauge) SetToCurrentTime() { g.Set(float64(time.Now().UnixNano()) / 1e9) } func (g *gauge) Inc() { g.Add(1) } func (g *gauge) Dec() { g.Add(-1) } func (g *gauge) Add(val float64) { for { oldBits := atomic.LoadUint64(&g.valBits) newBits := math.Float64bits(math.Float64frombits(oldBits) + val) if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) { return } } } func (g *gauge) Sub(val float64) { g.Add(val * -1) } func (g *gauge) Write(out *dto.Metric) error { val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) return populateMetric(GaugeValue, val, g.labelPairs, nil, out) } // GaugeVec is a Collector that bundles a set of Gauges that all share the same // Desc, but have different values for their variable labels. This is used if // you want to count the same thing partitioned by various dimensions // (e.g. number of operations queued, partitioned by user and operation // type). Create instances with NewGaugeVec. type GaugeVec struct { *MetricVec } // NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and // partitioned by the given label names. 
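//
// For example (an illustrative sketch only; the metric name and label are
// arbitrary):
//
//	queueLength := prometheus.NewGaugeVec(prometheus.GaugeOpts{
//		Name: "queue_length",
//		Help: "Number of items in the queue, partitioned by queue name.",
//	}, []string{"queue_name"})
//	queueLength.WithLabelValues("incoming").Set(17)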
func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, labelNames, opts.ConstLabels, ) return &GaugeVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { if len(lvs) != len(desc.variableLabels) { panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) } result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)} result.init(result) // Init self-collection. return result }), } } // GetMetricWithLabelValues returns the Gauge for the given slice of label // values (same order as the variable labels in Desc). If that combination of // label values is accessed for the first time, a new Gauge is created. // // It is possible to call this method without using the returned Gauge to only // create the new Gauge but leave it at its starting value 0. See also the // SummaryVec example. // // Keeping the Gauge for later use is possible (and should be considered if // performance is critical), but keep in mind that Reset, DeleteLabelValues and // Delete can be used to delete the Gauge from the GaugeVec. In that case, the // Gauge will still exist, but it will not be exported anymore, even if a // Gauge with the same label values is created later. See also the CounterVec // example. // // An error is returned if the number of label values is not the same as the // number of variable labels in Desc (minus any curried labels). // // Note that for more than one label value, this method is prone to mistakes // caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as // an alternative to avoid that type of mistake. For higher label numbers, the // latter has a much more readable (albeit more verbose) syntax, but it comes // with a performance overhead (for creating and processing the Labels map). func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) if metric != nil { return metric.(Gauge), err } return nil, err } // GetMetricWith returns the Gauge for the given Labels map (the label names // must match those of the variable labels in Desc). If that label map is // accessed for the first time, a new Gauge is created. Implications of // creating a Gauge without using it and keeping the Gauge for later use are // the same as for GetMetricWithLabelValues. // // An error is returned if the number and names of the Labels are inconsistent // with those of the variable labels in Desc (minus any curried labels). // // This method is used for the same purpose as // GetMetricWithLabelValues(...string). See there for pros and cons of the two // methods. func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { metric, err := v.MetricVec.GetMetricWith(labels) if metric != nil { return metric.(Gauge), err } return nil, err } // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. Not returning an // error allows shortcuts like // myVec.WithLabelValues("404", "GET").Add(42) func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { g, err := v.GetMetricWithLabelValues(lvs...) if err != nil { panic(err) } return g } // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. 
Not returning an error allows shortcuts like // myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) func (v *GaugeVec) With(labels Labels) Gauge { g, err := v.GetMetricWith(labels) if err != nil { panic(err) } return g } // CurryWith returns a vector curried with the provided labels, i.e. the // returned vector has those labels pre-set for all labeled operations performed // on it. The cardinality of the curried vector is reduced accordingly. The // order of the remaining labels stays the same (just with the curried labels // taken out of the sequence – which is relevant for the // (GetMetric)WithLabelValues methods). It is possible to curry a curried // vector, but only with labels not yet used for currying before. // // The metrics contained in the GaugeVec are shared between the curried and // uncurried vectors. They are just accessed differently. Curried and uncurried // vectors behave identically in terms of collection. Only one must be // registered with a given registry (usually the uncurried version). The Reset // method deletes all metrics, even if called on a curried vector. func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) { vec, err := v.MetricVec.CurryWith(labels) if vec != nil { return &GaugeVec{vec}, err } return nil, err } // MustCurryWith works as CurryWith but panics where CurryWith would have // returned an error. func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec { vec, err := v.CurryWith(labels) if err != nil { panic(err) } return vec } // GaugeFunc is a Gauge whose value is determined at collect time by calling a // provided function. // // To create GaugeFunc instances, use NewGaugeFunc. type GaugeFunc interface { Metric Collector } // NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The // value reported is determined by calling the given function from within the // Write method. Take into account that metric collection may happen // concurrently. Therefore, it must be safe to call the provided function // concurrently. // // NewGaugeFunc is a good way to create an “info” style metric with a constant // value of 1. Example: // https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56 func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { return newValueFunc(NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, nil, opts.ConstLabels, ), GaugeValue, function) } client_golang-1.11.0/prometheus/gauge_test.go000066400000000000000000000112611405741072000212510ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package prometheus import ( "math" "math/rand" "sync" "testing" "testing/quick" "time" dto "github.com/prometheus/client_model/go" ) func listenGaugeStream(vals, result chan float64, done chan struct{}) { var sum float64 outer: for { select { case <-done: close(vals) for v := range vals { sum += v } break outer case v := <-vals: sum += v } } result <- sum close(result) } func TestGaugeConcurrency(t *testing.T) { it := func(n uint32) bool { mutations := int(n % 10000) concLevel := int(n%15 + 1) var start, end sync.WaitGroup start.Add(1) end.Add(concLevel) sStream := make(chan float64, mutations*concLevel) result := make(chan float64) done := make(chan struct{}) go listenGaugeStream(sStream, result, done) go func() { end.Wait() close(done) }() gge := NewGauge(GaugeOpts{ Name: "test_gauge", Help: "no help can be found here", }) for i := 0; i < concLevel; i++ { vals := make([]float64, mutations) for j := 0; j < mutations; j++ { vals[j] = rand.Float64() - 0.5 } go func(vals []float64) { start.Wait() for _, v := range vals { sStream <- v gge.Add(v) } end.Done() }(vals) } start.Done() if expected, got := <-result, math.Float64frombits(gge.(*gauge).valBits); math.Abs(expected-got) > 0.000001 { t.Fatalf("expected approx. %f, got %f", expected, got) return false } return true } if err := quick.Check(it, nil); err != nil { t.Fatal(err) } } func TestGaugeVecConcurrency(t *testing.T) { it := func(n uint32) bool { mutations := int(n % 10000) concLevel := int(n%15 + 1) vecLength := int(n%5 + 1) var start, end sync.WaitGroup start.Add(1) end.Add(concLevel) sStreams := make([]chan float64, vecLength) results := make([]chan float64, vecLength) done := make(chan struct{}) for i := 0; i < vecLength; i++ { sStreams[i] = make(chan float64, mutations*concLevel) results[i] = make(chan float64) go listenGaugeStream(sStreams[i], results[i], done) } go func() { end.Wait() close(done) }() gge := NewGaugeVec( GaugeOpts{ Name: "test_gauge", Help: "no help can be found here", }, []string{"label"}, ) for i := 0; i < concLevel; i++ { vals := make([]float64, mutations) pick := make([]int, mutations) for j := 0; j < mutations; j++ { vals[j] = rand.Float64() - 0.5 pick[j] = rand.Intn(vecLength) } go func(vals []float64) { start.Wait() for i, v := range vals { sStreams[pick[i]] <- v gge.WithLabelValues(string('A' + rune(pick[i]))).Add(v) } end.Done() }(vals) } start.Done() for i := range sStreams { if expected, got := <-results[i], math.Float64frombits(gge.WithLabelValues(string('A'+rune(i))).(*gauge).valBits); math.Abs(expected-got) > 0.000001 { t.Fatalf("expected approx. 
%f, got %f", expected, got) return false } } return true } if err := quick.Check(it, nil); err != nil { t.Fatal(err) } } func TestGaugeFunc(t *testing.T) { gf := NewGaugeFunc( GaugeOpts{ Name: "test_name", Help: "test help", ConstLabels: Labels{"a": "1", "b": "2"}, }, func() float64 { return 3.1415 }, ) if expected, got := `Desc{fqName: "test_name", help: "test help", constLabels: {a="1",b="2"}, variableLabels: []}`, gf.Desc().String(); expected != got { t.Errorf("expected %q, got %q", expected, got) } m := &dto.Metric{} gf.Write(m) if expected, got := `label: label: gauge: `, m.String(); expected != got { t.Errorf("expected %q, got %q", expected, got) } } func TestGaugeSetCurrentTime(t *testing.T) { g := NewGauge(GaugeOpts{ Name: "test_name", Help: "test help", }) g.SetToCurrentTime() unixTime := float64(time.Now().Unix()) m := &dto.Metric{} g.Write(m) delta := unixTime - m.GetGauge().GetValue() // This is just a smoke test to make sure SetToCurrentTime is not // totally off. Tests with current time involved are hard... if math.Abs(delta) > 5 { t.Errorf("Gauge set to current time deviates from current time by more than 5s, delta is %f seconds", delta) } } client_golang-1.11.0/prometheus/go_collector.go000066400000000000000000000260051405741072000215770ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus import ( "runtime" "runtime/debug" "sync" "time" ) type goCollector struct { goroutinesDesc *Desc threadsDesc *Desc gcDesc *Desc goInfoDesc *Desc // ms... are memstats related. msLast *runtime.MemStats // Previously collected memstats. msLastTimestamp time.Time msMtx sync.Mutex // Protects msLast and msLastTimestamp. msMetrics memStatsMetrics msRead func(*runtime.MemStats) // For mocking in tests. msMaxWait time.Duration // Wait time for fresh memstats. msMaxAge time.Duration // Maximum allowed age of old memstats. } // NewGoCollector is the obsolete version of collectors.NewGoCollector. // See there for documentation. // // Deprecated: Use collectors.NewGoCollector instead. 
func NewGoCollector() Collector { return &goCollector{ goroutinesDesc: NewDesc( "go_goroutines", "Number of goroutines that currently exist.", nil, nil), threadsDesc: NewDesc( "go_threads", "Number of OS threads created.", nil, nil), gcDesc: NewDesc( "go_gc_duration_seconds", "A summary of the pause duration of garbage collection cycles.", nil, nil), goInfoDesc: NewDesc( "go_info", "Information about the Go environment.", nil, Labels{"version": runtime.Version()}), msLast: &runtime.MemStats{}, msRead: runtime.ReadMemStats, msMaxWait: time.Second, msMaxAge: 5 * time.Minute, msMetrics: memStatsMetrics{ { desc: NewDesc( memstatNamespace("alloc_bytes"), "Number of bytes allocated and still in use.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, valType: GaugeValue, }, { desc: NewDesc( memstatNamespace("alloc_bytes_total"), "Total number of bytes allocated, even if freed.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, valType: CounterValue, }, { desc: NewDesc( memstatNamespace("sys_bytes"), "Number of bytes obtained from system.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, valType: GaugeValue, }, { desc: NewDesc( memstatNamespace("lookups_total"), "Total number of pointer lookups.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, valType: CounterValue, }, { desc: NewDesc( memstatNamespace("mallocs_total"), "Total number of mallocs.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, valType: CounterValue, }, { desc: NewDesc( memstatNamespace("frees_total"), "Total number of frees.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, valType: CounterValue, }, { desc: NewDesc( memstatNamespace("heap_alloc_bytes"), "Number of heap bytes allocated and still in use.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, valType: GaugeValue, }, { desc: NewDesc( memstatNamespace("heap_sys_bytes"), "Number of heap bytes obtained from system.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, valType: GaugeValue, }, { desc: NewDesc( memstatNamespace("heap_idle_bytes"), "Number of heap bytes waiting to be used.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, valType: GaugeValue, }, { desc: NewDesc( memstatNamespace("heap_inuse_bytes"), "Number of heap bytes that are in use.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, valType: GaugeValue, }, { desc: NewDesc( memstatNamespace("heap_released_bytes"), "Number of heap bytes released to OS.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, valType: GaugeValue, }, { desc: NewDesc( memstatNamespace("heap_objects"), "Number of allocated objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, valType: GaugeValue, }, { desc: NewDesc( memstatNamespace("stack_inuse_bytes"), "Number of bytes in use by the stack allocator.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, valType: GaugeValue, }, { desc: NewDesc( memstatNamespace("stack_sys_bytes"), "Number of bytes obtained from system for stack allocator.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, valType: GaugeValue, }, { desc: NewDesc( 
memstatNamespace("mspan_inuse_bytes"), "Number of bytes in use by mspan structures.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, valType: GaugeValue, }, { desc: NewDesc( memstatNamespace("mspan_sys_bytes"), "Number of bytes used for mspan structures obtained from system.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, valType: GaugeValue, }, { desc: NewDesc( memstatNamespace("mcache_inuse_bytes"), "Number of bytes in use by mcache structures.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, valType: GaugeValue, }, { desc: NewDesc( memstatNamespace("mcache_sys_bytes"), "Number of bytes used for mcache structures obtained from system.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, valType: GaugeValue, }, { desc: NewDesc( memstatNamespace("buck_hash_sys_bytes"), "Number of bytes used by the profiling bucket hash table.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, valType: GaugeValue, }, { desc: NewDesc( memstatNamespace("gc_sys_bytes"), "Number of bytes used for garbage collection system metadata.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, valType: GaugeValue, }, { desc: NewDesc( memstatNamespace("other_sys_bytes"), "Number of bytes used for other system allocations.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, valType: GaugeValue, }, { desc: NewDesc( memstatNamespace("next_gc_bytes"), "Number of heap bytes when next garbage collection will take place.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, valType: GaugeValue, }, { desc: NewDesc( memstatNamespace("last_gc_time_seconds"), "Number of seconds since 1970 of last garbage collection.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, valType: GaugeValue, }, { desc: NewDesc( memstatNamespace("gc_cpu_fraction"), "The fraction of this program's available CPU time used by the GC since the program started.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, valType: GaugeValue, }, }, } } func memstatNamespace(s string) string { return "go_memstats_" + s } // Describe returns all descriptions of the collector. func (c *goCollector) Describe(ch chan<- *Desc) { ch <- c.goroutinesDesc ch <- c.threadsDesc ch <- c.gcDesc ch <- c.goInfoDesc for _, i := range c.msMetrics { ch <- i.desc } } // Collect returns the current state of all metrics of the collector. func (c *goCollector) Collect(ch chan<- Metric) { var ( ms = &runtime.MemStats{} done = make(chan struct{}) ) // Start reading memstats first as it might take a while. 
go func() { c.msRead(ms) c.msMtx.Lock() c.msLast = ms c.msLastTimestamp = time.Now() c.msMtx.Unlock() close(done) }() ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) n, _ := runtime.ThreadCreateProfile(nil) ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) var stats debug.GCStats stats.PauseQuantiles = make([]time.Duration, 5) debug.ReadGCStats(&stats) quantiles := make(map[float64]float64) for idx, pq := range stats.PauseQuantiles[1:] { quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() } quantiles[0.0] = stats.PauseQuantiles[0].Seconds() ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles) ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) timer := time.NewTimer(c.msMaxWait) select { case <-done: // Our own ReadMemStats succeeded in time. Use it. timer.Stop() // Important for high collection frequencies to not pile up timers. c.msCollect(ch, ms) return case <-timer.C: // Time out, use last memstats if possible. Continue below. } c.msMtx.Lock() if time.Since(c.msLastTimestamp) < c.msMaxAge { // Last memstats are recent enough. Collect from them under the lock. c.msCollect(ch, c.msLast) c.msMtx.Unlock() return } // If we are here, the last memstats are too old or don't exist. We have // to wait until our own ReadMemStats finally completes. For that to // happen, we have to release the lock. c.msMtx.Unlock() <-done c.msCollect(ch, ms) } func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) { for _, i := range c.msMetrics { ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) } } // memStatsMetrics provide description, value, and value type for memstat metrics. type memStatsMetrics []struct { desc *Desc eval func(*runtime.MemStats) float64 valType ValueType } // NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector. // See there for documentation. // // Deprecated: Use collectors.NewBuildInfoCollector instead. func NewBuildInfoCollector() Collector { path, version, sum := "unknown", "unknown", "unknown" if bi, ok := debug.ReadBuildInfo(); ok { path = bi.Main.Path version = bi.Main.Version sum = bi.Main.Sum } c := &selfCollector{MustNewConstMetric( NewDesc( "go_build_info", "Build information about the main Go module.", nil, Labels{"path": path, "version": version, "checksum": sum}, ), GaugeValue, 1)} c.init(c.self) return c } client_golang-1.11.0/prometheus/go_collector_test.go000066400000000000000000000137241405741072000226420ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package prometheus import ( "runtime" "testing" "time" dto "github.com/prometheus/client_model/go" ) func TestGoCollectorGoroutines(t *testing.T) { var ( c = NewGoCollector() metricCh = make(chan Metric) waitCh = make(chan struct{}) endGoroutineCh = make(chan struct{}) endCollectionCh = make(chan struct{}) old = -1 ) defer func() { close(endGoroutineCh) // Drain the collect channel to prevent goroutine leak. for { select { case <-metricCh: case <-endCollectionCh: return } } }() go func() { c.Collect(metricCh) for i := 1; i <= 10; i++ { // Start 10 goroutines to be sure we'll detect an // increase even if unrelated goroutines happen to // terminate during this test. go func(c <-chan struct{}) { <-c }(endGoroutineCh) } <-waitCh c.Collect(metricCh) close(endCollectionCh) }() for { select { case m := <-metricCh: // m can be Gauge or Counter, // currently just test the go_goroutines Gauge // and ignore others. if m.Desc().fqName != "go_goroutines" { continue } pb := &dto.Metric{} m.Write(pb) if pb.GetGauge() == nil { continue } if old == -1 { old = int(pb.GetGauge().GetValue()) close(waitCh) continue } if diff := old - int(pb.GetGauge().GetValue()); diff > -1 { t.Errorf("want at least one new goroutine, got %d fewer", diff) } case <-time.After(1 * time.Second): t.Fatalf("expected collect timed out") } break } } func TestGoCollectorGC(t *testing.T) { var ( c = NewGoCollector() metricCh = make(chan Metric) waitCh = make(chan struct{}) endCollectionCh = make(chan struct{}) oldGC uint64 oldPause float64 ) go func() { c.Collect(metricCh) // force GC runtime.GC() <-waitCh c.Collect(metricCh) close(endCollectionCh) }() defer func() { // Drain the collect channel to prevent goroutine leak. for { select { case <-metricCh: case <-endCollectionCh: return } } }() first := true for { select { case metric := <-metricCh: pb := &dto.Metric{} metric.Write(pb) if pb.GetSummary() == nil { continue } if len(pb.GetSummary().Quantile) != 5 { t.Errorf("expected 4 buckets, got %d", len(pb.GetSummary().Quantile)) } for idx, want := range []float64{0.0, 0.25, 0.5, 0.75, 1.0} { if *pb.GetSummary().Quantile[idx].Quantile != want { t.Errorf("bucket #%d is off, got %f, want %f", idx, *pb.GetSummary().Quantile[idx].Quantile, want) } } if first { first = false oldGC = *pb.GetSummary().SampleCount oldPause = *pb.GetSummary().SampleSum close(waitCh) continue } if diff := *pb.GetSummary().SampleCount - oldGC; diff < 1 { t.Errorf("want at least 1 new garbage collection run, got %d", diff) } if diff := *pb.GetSummary().SampleSum - oldPause; diff <= 0 { t.Errorf("want an increase in pause time, got a change of %f", diff) } case <-time.After(1 * time.Second): t.Fatalf("expected collect timed out") } break } } func TestGoCollectorMemStats(t *testing.T) { var ( c = NewGoCollector().(*goCollector) got uint64 ) checkCollect := func(want uint64) { metricCh := make(chan Metric) endCh := make(chan struct{}) go func() { c.Collect(metricCh) close(endCh) }() Collect: for { select { case metric := <-metricCh: if metric.Desc().fqName != "go_memstats_alloc_bytes" { continue Collect } pb := &dto.Metric{} metric.Write(pb) got = uint64(pb.GetGauge().GetValue()) case <-endCh: break Collect } } if want != got { t.Errorf("unexpected value of go_memstats_alloc_bytes, want %d, got %d", want, got) } } // Speed up the timing to make the test faster. c.msMaxWait = 5 * time.Millisecond c.msMaxAge = 50 * time.Millisecond // Scenario 1: msRead responds slowly, no previous memstats available, // msRead is executed anyway. 
c.msRead = func(ms *runtime.MemStats) { time.Sleep(20 * time.Millisecond) ms.Alloc = 1 } checkCollect(1) // Now msLast is set. c.msMtx.Lock() if want, got := uint64(1), c.msLast.Alloc; want != got { t.Errorf("unexpected of msLast.Alloc, want %d, got %d", want, got) } c.msMtx.Unlock() // Scenario 2: msRead responds fast, previous memstats available, new // value collected. c.msRead = func(ms *runtime.MemStats) { ms.Alloc = 2 } checkCollect(2) // msLast is set, too. c.msMtx.Lock() if want, got := uint64(2), c.msLast.Alloc; want != got { t.Errorf("unexpected of msLast.Alloc, want %d, got %d", want, got) } c.msMtx.Unlock() // Scenario 3: msRead responds slowly, previous memstats available, old // value collected. c.msRead = func(ms *runtime.MemStats) { time.Sleep(20 * time.Millisecond) ms.Alloc = 3 } checkCollect(2) // After waiting, new value is still set in msLast. time.Sleep(80 * time.Millisecond) c.msMtx.Lock() if want, got := uint64(3), c.msLast.Alloc; want != got { t.Errorf("unexpected of msLast.Alloc, want %d, got %d", want, got) } c.msMtx.Unlock() // Scenario 4: msRead responds slowly, previous memstats is too old, new // value collected. c.msRead = func(ms *runtime.MemStats) { time.Sleep(20 * time.Millisecond) ms.Alloc = 4 } checkCollect(4) c.msMtx.Lock() if want, got := uint64(4), c.msLast.Alloc; want != got { t.Errorf("unexpected of msLast.Alloc, want %d, got %d", want, got) } c.msMtx.Unlock() } client_golang-1.11.0/prometheus/graphite/000077500000000000000000000000001405741072000203755ustar00rootroot00000000000000client_golang-1.11.0/prometheus/graphite/bridge.go000066400000000000000000000160101405741072000221560ustar00rootroot00000000000000// Copyright 2016 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package graphite provides a bridge to push Prometheus metrics to a Graphite // server. package graphite import ( "bufio" "context" "errors" "fmt" "io" "net" "sort" "time" "github.com/prometheus/common/expfmt" "github.com/prometheus/common/model" dto "github.com/prometheus/client_model/go" "github.com/prometheus/client_golang/prometheus" ) const ( defaultInterval = 15 * time.Second millisecondsPerSecond = 1000 ) // HandlerErrorHandling defines how a Handler serving metrics will handle // errors. type HandlerErrorHandling int // These constants cause handlers serving metrics to behave as described if // errors are encountered. const ( // Ignore errors and try to push as many metrics to Graphite as possible. ContinueOnError HandlerErrorHandling = iota // Abort the push to Graphite upon the first error encountered. AbortOnError ) // Config defines the Graphite bridge config. type Config struct { // Whether to use Graphite tags or not. Defaults to false. UseTags bool // The url to push data to. Required. URL string // The prefix for the pushed Graphite metrics. Defaults to empty string. Prefix string // The interval to use for pushing data to Graphite. Defaults to 15 seconds. Interval time.Duration // The timeout for pushing metrics to Graphite. 
Defaults to 15 seconds. Timeout time.Duration // The Gatherer to use for metrics. Defaults to prometheus.DefaultGatherer. Gatherer prometheus.Gatherer // The logger that messages are written to. Defaults to no logging. Logger Logger // ErrorHandling defines how errors are handled. Note that errors are // logged regardless of the configured ErrorHandling provided Logger // is not nil. ErrorHandling HandlerErrorHandling } // Bridge pushes metrics to the configured Graphite server. type Bridge struct { useTags bool url string prefix string interval time.Duration timeout time.Duration errorHandling HandlerErrorHandling logger Logger g prometheus.Gatherer } // Logger is the minimal interface Bridge needs for logging. Note that // log.Logger from the standard library implements this interface, and it is // easy to implement by custom loggers, if they don't do so already anyway. type Logger interface { Println(v ...interface{}) } // NewBridge returns a pointer to a new Bridge struct. func NewBridge(c *Config) (*Bridge, error) { b := &Bridge{} b.useTags = c.UseTags if c.URL == "" { return nil, errors.New("missing URL") } b.url = c.URL if c.Gatherer == nil { b.g = prometheus.DefaultGatherer } else { b.g = c.Gatherer } if c.Logger != nil { b.logger = c.Logger } if c.Prefix != "" { b.prefix = c.Prefix } var z time.Duration if c.Interval == z { b.interval = defaultInterval } else { b.interval = c.Interval } if c.Timeout == z { b.timeout = defaultInterval } else { b.timeout = c.Timeout } b.errorHandling = c.ErrorHandling return b, nil } // Run starts the event loop that pushes Prometheus metrics to Graphite at the // configured interval. func (b *Bridge) Run(ctx context.Context) { ticker := time.NewTicker(b.interval) defer ticker.Stop() for { select { case <-ticker.C: if err := b.Push(); err != nil && b.logger != nil { b.logger.Println("error pushing to Graphite:", err) } case <-ctx.Done(): return } } } // Push pushes Prometheus metrics to the configured Graphite server. func (b *Bridge) Push() error { mfs, err := b.g.Gather() if err != nil || len(mfs) == 0 { switch b.errorHandling { case AbortOnError: return err case ContinueOnError: if b.logger != nil { b.logger.Println("continue on error:", err) } default: panic("unrecognized error handling value") } } conn, err := net.DialTimeout("tcp", b.url, b.timeout) if err != nil { return err } defer conn.Close() return writeMetrics(conn, mfs, b.useTags, b.prefix, model.Now()) } func writeMetrics(w io.Writer, mfs []*dto.MetricFamily, useTags bool, prefix string, now model.Time) error { vec, err := expfmt.ExtractSamples(&expfmt.DecodeOptions{ Timestamp: now, }, mfs...) 
if err != nil { return err } buf := bufio.NewWriter(w) for _, s := range vec { for _, c := range prefix { if _, err := buf.WriteRune(c); err != nil { return err } } if err := buf.WriteByte('.'); err != nil { return err } if err := writeMetric(buf, s.Metric, useTags); err != nil { return err } if _, err := fmt.Fprintf(buf, " %g %d\n", s.Value, int64(s.Timestamp)/millisecondsPerSecond); err != nil { return err } if err := buf.Flush(); err != nil { return err } } return nil } func writeMetric(buf *bufio.Writer, m model.Metric, useTags bool) error { metricName, hasName := m[model.MetricNameLabel] numLabels := len(m) - 1 if !hasName { numLabels = len(m) } var err error switch numLabels { case 0: if hasName { return writeSanitized(buf, string(metricName)) } default: if err = writeSanitized(buf, string(metricName)); err != nil { return err } if useTags { return writeTags(buf, m) } else { return writeLabels(buf, m, numLabels) } } return nil } func writeTags(buf *bufio.Writer, m model.Metric) error { for label, value := range m { if label != model.MetricNameLabel { buf.WriteRune(';') if _, err := buf.WriteString(string(label)); err != nil { return err } buf.WriteRune('=') if _, err := buf.WriteString(string(value)); err != nil { return err } } } return nil } func writeLabels(buf *bufio.Writer, m model.Metric, numLabels int) error { labelStrings := make([]string, 0, numLabels) for label, value := range m { if label != model.MetricNameLabel { labelString := string(label) + " " + string(value) labelStrings = append(labelStrings, labelString) } } sort.Strings(labelStrings) for _, s := range labelStrings { if err := buf.WriteByte('.'); err != nil { return err } if err := writeSanitized(buf, s); err != nil { return err } } return nil } func writeSanitized(buf *bufio.Writer, s string) error { prevUnderscore := false for _, c := range s { c = replaceInvalidRune(c) if c == '_' { if prevUnderscore { continue } prevUnderscore = true } else { prevUnderscore = false } if _, err := buf.WriteRune(c); err != nil { return err } } return nil } func replaceInvalidRune(c rune) rune { if c == ' ' { return '.' } if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_' || c == ':' || c == '-' || (c >= '0' && c <= '9')) { return '_' } return c } client_golang-1.11.0/prometheus/graphite/bridge_test.go000066400000000000000000000304741405741072000232270ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
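// NOTE (illustrative sketch, not part of the original sources): the
// writeSanitized/replaceInvalidRune helpers above map characters that Graphite
// cannot digest to '_' (collapsing runs of underscores) and spaces to '.'.
// Assuming the in-package helpers behave as defined above, a quick sketch of
// the effect on a made-up name:
//
//	var buf bytes.Buffer
//	w := bufio.NewWriter(&buf)
//	_ = writeSanitized(w, "http req/total latency")
//	_ = w.Flush()
//	// buf.String() now reads "http.req_total.latency"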
package graphite import ( "bufio" "bytes" "context" "fmt" "io" "log" "net" "os" "reflect" "regexp" "sort" "strings" "testing" "time" "github.com/prometheus/common/model" "github.com/prometheus/client_golang/prometheus" ) func TestSanitize(t *testing.T) { testCases := []struct { in, out string }{ {in: "hello", out: "hello"}, {in: "hE/l1o", out: "hE_l1o"}, {in: "he,*ll(.o", out: "he_ll_o"}, {in: "hello_there%^&", out: "hello_there_"}, {in: "hell-.o", out: "hell-_o"}, } var buf bytes.Buffer w := bufio.NewWriter(&buf) for i, tc := range testCases { if err := writeSanitized(w, tc.in); err != nil { t.Fatalf("write failed: %v", err) } if err := w.Flush(); err != nil { t.Fatalf("flush failed: %v", err) } if want, got := tc.out, buf.String(); want != got { t.Fatalf("test case index %d: got sanitized string %s, want %s", i, got, want) } buf.Reset() } } func TestWriteSummary(t *testing.T) { testWriteSummary(t, false) testWriteSummary(t, true) } func testWriteSummary(t *testing.T, useTags bool) { sumVec := prometheus.NewSummaryVec( prometheus.SummaryOpts{ Name: "name", Help: "docstring", ConstLabels: prometheus.Labels{"constname": "constvalue"}, Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, }, []string{"labelname"}, ) sumVec.WithLabelValues("val1").Observe(float64(10)) sumVec.WithLabelValues("val1").Observe(float64(20)) sumVec.WithLabelValues("val1").Observe(float64(30)) sumVec.WithLabelValues("val2").Observe(float64(20)) sumVec.WithLabelValues("val2").Observe(float64(30)) sumVec.WithLabelValues("val2").Observe(float64(40)) reg := prometheus.NewRegistry() reg.MustRegister(sumVec) mfs, err := reg.Gather() if err != nil { t.Fatalf("error: %v", err) } testCases := []struct { prefix string }{ {prefix: "prefix"}, {prefix: "pre/fix"}, {prefix: "pre.fix"}, } var ( want = `%s.name.constname.constvalue.labelname.val1.quantile.0_5 20 1477043 %s.name.constname.constvalue.labelname.val1.quantile.0_9 30 1477043 %s.name.constname.constvalue.labelname.val1.quantile.0_99 30 1477043 %s.name_sum.constname.constvalue.labelname.val1 60 1477043 %s.name_count.constname.constvalue.labelname.val1 3 1477043 %s.name.constname.constvalue.labelname.val2.quantile.0_5 30 1477043 %s.name.constname.constvalue.labelname.val2.quantile.0_9 40 1477043 %s.name.constname.constvalue.labelname.val2.quantile.0_99 40 1477043 %s.name_sum.constname.constvalue.labelname.val2 90 1477043 %s.name_count.constname.constvalue.labelname.val2 3 1477043 ` wantTagged = `%s.name;constname=constvalue;labelname=val1;quantile=0.5 20 1477043 %s.name;constname=constvalue;labelname=val1;quantile=0.9 30 1477043 %s.name;constname=constvalue;labelname=val1;quantile=0.99 30 1477043 %s.name_sum;constname=constvalue;labelname=val1 60 1477043 %s.name_count;constname=constvalue;labelname=val1 3 1477043 %s.name;constname=constvalue;labelname=val2;quantile=0.5 30 1477043 %s.name;constname=constvalue;labelname=val2;quantile=0.9 40 1477043 %s.name;constname=constvalue;labelname=val2;quantile=0.99 40 1477043 %s.name_sum;constname=constvalue;labelname=val2 90 1477043 %s.name_count;constname=constvalue;labelname=val2 3 1477043 ` ) if useTags { want = wantTagged } for i, tc := range testCases { now := model.Time(1477043083) var buf bytes.Buffer err = writeMetrics(&buf, mfs, useTags, tc.prefix, now) if err != nil { t.Fatalf("error: %v", err) } wantWithPrefix := fmt.Sprintf(want, tc.prefix, tc.prefix, tc.prefix, tc.prefix, tc.prefix, tc.prefix, tc.prefix, tc.prefix, tc.prefix, tc.prefix, ) got := buf.String() if err := checkLinesAreEqual(wantWithPrefix, got, 
useTags); err != nil { t.Fatalf("test case index %d:\n%s", i, err.Error()) } } } func TestWriteHistogram(t *testing.T) { testWriteHistogram(t, false) testWriteHistogram(t, true) } func testWriteHistogram(t *testing.T, useTags bool) { histVec := prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "name", Help: "docstring", ConstLabels: prometheus.Labels{"constname": "constvalue"}, Buckets: []float64{0.01, 0.02, 0.05, 0.1}, }, []string{"labelname"}, ) histVec.WithLabelValues("val1").Observe(float64(10)) histVec.WithLabelValues("val1").Observe(float64(20)) histVec.WithLabelValues("val1").Observe(float64(30)) histVec.WithLabelValues("val2").Observe(float64(20)) histVec.WithLabelValues("val2").Observe(float64(30)) histVec.WithLabelValues("val2").Observe(float64(40)) reg := prometheus.NewRegistry() reg.MustRegister(histVec) mfs, err := reg.Gather() if err != nil { t.Fatalf("error: %v", err) } now := model.Time(1477043083) var buf bytes.Buffer err = writeMetrics(&buf, mfs, useTags, "prefix", now) if err != nil { t.Fatalf("error: %v", err) } var ( want = `prefix.name_bucket.constname.constvalue.labelname.val1.le.0_01 0 1477043 prefix.name_bucket.constname.constvalue.labelname.val1.le.0_02 0 1477043 prefix.name_bucket.constname.constvalue.labelname.val1.le.0_05 0 1477043 prefix.name_bucket.constname.constvalue.labelname.val1.le.0_1 0 1477043 prefix.name_sum.constname.constvalue.labelname.val1 60 1477043 prefix.name_count.constname.constvalue.labelname.val1 3 1477043 prefix.name_bucket.constname.constvalue.labelname.val1.le._Inf 3 1477043 prefix.name_bucket.constname.constvalue.labelname.val2.le.0_01 0 1477043 prefix.name_bucket.constname.constvalue.labelname.val2.le.0_02 0 1477043 prefix.name_bucket.constname.constvalue.labelname.val2.le.0_05 0 1477043 prefix.name_bucket.constname.constvalue.labelname.val2.le.0_1 0 1477043 prefix.name_sum.constname.constvalue.labelname.val2 90 1477043 prefix.name_count.constname.constvalue.labelname.val2 3 1477043 prefix.name_bucket.constname.constvalue.labelname.val2.le._Inf 3 1477043 ` wantTagged = `prefix.name_bucket;constname=constvalue;labelname=val1;le=0.01 0 1477043 prefix.name_bucket;constname=constvalue;labelname=val1;le=0.02 0 1477043 prefix.name_bucket;constname=constvalue;labelname=val1;le=0.05 0 1477043 prefix.name_bucket;constname=constvalue;labelname=val1;le=0.1 0 1477043 prefix.name_sum;constname=constvalue;labelname=val1 60 1477043 prefix.name_count;constname=constvalue;labelname=val1 3 1477043 prefix.name_bucket;constname=constvalue;labelname=val1;le=+Inf 3 1477043 prefix.name_bucket;constname=constvalue;labelname=val2;le=0.01 0 1477043 prefix.name_bucket;constname=constvalue;labelname=val2;le=0.02 0 1477043 prefix.name_bucket;constname=constvalue;labelname=val2;le=0.05 0 1477043 prefix.name_bucket;constname=constvalue;labelname=val2;le=0.1 0 1477043 prefix.name_sum;constname=constvalue;labelname=val2 90 1477043 prefix.name_count;constname=constvalue;labelname=val2 3 1477043 prefix.name_bucket;constname=constvalue;labelname=val2;le=+Inf 3 1477043 ` ) if useTags { want = wantTagged } got := buf.String() if err := checkLinesAreEqual(want, got, useTags); err != nil { t.Fatalf(err.Error()) } } func TestToReader(t *testing.T) { testToReader(t, false) testToReader(t, true) } func testToReader(t *testing.T, useTags bool) { cntVec := prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "name", Help: "docstring", ConstLabels: prometheus.Labels{"constname": "constvalue"}, }, []string{"labelname"}, ) cntVec.WithLabelValues("val1").Inc() 
cntVec.WithLabelValues("val2").Inc() reg := prometheus.NewRegistry() reg.MustRegister(cntVec) var ( want = `prefix.name.constname.constvalue.labelname.val1 1 1477043 prefix.name.constname.constvalue.labelname.val2 1 1477043 ` wantTagged = `prefix.name;constname=constvalue;labelname=val1 1 1477043 prefix.name;constname=constvalue;labelname=val2 1 1477043 ` ) if useTags { want = wantTagged } mfs, err := reg.Gather() if err != nil { t.Fatalf("error: %v", err) } now := model.Time(1477043083) var buf bytes.Buffer err = writeMetrics(&buf, mfs, useTags, "prefix", now) if err != nil { t.Fatalf("error: %v", err) } got := buf.String() if err := checkLinesAreEqual(want, got, useTags); err != nil { t.Fatalf(err.Error()) } } func checkLinesAreEqual(w, g string, useTags bool) error { if useTags { taggedLineRegexp := regexp.MustCompile(`;| `) wantLines, err := stringToLines(w) if err != nil { return err } gotLines, err := stringToLines(g) if err != nil { return err } for lineInd := range gotLines { var log string // Tagged metric, order of tags doesn't matter // m1 := "prefix.name;tag1=val1;tag2=val2 3 1477043" // m2 := "prefix.name;tag2=val2;tag1=val1 3 1477043" // m1 should be equal to m2 wantSplit := taggedLineRegexp.Split(wantLines[lineInd], -1) gotSplit := taggedLineRegexp.Split(gotLines[lineInd], -1) sort.Strings(wantSplit) sort.Strings(gotSplit) log += fmt.Sprintf("want: %v\ngot: %v\n\n", wantSplit, gotSplit) if !reflect.DeepEqual(wantSplit, gotSplit) { return fmt.Errorf(log) } } return nil } if w != g { return fmt.Errorf("wanted:\n\n%s\ngot:\n\n%s", w, g) } return nil } func stringToLines(s string) (lines []string, err error) { scanner := bufio.NewScanner(strings.NewReader(s)) for scanner.Scan() { lines = append(lines, scanner.Text()) } err = scanner.Err() return } func TestPush(t *testing.T) { reg := prometheus.NewRegistry() cntVec := prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "name", Help: "docstring", ConstLabels: prometheus.Labels{"constname": "constvalue"}, }, []string{"labelname"}, ) cntVec.WithLabelValues("val1").Inc() cntVec.WithLabelValues("val2").Inc() reg.MustRegister(cntVec) host := "localhost" port := ":56789" b, err := NewBridge(&Config{ URL: host + port, Gatherer: reg, Prefix: "prefix", }) if err != nil { t.Fatalf("error creating bridge: %v", err) } nmg, err := newMockGraphite(port) if err != nil { t.Fatalf("error creating mock graphite: %v", err) } defer nmg.Close() err = b.Push() if err != nil { t.Fatalf("error pushing: %v", err) } wants := []string{ "prefix.name.constname.constvalue.labelname.val1 1", "prefix.name.constname.constvalue.labelname.val2 1", } select { case got := <-nmg.readc: for _, want := range wants { matched, err := regexp.MatchString(want, got) if err != nil { t.Fatalf("error pushing: %v", err) } if !matched { t.Fatalf("missing metric:\nno match for %s received by server:\n%s", want, got) } } return case err := <-nmg.errc: t.Fatalf("error reading push: %v", err) case <-time.After(50 * time.Millisecond): t.Fatalf("no result from graphite server") } } func newMockGraphite(port string) (*mockGraphite, error) { readc := make(chan string) errc := make(chan error) ln, err := net.Listen("tcp", port) if err != nil { return nil, err } go func() { conn, err := ln.Accept() if err != nil { errc <- err } var b bytes.Buffer io.Copy(&b, conn) readc <- b.String() }() return &mockGraphite{ readc: readc, errc: errc, Listener: ln, }, nil } type mockGraphite struct { readc chan string errc chan error net.Listener } func ExampleBridge() { b, err := NewBridge(&Config{ 
URL: "graphite.example.org:3099", Gatherer: prometheus.DefaultGatherer, Prefix: "prefix", Interval: 15 * time.Second, Timeout: 10 * time.Second, ErrorHandling: AbortOnError, Logger: log.New(os.Stdout, "graphite bridge: ", log.Lshortfile), }) if err != nil { panic(err) } go func() { // Start something in a goroutine that uses metrics. }() // Push initial metrics to Graphite. Fail fast if the push fails. if err := b.Push(); err != nil { panic(err) } // Create a Context to control stopping the Run() loop that pushes // metrics to Graphite. ctx, cancel := context.WithCancel(context.Background()) defer cancel() // Start pushing metrics to Graphite in the Run() loop. b.Run(ctx) } client_golang-1.11.0/prometheus/histogram.go000066400000000000000000000536771405741072000211400ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus import ( "fmt" "math" "runtime" "sort" "sync" "sync/atomic" "time" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" dto "github.com/prometheus/client_model/go" ) // A Histogram counts individual observations from an event or sample stream in // configurable buckets. Similar to a summary, it also provides a sum of // observations and an observation count. // // On the Prometheus server, quantiles can be calculated from a Histogram using // the histogram_quantile function in the query language. // // Note that Histograms, in contrast to Summaries, can be aggregated with the // Prometheus query language (see the documentation for detailed // procedures). However, Histograms require the user to pre-define suitable // buckets, and they are in general less accurate. The Observe method of a // Histogram has a very low performance overhead in comparison with the Observe // method of a Summary. // // To create Histogram instances, use NewHistogram. type Histogram interface { Metric Collector // Observe adds a single observation to the histogram. Observations are // usually positive or zero. Negative observations are accepted but // prevent current versions of Prometheus from properly detecting // counter resets in the sum of observations. See // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations // for details. Observe(float64) } // bucketLabel is used for the label that defines the upper bound of a // bucket of a histogram ("le" -> "less or equal"). const bucketLabel = "le" // DefBuckets are the default Histogram buckets. The default buckets are // tailored to broadly measure the response time (in seconds) of a network // service. Most likely, however, you will be required to define buckets // customized to your use case. 
var ( DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} errBucketLabelNotAllowed = fmt.Errorf( "%q is not allowed as label name in histograms", bucketLabel, ) ) // LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest // bucket has an upper bound of 'start'. The final +Inf bucket is not counted // and not included in the returned slice. The returned slice is meant to be // used for the Buckets field of HistogramOpts. // // The function panics if 'count' is zero or negative. func LinearBuckets(start, width float64, count int) []float64 { if count < 1 { panic("LinearBuckets needs a positive count") } buckets := make([]float64, count) for i := range buckets { buckets[i] = start start += width } return buckets } // ExponentialBuckets creates 'count' buckets, where the lowest bucket has an // upper bound of 'start' and each following bucket's upper bound is 'factor' // times the previous bucket's upper bound. The final +Inf bucket is not counted // and not included in the returned slice. The returned slice is meant to be // used for the Buckets field of HistogramOpts. // // The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, // or if 'factor' is less than or equal 1. func ExponentialBuckets(start, factor float64, count int) []float64 { if count < 1 { panic("ExponentialBuckets needs a positive count") } if start <= 0 { panic("ExponentialBuckets needs a positive start value") } if factor <= 1 { panic("ExponentialBuckets needs a factor greater than 1") } buckets := make([]float64, count) for i := range buckets { buckets[i] = start start *= factor } return buckets } // HistogramOpts bundles the options for creating a Histogram metric. It is // mandatory to set Name to a non-empty string. All other fields are optional // and can safely be left at their zero value, although it is strongly // encouraged to set a Help string. type HistogramOpts struct { // Namespace, Subsystem, and Name are components of the fully-qualified // name of the Histogram (created by joining these components with // "_"). Only Name is mandatory, the others merely help structuring the // name. Note that the fully-qualified name of the Histogram must be a // valid Prometheus metric name. Namespace string Subsystem string Name string // Help provides information about this Histogram. // // Metrics with the same fully-qualified name must have the same Help // string. Help string // ConstLabels are used to attach fixed labels to this metric. Metrics // with the same fully-qualified name must have the same label names in // their ConstLabels. // // ConstLabels are only used rarely. In particular, do not use them to // attach the same labels to all your metrics. Those use cases are // better covered by target labels set by the scraping Prometheus // server, or by one specific metric (e.g. a build_info or a // machine_role metric). See also // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels ConstLabels Labels // Buckets defines the buckets into which observations are counted. Each // element in the slice is the upper inclusive bound of a bucket. The // values must be sorted in strictly increasing order. There is no need // to add a highest bucket with +Inf bound, it will be added // implicitly. The default value is DefBuckets. Buckets []float64 } // NewHistogram creates a new Histogram based on the provided HistogramOpts. It // panics if the buckets in HistogramOpts are not in strictly increasing order. 
// // The returned implementation also implements ExemplarObserver. It is safe to // perform the corresponding type assertion. Exemplars are tracked separately // for each bucket. func NewHistogram(opts HistogramOpts) Histogram { return newHistogram( NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, nil, opts.ConstLabels, ), opts, ) } func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { if len(desc.variableLabels) != len(labelValues) { panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) } for _, n := range desc.variableLabels { if n == bucketLabel { panic(errBucketLabelNotAllowed) } } for _, lp := range desc.constLabelPairs { if lp.GetName() == bucketLabel { panic(errBucketLabelNotAllowed) } } if len(opts.Buckets) == 0 { opts.Buckets = DefBuckets } h := &histogram{ desc: desc, upperBounds: opts.Buckets, labelPairs: MakeLabelPairs(desc, labelValues), counts: [2]*histogramCounts{{}, {}}, now: time.Now, } for i, upperBound := range h.upperBounds { if i < len(h.upperBounds)-1 { if upperBound >= h.upperBounds[i+1] { panic(fmt.Errorf( "histogram buckets must be in increasing order: %f >= %f", upperBound, h.upperBounds[i+1], )) } } else { if math.IsInf(upperBound, +1) { // The +Inf bucket is implicit. Remove it here. h.upperBounds = h.upperBounds[:i] } } } // Finally we know the final length of h.upperBounds and can make buckets // for both counts as well as exemplars: h.counts[0].buckets = make([]uint64, len(h.upperBounds)) h.counts[1].buckets = make([]uint64, len(h.upperBounds)) h.exemplars = make([]atomic.Value, len(h.upperBounds)+1) h.init(h) // Init self-collection. return h } type histogramCounts struct { // sumBits contains the bits of the float64 representing the sum of all // observations. sumBits and count have to go first in the struct to // guarantee alignment for atomic operations. // http://golang.org/pkg/sync/atomic/#pkg-note-BUG sumBits uint64 count uint64 buckets []uint64 } type histogram struct { // countAndHotIdx enables lock-free writes with use of atomic updates. // The most significant bit is the hot index [0 or 1] of the count field // below. Observe calls update the hot one. All remaining bits count the // number of Observe calls. Observe starts by incrementing this counter, // and finish by incrementing the count field in the respective // histogramCounts, as a marker for completion. // // Calls of the Write method (which are non-mutating reads from the // perspective of the histogram) swap the hot–cold under the writeMtx // lock. A cooldown is awaited (while locked) by comparing the number of // observations with the initiation count. Once they match, then the // last observation on the now cool one has completed. All cool fields must // be merged into the new hot before releasing writeMtx. // // Fields with atomic access first! See alignment constraint: // http://golang.org/pkg/sync/atomic/#pkg-note-BUG countAndHotIdx uint64 selfCollector desc *Desc writeMtx sync.Mutex // Only used in the Write method. // Two counts, one is "hot" for lock-free observations, the other is // "cold" for writing out a dto.Metric. It has to be an array of // pointers to guarantee 64bit alignment of the histogramCounts, see // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. counts [2]*histogramCounts upperBounds []float64 labelPairs []*dto.LabelPair exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. now func() time.Time // To mock out time.Now() for testing. 
} func (h *histogram) Desc() *Desc { return h.desc } func (h *histogram) Observe(v float64) { h.observe(v, h.findBucket(v)) } func (h *histogram) ObserveWithExemplar(v float64, e Labels) { i := h.findBucket(v) h.observe(v, i) h.updateExemplar(v, i, e) } func (h *histogram) Write(out *dto.Metric) error { // For simplicity, we protect this whole method by a mutex. It is not in // the hot path, i.e. Observe is called much more often than Write. The // complication of making Write lock-free isn't worth it, if possible at // all. h.writeMtx.Lock() defer h.writeMtx.Unlock() // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) // without touching the count bits. See the struct comments for a full // description of the algorithm. n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) // count is contained unchanged in the lower 63 bits. count := n & ((1 << 63) - 1) // The most significant bit tells us which counts is hot. The complement // is thus the cold one. hotCounts := h.counts[n>>63] coldCounts := h.counts[(^n)>>63] // Await cooldown. for count != atomic.LoadUint64(&coldCounts.count) { runtime.Gosched() // Let observations get work done. } his := &dto.Histogram{ Bucket: make([]*dto.Bucket, len(h.upperBounds)), SampleCount: proto.Uint64(count), SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), } var cumCount uint64 for i, upperBound := range h.upperBounds { cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) his.Bucket[i] = &dto.Bucket{ CumulativeCount: proto.Uint64(cumCount), UpperBound: proto.Float64(upperBound), } if e := h.exemplars[i].Load(); e != nil { his.Bucket[i].Exemplar = e.(*dto.Exemplar) } } // If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly. if e := h.exemplars[len(h.upperBounds)].Load(); e != nil { b := &dto.Bucket{ CumulativeCount: proto.Uint64(count), UpperBound: proto.Float64(math.Inf(1)), Exemplar: e.(*dto.Exemplar), } his.Bucket = append(his.Bucket, b) } out.Histogram = his out.Label = h.labelPairs // Finally add all the cold counts to the new hot counts and reset the cold counts. atomic.AddUint64(&hotCounts.count, count) atomic.StoreUint64(&coldCounts.count, 0) for { oldBits := atomic.LoadUint64(&hotCounts.sumBits) newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum()) if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { atomic.StoreUint64(&coldCounts.sumBits, 0) break } } for i := range h.upperBounds { atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) atomic.StoreUint64(&coldCounts.buckets[i], 0) } return nil } // findBucket returns the index of the bucket for the provided value, or // len(h.upperBounds) for the +Inf bucket. func (h *histogram) findBucket(v float64) int { // TODO(beorn7): For small numbers of buckets (<30), a linear search is // slightly faster than the binary search. If we really care, we could // switch from one search strategy to the other depending on the number // of buckets. // // Microbenchmarks (BenchmarkHistogramNoLabels): // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op // 300 buckets: 154 ns/op linear - binary 61.6 ns/op return sort.SearchFloat64s(h.upperBounds, v) } // observe is the implementation for Observe without the findBucket part. func (h *histogram) observe(v float64, bucket int) { // We increment h.countAndHotIdx so that the counter in the lower // 63 bits gets incremented. 
At the same time, we get the new value // back, which we can use to find the currently-hot counts. n := atomic.AddUint64(&h.countAndHotIdx, 1) hotCounts := h.counts[n>>63] if bucket < len(h.upperBounds) { atomic.AddUint64(&hotCounts.buckets[bucket], 1) } for { oldBits := atomic.LoadUint64(&hotCounts.sumBits) newBits := math.Float64bits(math.Float64frombits(oldBits) + v) if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { break } } // Increment count last as we take it as a signal that the observation // is complete. atomic.AddUint64(&hotCounts.count, 1) } // updateExemplar replaces the exemplar for the provided bucket. With empty // labels, it's a no-op. It panics if any of the labels is invalid. func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { if l == nil { return } e, err := newExemplar(v, h.now(), l) if err != nil { panic(err) } h.exemplars[bucket].Store(e) } // HistogramVec is a Collector that bundles a set of Histograms that all share the // same Desc, but have different values for their variable labels. This is used // if you want to count the same thing partitioned by various dimensions // (e.g. HTTP request latencies, partitioned by status code and method). Create // instances with NewHistogramVec. type HistogramVec struct { *MetricVec } // NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and // partitioned by the given label names. func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, labelNames, opts.ConstLabels, ) return &HistogramVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { return newHistogram(desc, opts, lvs...) }), } } // GetMetricWithLabelValues returns the Histogram for the given slice of label // values (same order as the variable labels in Desc). If that combination of // label values is accessed for the first time, a new Histogram is created. // // It is possible to call this method without using the returned Histogram to only // create the new Histogram but leave it at its starting value, a Histogram without // any observations. // // Keeping the Histogram for later use is possible (and should be considered if // performance is critical), but keep in mind that Reset, DeleteLabelValues and // Delete can be used to delete the Histogram from the HistogramVec. In that case, the // Histogram will still exist, but it will not be exported anymore, even if a // Histogram with the same label values is created later. See also the CounterVec // example. // // An error is returned if the number of label values is not the same as the // number of variable labels in Desc (minus any curried labels). // // Note that for more than one label value, this method is prone to mistakes // caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as // an alternative to avoid that type of mistake. For higher label numbers, the // latter has a much more readable (albeit more verbose) syntax, but it comes // with a performance overhead (for creating and processing the Labels map). // See also the GaugeVec example. func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) if metric != nil { return metric.(Observer), err } return nil, err } // GetMetricWith returns the Histogram for the given Labels map (the label names // must match those of the variable labels in Desc). 
If that label map is // accessed for the first time, a new Histogram is created. Implications of // creating a Histogram without using it and keeping the Histogram for later use // are the same as for GetMetricWithLabelValues. // // An error is returned if the number and names of the Labels are inconsistent // with those of the variable labels in Desc (minus any curried labels). // // This method is used for the same purpose as // GetMetricWithLabelValues(...string). See there for pros and cons of the two // methods. func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { metric, err := v.MetricVec.GetMetricWith(labels) if metric != nil { return metric.(Observer), err } return nil, err } // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. Not returning an // error allows shortcuts like // myVec.WithLabelValues("404", "GET").Observe(42.21) func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { h, err := v.GetMetricWithLabelValues(lvs...) if err != nil { panic(err) } return h } // With works as GetMetricWith but panics where GetMetricWithLabels would have // returned an error. Not returning an error allows shortcuts like // myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) func (v *HistogramVec) With(labels Labels) Observer { h, err := v.GetMetricWith(labels) if err != nil { panic(err) } return h } // CurryWith returns a vector curried with the provided labels, i.e. the // returned vector has those labels pre-set for all labeled operations performed // on it. The cardinality of the curried vector is reduced accordingly. The // order of the remaining labels stays the same (just with the curried labels // taken out of the sequence – which is relevant for the // (GetMetric)WithLabelValues methods). It is possible to curry a curried // vector, but only with labels not yet used for currying before. // // The metrics contained in the HistogramVec are shared between the curried and // uncurried vectors. They are just accessed differently. Curried and uncurried // vectors behave identically in terms of collection. Only one must be // registered with a given registry (usually the uncurried version). The Reset // method deletes all metrics, even if called on a curried vector. func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) { vec, err := v.MetricVec.CurryWith(labels) if vec != nil { return &HistogramVec{vec}, err } return nil, err } // MustCurryWith works as CurryWith but panics where CurryWith would have // returned an error. 
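// NOTE (illustrative sketch, not part of the original file): currying in
// practice, assuming a HistogramVec with "code" and "method" labels; the metric
// name and values here are made up for the example:
//
//	latency := prometheus.NewHistogramVec(prometheus.HistogramOpts{
//		Name:    "request_duration_seconds",
//		Help:    "A histogram of request latencies.",
//		Buckets: prometheus.DefBuckets,
//	}, []string{"code", "method"})
//	getOnly := latency.MustCurryWith(prometheus.Labels{"method": "GET"})
//	// Equivalent to latency.WithLabelValues("200", "GET").Observe(0.042):
//	getOnly.WithLabelValues("200").Observe(0.042)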
func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec { vec, err := v.CurryWith(labels) if err != nil { panic(err) } return vec } type constHistogram struct { desc *Desc count uint64 sum float64 buckets map[float64]uint64 labelPairs []*dto.LabelPair } func (h *constHistogram) Desc() *Desc { return h.desc } func (h *constHistogram) Write(out *dto.Metric) error { his := &dto.Histogram{} buckets := make([]*dto.Bucket, 0, len(h.buckets)) his.SampleCount = proto.Uint64(h.count) his.SampleSum = proto.Float64(h.sum) for upperBound, count := range h.buckets { buckets = append(buckets, &dto.Bucket{ CumulativeCount: proto.Uint64(count), UpperBound: proto.Float64(upperBound), }) } if len(buckets) > 0 { sort.Sort(buckSort(buckets)) } his.Bucket = buckets out.Histogram = his out.Label = h.labelPairs return nil } // NewConstHistogram returns a metric representing a Prometheus histogram with // fixed values for the count, sum, and bucket counts. As those parameters // cannot be changed, the returned value does not implement the Histogram // interface (but only the Metric interface). Users of this package will not // have much use for it in regular operations. However, when implementing custom // Collectors, it is useful as a throw-away metric that is generated on the fly // to send it to Prometheus in the Collect method. // // buckets is a map of upper bounds to cumulative counts, excluding the +Inf // bucket. // // NewConstHistogram returns an error if the length of labelValues is not // consistent with the variable labels in Desc or if Desc is invalid. func NewConstHistogram( desc *Desc, count uint64, sum float64, buckets map[float64]uint64, labelValues ...string, ) (Metric, error) { if desc.err != nil { return nil, desc.err } if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { return nil, err } return &constHistogram{ desc: desc, count: count, sum: sum, buckets: buckets, labelPairs: MakeLabelPairs(desc, labelValues), }, nil } // MustNewConstHistogram is a version of NewConstHistogram that panics where // NewConstHistogram would have returned an error. func MustNewConstHistogram( desc *Desc, count uint64, sum float64, buckets map[float64]uint64, labelValues ...string, ) Metric { m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...) if err != nil { panic(err) } return m } type buckSort []*dto.Bucket func (s buckSort) Len() int { return len(s) } func (s buckSort) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s buckSort) Less(i, j int) bool { return s[i].GetUpperBound() < s[j].GetUpperBound() } client_golang-1.11.0/prometheus/histogram_test.go000066400000000000000000000250571405741072000221660ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus import ( "math" "math/rand" "reflect" "runtime" "sort" "sync" "testing" "testing/quick" "time" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. 
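// NOTE (illustrative sketch, not part of the original sources): as the doc
// comment above explains, MustNewConstHistogram is typically called from a
// custom Collector's Collect method. The collector type, descriptor, and
// numbers below are made up for the example:
//
//	func (c *myCollector) Collect(ch chan<- prometheus.Metric) {
//		ch <- prometheus.MustNewConstHistogram(
//			c.desc, // a *prometheus.Desc prepared in the collector's constructor
//			4711,   // observation count
//			403.4,  // sum of all observations
//			map[float64]uint64{0.05: 1200, 0.1: 2900, 0.5: 4500}, // cumulative bucket counts
//		)
//	}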
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" dto "github.com/prometheus/client_model/go" ) func benchmarkHistogramObserve(w int, b *testing.B) { b.StopTimer() wg := new(sync.WaitGroup) wg.Add(w) g := new(sync.WaitGroup) g.Add(1) s := NewHistogram(HistogramOpts{}) for i := 0; i < w; i++ { go func() { g.Wait() for i := 0; i < b.N; i++ { s.Observe(float64(i)) } wg.Done() }() } b.StartTimer() g.Done() wg.Wait() } func BenchmarkHistogramObserve1(b *testing.B) { benchmarkHistogramObserve(1, b) } func BenchmarkHistogramObserve2(b *testing.B) { benchmarkHistogramObserve(2, b) } func BenchmarkHistogramObserve4(b *testing.B) { benchmarkHistogramObserve(4, b) } func BenchmarkHistogramObserve8(b *testing.B) { benchmarkHistogramObserve(8, b) } func benchmarkHistogramWrite(w int, b *testing.B) { b.StopTimer() wg := new(sync.WaitGroup) wg.Add(w) g := new(sync.WaitGroup) g.Add(1) s := NewHistogram(HistogramOpts{}) for i := 0; i < 1000000; i++ { s.Observe(float64(i)) } for j := 0; j < w; j++ { outs := make([]dto.Metric, b.N) go func(o []dto.Metric) { g.Wait() for i := 0; i < b.N; i++ { s.Write(&o[i]) } wg.Done() }(outs) } b.StartTimer() g.Done() wg.Wait() } func BenchmarkHistogramWrite1(b *testing.B) { benchmarkHistogramWrite(1, b) } func BenchmarkHistogramWrite2(b *testing.B) { benchmarkHistogramWrite(2, b) } func BenchmarkHistogramWrite4(b *testing.B) { benchmarkHistogramWrite(4, b) } func BenchmarkHistogramWrite8(b *testing.B) { benchmarkHistogramWrite(8, b) } func TestHistogramNonMonotonicBuckets(t *testing.T) { testCases := map[string][]float64{ "not strictly monotonic": {1, 2, 2, 3}, "not monotonic at all": {1, 2, 4, 3, 5}, "have +Inf in the middle": {1, 2, math.Inf(+1), 3}, } for name, buckets := range testCases { func() { defer func() { if r := recover(); r == nil { t.Errorf("Buckets %v are %s but NewHistogram did not panic.", buckets, name) } }() _ = NewHistogram(HistogramOpts{ Name: "test_histogram", Help: "helpless", Buckets: buckets, }) }() } } // Intentionally adding +Inf here to test if that case is handled correctly. // Also, getCumulativeCounts depends on it. 
var testBuckets = []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)} func TestHistogramConcurrency(t *testing.T) { if testing.Short() { t.Skip("Skipping test in short mode.") } rand.Seed(42) it := func(n uint32) bool { mutations := int(n%1e4 + 1e4) concLevel := int(n%5 + 1) total := mutations * concLevel var start, end sync.WaitGroup start.Add(1) end.Add(concLevel) sum := NewHistogram(HistogramOpts{ Name: "test_histogram", Help: "helpless", Buckets: testBuckets, }) allVars := make([]float64, total) var sampleSum float64 for i := 0; i < concLevel; i++ { vals := make([]float64, mutations) for j := 0; j < mutations; j++ { v := rand.NormFloat64() vals[j] = v allVars[i*mutations+j] = v sampleSum += v } go func(vals []float64) { start.Wait() for _, v := range vals { if n%2 == 0 { sum.Observe(v) } else { sum.(ExemplarObserver).ObserveWithExemplar(v, Labels{"foo": "bar"}) } } end.Done() }(vals) } sort.Float64s(allVars) start.Done() end.Wait() m := &dto.Metric{} sum.Write(m) if got, want := int(*m.Histogram.SampleCount), total; got != want { t.Errorf("got sample count %d, want %d", got, want) } if got, want := *m.Histogram.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 { t.Errorf("got sample sum %f, want %f", got, want) } wantCounts := getCumulativeCounts(allVars) wantBuckets := len(testBuckets) if !math.IsInf(m.Histogram.Bucket[len(m.Histogram.Bucket)-1].GetUpperBound(), +1) { wantBuckets-- } if got := len(m.Histogram.Bucket); got != wantBuckets { t.Errorf("got %d buckets in protobuf, want %d", got, wantBuckets) } for i, wantBound := range testBuckets { if i == len(testBuckets)-1 { break // No +Inf bucket in protobuf. } if gotBound := *m.Histogram.Bucket[i].UpperBound; gotBound != wantBound { t.Errorf("got bound %f, want %f", gotBound, wantBound) } if gotCount, wantCount := *m.Histogram.Bucket[i].CumulativeCount, wantCounts[i]; gotCount != wantCount { t.Errorf("got count %d, want %d", gotCount, wantCount) } } return true } if err := quick.Check(it, nil); err != nil { t.Error(err) } } func TestHistogramVecConcurrency(t *testing.T) { if testing.Short() { t.Skip("Skipping test in short mode.") } rand.Seed(42) it := func(n uint32) bool { mutations := int(n%1e4 + 1e4) concLevel := int(n%7 + 1) vecLength := int(n%3 + 1) var start, end sync.WaitGroup start.Add(1) end.Add(concLevel) his := NewHistogramVec( HistogramOpts{ Name: "test_histogram", Help: "helpless", Buckets: []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)}, }, []string{"label"}, ) allVars := make([][]float64, vecLength) sampleSums := make([]float64, vecLength) for i := 0; i < concLevel; i++ { vals := make([]float64, mutations) picks := make([]int, mutations) for j := 0; j < mutations; j++ { v := rand.NormFloat64() vals[j] = v pick := rand.Intn(vecLength) picks[j] = pick allVars[pick] = append(allVars[pick], v) sampleSums[pick] += v } go func(vals []float64) { start.Wait() for i, v := range vals { his.WithLabelValues(string('A' + rune(picks[i]))).Observe(v) } end.Done() }(vals) } for _, vars := range allVars { sort.Float64s(vars) } start.Done() end.Wait() for i := 0; i < vecLength; i++ { m := &dto.Metric{} s := his.WithLabelValues(string('A' + rune(i))) s.(Histogram).Write(m) if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want { t.Errorf("got %d buckets in protobuf, want %d", got, want) } if got, want := int(*m.Histogram.SampleCount), len(allVars[i]); got != want { t.Errorf("got sample count %d, want %d", got, want) } if got, want := *m.Histogram.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 { 
t.Errorf("got sample sum %f, want %f", got, want) } wantCounts := getCumulativeCounts(allVars[i]) for j, wantBound := range testBuckets { if j == len(testBuckets)-1 { break // No +Inf bucket in protobuf. } if gotBound := *m.Histogram.Bucket[j].UpperBound; gotBound != wantBound { t.Errorf("got bound %f, want %f", gotBound, wantBound) } if gotCount, wantCount := *m.Histogram.Bucket[j].CumulativeCount, wantCounts[j]; gotCount != wantCount { t.Errorf("got count %d, want %d", gotCount, wantCount) } } } return true } if err := quick.Check(it, nil); err != nil { t.Error(err) } } func getCumulativeCounts(vars []float64) []uint64 { counts := make([]uint64, len(testBuckets)) for _, v := range vars { for i := len(testBuckets) - 1; i >= 0; i-- { if v > testBuckets[i] { break } counts[i]++ } } return counts } func TestBuckets(t *testing.T) { got := LinearBuckets(-15, 5, 6) want := []float64{-15, -10, -5, 0, 5, 10} if !reflect.DeepEqual(got, want) { t.Errorf("linear buckets: got %v, want %v", got, want) } got = ExponentialBuckets(100, 1.2, 3) want = []float64{100, 120, 144} if !reflect.DeepEqual(got, want) { t.Errorf("exponential buckets: got %v, want %v", got, want) } } func TestHistogramAtomicObserve(t *testing.T) { var ( quit = make(chan struct{}) his = NewHistogram(HistogramOpts{ Buckets: []float64{0.5, 10, 20}, }) ) defer func() { close(quit) }() observe := func() { for { select { case <-quit: return default: his.Observe(1) } } } go observe() go observe() go observe() for i := 0; i < 100; i++ { m := &dto.Metric{} if err := his.Write(m); err != nil { t.Fatal("unexpected error writing histogram:", err) } h := m.GetHistogram() if h.GetSampleCount() != uint64(h.GetSampleSum()) || h.GetSampleCount() != h.GetBucket()[1].GetCumulativeCount() || h.GetSampleCount() != h.GetBucket()[2].GetCumulativeCount() { t.Fatalf( "inconsistent counts in histogram: count=%d sum=%f buckets=[%d, %d]", h.GetSampleCount(), h.GetSampleSum(), h.GetBucket()[1].GetCumulativeCount(), h.GetBucket()[2].GetCumulativeCount(), ) } runtime.Gosched() } } func TestHistogramExemplar(t *testing.T) { now := time.Now() histogram := NewHistogram(HistogramOpts{ Name: "test", Help: "test help", Buckets: []float64{1, 2, 3, 4}, }).(*histogram) histogram.now = func() time.Time { return now } ts, err := ptypes.TimestampProto(now) if err != nil { t.Fatal(err) } expectedExemplars := []*dto.Exemplar{ nil, &dto.Exemplar{ Label: []*dto.LabelPair{ &dto.LabelPair{Name: proto.String("id"), Value: proto.String("2")}, }, Value: proto.Float64(1.6), Timestamp: ts, }, nil, &dto.Exemplar{ Label: []*dto.LabelPair{ &dto.LabelPair{Name: proto.String("id"), Value: proto.String("3")}, }, Value: proto.Float64(4), Timestamp: ts, }, &dto.Exemplar{ Label: []*dto.LabelPair{ &dto.LabelPair{Name: proto.String("id"), Value: proto.String("4")}, }, Value: proto.Float64(4.5), Timestamp: ts, }, } histogram.ObserveWithExemplar(1.5, Labels{"id": "1"}) histogram.ObserveWithExemplar(1.6, Labels{"id": "2"}) // To replace exemplar in bucket 0. histogram.ObserveWithExemplar(4, Labels{"id": "3"}) histogram.ObserveWithExemplar(4.5, Labels{"id": "4"}) // Should go to +Inf bucket. 
for i, ex := range histogram.exemplars { var got, expected string if val := ex.Load(); val != nil { got = val.(*dto.Exemplar).String() } if expectedExemplars[i] != nil { expected = expectedExemplars[i].String() } if got != expected { t.Errorf("expected exemplar %s, got %s.", expected, got) } } } client_golang-1.11.0/prometheus/internal/000077500000000000000000000000001405741072000204065ustar00rootroot00000000000000client_golang-1.11.0/prometheus/internal/metric.go000066400000000000000000000052241405741072000222230ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package internal import ( "sort" dto "github.com/prometheus/client_model/go" ) // metricSorter is a sortable slice of *dto.Metric. type metricSorter []*dto.Metric func (s metricSorter) Len() int { return len(s) } func (s metricSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s metricSorter) Less(i, j int) bool { if len(s[i].Label) != len(s[j].Label) { // This should not happen. The metrics are // inconsistent. However, we have to deal with the fact, as // people might use custom collectors or metric family injection // to create inconsistent metrics. So let's simply compare the // number of labels in this case. That will still yield // reproducible sorting. return len(s[i].Label) < len(s[j].Label) } for n, lp := range s[i].Label { vi := lp.GetValue() vj := s[j].Label[n].GetValue() if vi != vj { return vi < vj } } // We should never arrive here. Multiple metrics with the same // label set in the same scrape will lead to undefined ingestion // behavior. However, as above, we have to provide stable sorting // here, even for inconsistent metrics. So sort equal metrics // by their timestamp, with missing timestamps (implying "now") // coming last. if s[i].TimestampMs == nil { return false } if s[j].TimestampMs == nil { return true } return s[i].GetTimestampMs() < s[j].GetTimestampMs() } // NormalizeMetricFamilies returns a MetricFamily slice with empty // MetricFamilies pruned and the remaining MetricFamilies sorted by name within // the slice, with the contained Metrics sorted within each MetricFamily. func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { for _, mf := range metricFamiliesByName { sort.Sort(metricSorter(mf.Metric)) } names := make([]string, 0, len(metricFamiliesByName)) for name, mf := range metricFamiliesByName { if len(mf.Metric) > 0 { names = append(names, name) } } sort.Strings(names) result := make([]*dto.MetricFamily, 0, len(names)) for _, name := range names { result = append(result, metricFamiliesByName[name]) } return result } client_golang-1.11.0/prometheus/labels.go000066400000000000000000000050101405741072000203570ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus import ( "errors" "fmt" "strings" "unicode/utf8" "github.com/prometheus/common/model" ) // Labels represents a collection of label name -> value mappings. This type is // commonly used with the With(Labels) and GetMetricWith(Labels) methods of // metric vector Collectors, e.g.: // myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) // // The other use-case is the specification of constant label pairs in Opts or to // create a Desc. type Labels map[string]string // reservedLabelPrefix is a prefix which is not legal in user-supplied // label names. const reservedLabelPrefix = "__" var errInconsistentCardinality = errors.New("inconsistent label cardinality") func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error { return fmt.Errorf( "%s: %q has %d variable labels named %q but %d values %q were provided", errInconsistentCardinality, fqName, len(labels), labels, len(labelValues), labelValues, ) } func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { if len(labels) != expectedNumberOfValues { return fmt.Errorf( "%s: expected %d label values but got %d in %#v", errInconsistentCardinality, expectedNumberOfValues, len(labels), labels, ) } for name, val := range labels { if !utf8.ValidString(val) { return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val) } } return nil } func validateLabelValues(vals []string, expectedNumberOfValues int) error { if len(vals) != expectedNumberOfValues { return fmt.Errorf( "%s: expected %d label values but got %d in %#v", errInconsistentCardinality, expectedNumberOfValues, len(vals), vals, ) } for _, val := range vals { if !utf8.ValidString(val) { return fmt.Errorf("label value %q is not valid UTF-8", val) } } return nil } func checkLabelName(l string) bool { return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) } client_golang-1.11.0/prometheus/metric.go000066400000000000000000000151471405741072000204140ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus import ( "strings" "time" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" "github.com/prometheus/common/model" dto "github.com/prometheus/client_model/go" ) var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash. // A Metric models a single sample value with its meta data being exported to // Prometheus. Implementations of Metric in this package are Gauge, Counter, // Histogram, Summary, and Untyped. 
type Metric interface { // Desc returns the descriptor for the Metric. This method idempotently // returns the same descriptor throughout the lifetime of the // Metric. The returned descriptor is immutable by contract. A Metric // unable to describe itself must return an invalid descriptor (created // with NewInvalidDesc). Desc() *Desc // Write encodes the Metric into a "Metric" Protocol Buffer data // transmission object. // // Metric implementations must observe concurrency safety as reads of // this metric may occur at any time, and any blocking occurs at the // expense of total performance of rendering all registered // metrics. Ideally, Metric implementations should support concurrent // readers. // // While populating dto.Metric, it is the responsibility of the // implementation to ensure validity of the Metric protobuf (like valid // UTF-8 strings or syntactically valid metric and label names). It is // recommended to sort labels lexicographically. Callers of Write should // still make sure of sorting if they depend on it. Write(*dto.Metric) error // TODO(beorn7): The original rationale of passing in a pre-allocated // dto.Metric protobuf to save allocations has disappeared. The // signature of this method should be changed to "Write() (*dto.Metric, // error)". } // Opts bundles the options for creating most Metric types. Each metric // implementation XXX has its own XXXOpts type, but in most cases, it is just // an alias of this type (which might change when the requirement arises.) // // It is mandatory to set Name to a non-empty string. All other fields are // optional and can safely be left at their zero value, although it is strongly // encouraged to set a Help string. type Opts struct { // Namespace, Subsystem, and Name are components of the fully-qualified // name of the Metric (created by joining these components with // "_"). Only Name is mandatory, the others merely help structuring the // name. Note that the fully-qualified name of the metric must be a // valid Prometheus metric name. Namespace string Subsystem string Name string // Help provides information about this metric. // // Metrics with the same fully-qualified name must have the same Help // string. Help string // ConstLabels are used to attach fixed labels to this metric. Metrics // with the same fully-qualified name must have the same label names in // their ConstLabels. // // ConstLabels are only used rarely. In particular, do not use them to // attach the same labels to all your metrics. Those use cases are // better covered by target labels set by the scraping Prometheus // server, or by one specific metric (e.g. a build_info or a // machine_role metric). See also // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels ConstLabels Labels } // BuildFQName joins the given three name components by "_". Empty name // components are ignored. If the name parameter itself is empty, an empty // string is returned, no matter what. Metric implementations included in this // library use this function internally to generate the fully-qualified metric // name from the name component in their Opts. Users of the library will only // need this function if they implement their own Metric or instantiate a Desc // (with NewDesc) directly. 
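// As a rough illustration, the joining behavior for the different combinations
// of empty and non-empty components (mirroring the scenarios exercised in the
// package's own tests) is:
//
//	BuildFQName("namespace", "subsystem", "name") // "namespace_subsystem_name"
//	BuildFQName("", "subsystem", "name")          // "subsystem_name"
//	BuildFQName("namespace", "", "name")          // "namespace_name"
//	BuildFQName("", "", "name")                   // "name"
//	BuildFQName("namespace", "subsystem", "")     // ""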
func BuildFQName(namespace, subsystem, name string) string { if name == "" { return "" } switch { case namespace != "" && subsystem != "": return strings.Join([]string{namespace, subsystem, name}, "_") case namespace != "": return strings.Join([]string{namespace, name}, "_") case subsystem != "": return strings.Join([]string{subsystem, name}, "_") } return name } // labelPairSorter implements sort.Interface. It is used to sort a slice of // dto.LabelPair pointers. type labelPairSorter []*dto.LabelPair func (s labelPairSorter) Len() int { return len(s) } func (s labelPairSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s labelPairSorter) Less(i, j int) bool { return s[i].GetName() < s[j].GetName() } type invalidMetric struct { desc *Desc err error } // NewInvalidMetric returns a metric whose Write method always returns the // provided error. It is useful if a Collector finds itself unable to collect // a metric and wishes to report an error to the registry. func NewInvalidMetric(desc *Desc, err error) Metric { return &invalidMetric{desc, err} } func (m *invalidMetric) Desc() *Desc { return m.desc } func (m *invalidMetric) Write(*dto.Metric) error { return m.err } type timestampedMetric struct { Metric t time.Time } func (m timestampedMetric) Write(pb *dto.Metric) error { e := m.Metric.Write(pb) pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000)) return e } // NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a // way that it has an explicit timestamp set to the provided Time. This is only // useful in rare cases as the timestamp of a Prometheus metric should usually // be set by the Prometheus server during scraping. Exceptions include mirroring // metrics with given timestamps from other metric // sources. // // NewMetricWithTimestamp works best with MustNewConstMetric, // MustNewConstHistogram, and MustNewConstSummary, see example. // // Currently, the exposition formats used by Prometheus are limited to // millisecond resolution. Thus, the provided time will be rounded down to the // next full millisecond value. func NewMetricWithTimestamp(t time.Time, m Metric) Metric { return timestampedMetric{Metric: m, t: t} } client_golang-1.11.0/prometheus/metric_test.go000066400000000000000000000021201405741072000214360ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus import "testing" func TestBuildFQName(t *testing.T) { scenarios := []struct{ namespace, subsystem, name, result string }{ {"a", "b", "c", "a_b_c"}, {"", "b", "c", "b_c"}, {"a", "", "c", "a_c"}, {"", "", "c", "c"}, {"a", "b", "", ""}, {"a", "", "", ""}, {"", "b", "", ""}, {" ", "", "", ""}, } for i, s := range scenarios { if want, got := s.result, BuildFQName(s.namespace, s.subsystem, s.name); want != got { t.Errorf("%d. 
want %s, got %s", i, want, got) } } } client_golang-1.11.0/prometheus/observer.go000066400000000000000000000050261405741072000207530ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus // Observer is the interface that wraps the Observe method, which is used by // Histogram and Summary to add observations. type Observer interface { Observe(float64) } // The ObserverFunc type is an adapter to allow the use of ordinary // functions as Observers. If f is a function with the appropriate // signature, ObserverFunc(f) is an Observer that calls f. // // This adapter is usually used in connection with the Timer type, and there are // two general use cases: // // The most common one is to use a Gauge as the Observer for a Timer. // See the "Gauge" Timer example. // // The more advanced use case is to create a function that dynamically decides // which Observer to use for observing the duration. See the "Complex" Timer // example. type ObserverFunc func(float64) // Observe calls f(value). It implements Observer. func (f ObserverFunc) Observe(value float64) { f(value) } // ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. type ObserverVec interface { GetMetricWith(Labels) (Observer, error) GetMetricWithLabelValues(lvs ...string) (Observer, error) With(Labels) Observer WithLabelValues(...string) Observer CurryWith(Labels) (ObserverVec, error) MustCurryWith(Labels) ObserverVec Collector } // ExemplarObserver is implemented by Observers that offer the option of // observing a value together with an exemplar. Its ObserveWithExemplar method // works like the Observe method of an Observer but also replaces the currently // saved exemplar (if any) with a new one, created from the provided value, the // current time as timestamp, and the provided Labels. Empty Labels will lead to // a valid (label-less) exemplar. But if Labels is nil, the current exemplar is // left in place. ObserveWithExemplar panics if any of the provided labels are // invalid or if the provided labels contain more than 64 runes in total. type ExemplarObserver interface { ObserveWithExemplar(value float64, exemplar Labels) } client_golang-1.11.0/prometheus/process_collector.go000066400000000000000000000112171405741072000226470ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package prometheus import ( "errors" "fmt" "io/ioutil" "os" "strconv" "strings" ) type processCollector struct { collectFn func(chan<- Metric) pidFn func() (int, error) reportErrors bool cpuTotal *Desc openFDs, maxFDs *Desc vsize, maxVsize *Desc rss *Desc startTime *Desc } // ProcessCollectorOpts defines the behavior of a process metrics collector // created with NewProcessCollector. type ProcessCollectorOpts struct { // PidFn returns the PID of the process the collector collects metrics // for. It is called upon each collection. By default, the PID of the // current process is used, as determined on construction time by // calling os.Getpid(). PidFn func() (int, error) // If non-empty, each of the collected metrics is prefixed by the // provided string and an underscore ("_"). Namespace string // If true, any error encountered during collection is reported as an // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored // and the collected metrics will be incomplete. (Possibly, no metrics // will be collected at all.) While that's usually not desired, it is // appropriate for the common "mix-in" of process metrics, where process // metrics are nice to have, but failing to collect them should not // disrupt the collection of the remaining metrics. ReportErrors bool } // NewProcessCollector is the obsolete version of collectors.NewProcessCollector. // See there for documentation. // // Deprecated: Use collectors.NewProcessCollector instead. func NewProcessCollector(opts ProcessCollectorOpts) Collector { ns := "" if len(opts.Namespace) > 0 { ns = opts.Namespace + "_" } c := &processCollector{ reportErrors: opts.ReportErrors, cpuTotal: NewDesc( ns+"process_cpu_seconds_total", "Total user and system CPU time spent in seconds.", nil, nil, ), openFDs: NewDesc( ns+"process_open_fds", "Number of open file descriptors.", nil, nil, ), maxFDs: NewDesc( ns+"process_max_fds", "Maximum number of open file descriptors.", nil, nil, ), vsize: NewDesc( ns+"process_virtual_memory_bytes", "Virtual memory size in bytes.", nil, nil, ), maxVsize: NewDesc( ns+"process_virtual_memory_max_bytes", "Maximum amount of virtual memory available in bytes.", nil, nil, ), rss: NewDesc( ns+"process_resident_memory_bytes", "Resident memory size in bytes.", nil, nil, ), startTime: NewDesc( ns+"process_start_time_seconds", "Start time of the process since unix epoch in seconds.", nil, nil, ), } if opts.PidFn == nil { pid := os.Getpid() c.pidFn = func() (int, error) { return pid, nil } } else { c.pidFn = opts.PidFn } // Set up process metric collection if supported by the runtime. if canCollectProcess() { c.collectFn = c.processCollect } else { c.collectFn = func(ch chan<- Metric) { c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) } } return c } // Describe returns all descriptions of the collector. func (c *processCollector) Describe(ch chan<- *Desc) { ch <- c.cpuTotal ch <- c.openFDs ch <- c.maxFDs ch <- c.vsize ch <- c.maxVsize ch <- c.rss ch <- c.startTime } // Collect returns the current state of all metrics of the collector. func (c *processCollector) Collect(ch chan<- Metric) { c.collectFn(ch) } func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { if !c.reportErrors { return } if desc == nil { desc = NewInvalidDesc(err) } ch <- NewInvalidMetric(desc, err) } // NewPidFileFn returns a function that retrieves a pid from the specified file. // It is meant to be used for the PidFn field in ProcessCollectorOpts. 
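// A minimal sketch of the intended wiring (the pid file path is illustrative,
// and the deprecated in-package constructor is used only for brevity):
//
//	c := NewProcessCollector(ProcessCollectorOpts{
//		PidFn:        NewPidFileFn("/var/run/myapp.pid"),
//		ReportErrors: true,
//	})
//	MustRegister(c)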
func NewPidFileFn(pidFilePath string) func() (int, error) { return func() (int, error) { content, err := ioutil.ReadFile(pidFilePath) if err != nil { return 0, fmt.Errorf("can't read pid file %q: %+v", pidFilePath, err) } pid, err := strconv.Atoi(strings.TrimSpace(string(content))) if err != nil { return 0, fmt.Errorf("can't parse pid file %q: %+v", pidFilePath, err) } return pid, nil } } client_golang-1.11.0/prometheus/process_collector_other.go000066400000000000000000000035471405741072000240570ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // +build !windows package prometheus import ( "github.com/prometheus/procfs" ) func canCollectProcess() bool { _, err := procfs.NewDefaultFS() return err == nil } func (c *processCollector) processCollect(ch chan<- Metric) { pid, err := c.pidFn() if err != nil { c.reportError(ch, nil, err) return } p, err := procfs.NewProc(pid) if err != nil { c.reportError(ch, nil, err) return } if stat, err := p.Stat(); err == nil { ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) if startTime, err := stat.StartTime(); err == nil { ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) } else { c.reportError(ch, c.startTime, err) } } else { c.reportError(ch, nil, err) } if fds, err := p.FileDescriptorsLen(); err == nil { ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) } else { c.reportError(ch, c.openFDs, err) } if limits, err := p.Limits(); err == nil { ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) } else { c.reportError(ch, nil, err) } } client_golang-1.11.0/prometheus/process_collector_test.go000066400000000000000000000107511405741072000237100ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// +build linux package prometheus import ( "bytes" "errors" "fmt" "os" "path/filepath" "regexp" "strings" "testing" "github.com/prometheus/common/expfmt" "github.com/prometheus/procfs" dto "github.com/prometheus/client_model/go" ) func TestProcessCollector(t *testing.T) { if _, err := procfs.Self(); err != nil { t.Skipf("skipping TestProcessCollector, procfs not available: %s", err) } registry := NewRegistry() if err := registry.Register(NewProcessCollector(ProcessCollectorOpts{})); err != nil { t.Fatal(err) } if err := registry.Register(NewProcessCollector(ProcessCollectorOpts{ PidFn: func() (int, error) { return os.Getpid(), nil }, Namespace: "foobar", ReportErrors: true, // No errors expected, just to see if none are reported. })); err != nil { t.Fatal(err) } mfs, err := registry.Gather() if err != nil { t.Fatal(err) } var buf bytes.Buffer for _, mf := range mfs { if _, err := expfmt.MetricFamilyToText(&buf, mf); err != nil { t.Fatal(err) } } for _, re := range []*regexp.Regexp{ regexp.MustCompile("\nprocess_cpu_seconds_total [0-9]"), regexp.MustCompile("\nprocess_max_fds [1-9]"), regexp.MustCompile("\nprocess_open_fds [1-9]"), regexp.MustCompile("\nprocess_virtual_memory_max_bytes (-1|[1-9])"), regexp.MustCompile("\nprocess_virtual_memory_bytes [1-9]"), regexp.MustCompile("\nprocess_resident_memory_bytes [1-9]"), regexp.MustCompile("\nprocess_start_time_seconds [0-9.]{10,}"), regexp.MustCompile("\nfoobar_process_cpu_seconds_total [0-9]"), regexp.MustCompile("\nfoobar_process_max_fds [1-9]"), regexp.MustCompile("\nfoobar_process_open_fds [1-9]"), regexp.MustCompile("\nfoobar_process_virtual_memory_max_bytes (-1|[1-9])"), regexp.MustCompile("\nfoobar_process_virtual_memory_bytes [1-9]"), regexp.MustCompile("\nfoobar_process_resident_memory_bytes [1-9]"), regexp.MustCompile("\nfoobar_process_start_time_seconds [0-9.]{10,}"), } { if !re.Match(buf.Bytes()) { t.Errorf("want body to match %s\n%s", re, buf.String()) } } brokenProcessCollector := NewProcessCollector(ProcessCollectorOpts{ PidFn: func() (int, error) { return 0, errors.New("boo") }, ReportErrors: true, }) ch := make(chan Metric) go func() { brokenProcessCollector.Collect(ch) close(ch) }() n := 0 for m := range ch { n++ pb := &dto.Metric{} err := m.Write(pb) if err == nil { t.Error("metric collected from broken process collector is unexpectedly valid") } } if n != 1 { t.Errorf("%d metrics collected, want 1", n) } } func TestNewPidFileFn(t *testing.T) { folderPath, err := os.Getwd() if err != nil { t.Error("failed to get current path") } mockPidFilePath := filepath.Join(folderPath, "mockPidFile") defer os.Remove(mockPidFilePath) testCases := []struct { mockPidFile func() expectedErrPrefix string expectedPid int desc string }{ { mockPidFile: func() { os.Remove(mockPidFilePath) }, expectedErrPrefix: "can't read pid file", expectedPid: 0, desc: "no existed pid file", }, { mockPidFile: func() { os.Remove(mockPidFilePath) f, _ := os.Create(mockPidFilePath) f.Write([]byte("abc")) f.Close() }, expectedErrPrefix: "can't parse pid file", expectedPid: 0, desc: "existed pid file, error pid number", }, { mockPidFile: func() { os.Remove(mockPidFilePath) f, _ := os.Create(mockPidFilePath) f.Write([]byte("123")) f.Close() }, expectedErrPrefix: "", expectedPid: 123, desc: "existed pid file, correct pid number", }, } for _, tc := range testCases { fn := NewPidFileFn(mockPidFilePath) if fn == nil { t.Error("Should not get nil PidFileFn") } tc.mockPidFile() if pid, err := fn(); pid != tc.expectedPid || (err != nil && !strings.HasPrefix(err.Error(), 
tc.expectedErrPrefix)) { fmt.Println(err.Error()) t.Error(tc.desc) } } } client_golang-1.11.0/prometheus/process_collector_windows.go000066400000000000000000000066101405741072000244220ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus import ( "syscall" "unsafe" "golang.org/x/sys/windows" ) func canCollectProcess() bool { return true } var ( modpsapi = syscall.NewLazyDLL("psapi.dll") modkernel32 = syscall.NewLazyDLL("kernel32.dll") procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount") ) type processMemoryCounters struct { // System interface description // https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex // Refer to the Golang internal implementation // https://golang.org/src/internal/syscall/windows/psapi_windows.go _ uint32 PageFaultCount uint32 PeakWorkingSetSize uintptr WorkingSetSize uintptr QuotaPeakPagedPoolUsage uintptr QuotaPagedPoolUsage uintptr QuotaPeakNonPagedPoolUsage uintptr QuotaNonPagedPoolUsage uintptr PagefileUsage uintptr PeakPagefileUsage uintptr PrivateUsage uintptr } func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) { mem := processMemoryCounters{} r1, _, err := procGetProcessMemoryInfo.Call( uintptr(handle), uintptr(unsafe.Pointer(&mem)), uintptr(unsafe.Sizeof(mem)), ) if r1 != 1 { return mem, err } else { return mem, nil } } func getProcessHandleCount(handle windows.Handle) (uint32, error) { var count uint32 r1, _, err := procGetProcessHandleCount.Call( uintptr(handle), uintptr(unsafe.Pointer(&count)), ) if r1 != 1 { return 0, err } else { return count, nil } } func (c *processCollector) processCollect(ch chan<- Metric) { h, err := windows.GetCurrentProcess() if err != nil { c.reportError(ch, nil, err) return } var startTime, exitTime, kernelTime, userTime windows.Filetime err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) if err != nil { c.reportError(ch, nil, err) return } ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9)) ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime)) mem, err := getProcessMemoryInfo(h) if err != nil { c.reportError(ch, nil, err) return } ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage)) ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize)) handles, err := getProcessHandleCount(h) if err != nil { c.reportError(ch, nil, err) return } ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles)) ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process. 
} func fileTimeToSeconds(ft windows.Filetime) float64 { return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7 } client_golang-1.11.0/prometheus/process_collector_windows_test.go000066400000000000000000000045341405741072000254640ustar00rootroot00000000000000// Copyright 2019 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus import ( "bytes" "os" "regexp" "testing" "github.com/prometheus/common/expfmt" ) func TestWindowsProcessCollector(t *testing.T) { registry := NewRegistry() if err := registry.Register(NewProcessCollector(ProcessCollectorOpts{})); err != nil { t.Fatal(err) } if err := registry.Register(NewProcessCollector(ProcessCollectorOpts{ PidFn: func() (int, error) { return os.Getpid(), nil }, Namespace: "foobar", ReportErrors: true, // No errors expected, just to see if none are reported. })); err != nil { t.Fatal(err) } mfs, err := registry.Gather() if err != nil { t.Fatal(err) } var buf bytes.Buffer for _, mf := range mfs { if _, err := expfmt.MetricFamilyToText(&buf, mf); err != nil { t.Fatal(err) } } for _, re := range []*regexp.Regexp{ regexp.MustCompile("\nprocess_cpu_seconds_total [0-9]"), regexp.MustCompile("\nprocess_max_fds [1-9]"), regexp.MustCompile("\nprocess_open_fds [1-9]"), regexp.MustCompile("\nprocess_virtual_memory_max_bytes (-1|[1-9])"), regexp.MustCompile("\nprocess_virtual_memory_bytes [1-9]"), regexp.MustCompile("\nprocess_resident_memory_bytes [1-9]"), regexp.MustCompile("\nprocess_start_time_seconds [0-9.]{10,}"), regexp.MustCompile("\nfoobar_process_cpu_seconds_total [0-9]"), regexp.MustCompile("\nfoobar_process_max_fds [1-9]"), regexp.MustCompile("\nfoobar_process_open_fds [1-9]"), regexp.MustCompile("\nfoobar_process_virtual_memory_max_bytes (-1|[1-9])"), regexp.MustCompile("\nfoobar_process_virtual_memory_bytes [1-9]"), regexp.MustCompile("\nfoobar_process_resident_memory_bytes [1-9]"), regexp.MustCompile("\nfoobar_process_start_time_seconds [0-9.]{10,}"), } { if !re.Match(buf.Bytes()) { t.Errorf("want body to match %s\n%s", re, buf.String()) } } } client_golang-1.11.0/prometheus/promauto/000077500000000000000000000000001405741072000204405ustar00rootroot00000000000000client_golang-1.11.0/prometheus/promauto/auto.go000066400000000000000000000371431405741072000217470ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// Package promauto provides alternative constructors for the fundamental // Prometheus metric types and their …Vec and …Func variants. The difference to // their counterparts in the prometheus package is that the promauto // constructors return Collectors that are already registered with a // registry. There are two sets of constructors. The constructors in the first // set are top-level functions, while the constructors in the other set are // methods of the Factory type. The top-level function return Collectors // registered with the global registry (prometheus.DefaultRegisterer), while the // methods return Collectors registered with the registry the Factory was // constructed with. All constructors panic if the registration fails. // // The following example is a complete program to create a histogram of normally // distributed random numbers from the math/rand package: // // package main // // import ( // "math/rand" // "net/http" // // "github.com/prometheus/client_golang/prometheus" // "github.com/prometheus/client_golang/prometheus/promauto" // "github.com/prometheus/client_golang/prometheus/promhttp" // ) // // var histogram = promauto.NewHistogram(prometheus.HistogramOpts{ // Name: "random_numbers", // Help: "A histogram of normally distributed random numbers.", // Buckets: prometheus.LinearBuckets(-3, .1, 61), // }) // // func Random() { // for { // histogram.Observe(rand.NormFloat64()) // } // } // // func main() { // go Random() // http.Handle("/metrics", promhttp.Handler()) // http.ListenAndServe(":1971", nil) // } // // Prometheus's version of a minimal hello-world program: // // package main // // import ( // "fmt" // "net/http" // // "github.com/prometheus/client_golang/prometheus" // "github.com/prometheus/client_golang/prometheus/promauto" // "github.com/prometheus/client_golang/prometheus/promhttp" // ) // // func main() { // http.Handle("/", promhttp.InstrumentHandlerCounter( // promauto.NewCounterVec( // prometheus.CounterOpts{ // Name: "hello_requests_total", // Help: "Total number of hello-world requests by HTTP code.", // }, // []string{"code"}, // ), // http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // fmt.Fprint(w, "Hello, world!") // }), // )) // http.Handle("/metrics", promhttp.Handler()) // http.ListenAndServe(":1971", nil) // } // // A Factory is created with the With(prometheus.Registerer) function, which // enables two usage pattern. 
With(prometheus.Registerer) can be called once per // line: // // var ( // reg = prometheus.NewRegistry() // randomNumbers = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ // Name: "random_numbers", // Help: "A histogram of normally distributed random numbers.", // Buckets: prometheus.LinearBuckets(-3, .1, 61), // }) // requestCount = promauto.With(reg).NewCounterVec( // prometheus.CounterOpts{ // Name: "http_requests_total", // Help: "Total number of HTTP requests by status code and method.", // }, // []string{"code", "method"}, // ) // ) // // Or it can be used to create a Factory once to be used multiple times: // // var ( // reg = prometheus.NewRegistry() // factory = promauto.With(reg) // randomNumbers = factory.NewHistogram(prometheus.HistogramOpts{ // Name: "random_numbers", // Help: "A histogram of normally distributed random numbers.", // Buckets: prometheus.LinearBuckets(-3, .1, 61), // }) // requestCount = factory.NewCounterVec( // prometheus.CounterOpts{ // Name: "http_requests_total", // Help: "Total number of HTTP requests by status code and method.", // }, // []string{"code", "method"}, // ) // ) // // This appears very handy. So why are these constructors locked away in a // separate package? // // The main problem is that registration may fail, e.g. if a metric inconsistent // with or equal to the newly to be registered one is already registered. // Therefore, the Register method in the prometheus.Registerer interface returns // an error, and the same is the case for the top-level prometheus.Register // function that registers with the global registry. The prometheus package also // provides MustRegister versions for both. They panic if the registration // fails, and they clearly call this out by using the Must… idiom. Panicking is // problematic in this case because it doesn't just happen on input provided by // the caller that is invalid on its own. Things are a bit more subtle here: // Metric creation and registration tend to be spread widely over the // codebase. It can easily happen that an incompatible metric is added to an // unrelated part of the code, and suddenly code that used to work perfectly // fine starts to panic (provided that the registration of the newly added // metric happens before the registration of the previously existing // metric). This may come as an even bigger surprise with the global registry, // where simply importing another package can trigger a panic (if the newly // imported package registers metrics in its init function). At least, in the // prometheus package, creation of metrics and other collectors is separate from // registration. You first create the metric, and then you decide explicitly if // you want to register it with a local or the global registry, and if you want // to handle the error or risk a panic. With the constructors in the promauto // package, registration is automatic, and if it fails, it will always // panic. Furthermore, the constructors will often be called in the var section // of a file, which means that panicking will happen as a side effect of merely // importing a package. // // A separate package allows conservative users to entirely ignore it. And // whoever wants to use it, will do so explicitly, with an opportunity to read // this warning. // // Enjoy promauto responsibly! 
package promauto import "github.com/prometheus/client_golang/prometheus" // NewCounter works like the function of the same name in the prometheus package // but it automatically registers the Counter with the // prometheus.DefaultRegisterer. If the registration fails, NewCounter panics. func NewCounter(opts prometheus.CounterOpts) prometheus.Counter { return With(prometheus.DefaultRegisterer).NewCounter(opts) } // NewCounterVec works like the function of the same name in the prometheus // package but it automatically registers the CounterVec with the // prometheus.DefaultRegisterer. If the registration fails, NewCounterVec // panics. func NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec { return With(prometheus.DefaultRegisterer).NewCounterVec(opts, labelNames) } // NewCounterFunc works like the function of the same name in the prometheus // package but it automatically registers the CounterFunc with the // prometheus.DefaultRegisterer. If the registration fails, NewCounterFunc // panics. func NewCounterFunc(opts prometheus.CounterOpts, function func() float64) prometheus.CounterFunc { return With(prometheus.DefaultRegisterer).NewCounterFunc(opts, function) } // NewGauge works like the function of the same name in the prometheus package // but it automatically registers the Gauge with the // prometheus.DefaultRegisterer. If the registration fails, NewGauge panics. func NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge { return With(prometheus.DefaultRegisterer).NewGauge(opts) } // NewGaugeVec works like the function of the same name in the prometheus // package but it automatically registers the GaugeVec with the // prometheus.DefaultRegisterer. If the registration fails, NewGaugeVec panics. func NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec { return With(prometheus.DefaultRegisterer).NewGaugeVec(opts, labelNames) } // NewGaugeFunc works like the function of the same name in the prometheus // package but it automatically registers the GaugeFunc with the // prometheus.DefaultRegisterer. If the registration fails, NewGaugeFunc panics. func NewGaugeFunc(opts prometheus.GaugeOpts, function func() float64) prometheus.GaugeFunc { return With(prometheus.DefaultRegisterer).NewGaugeFunc(opts, function) } // NewSummary works like the function of the same name in the prometheus package // but it automatically registers the Summary with the // prometheus.DefaultRegisterer. If the registration fails, NewSummary panics. func NewSummary(opts prometheus.SummaryOpts) prometheus.Summary { return With(prometheus.DefaultRegisterer).NewSummary(opts) } // NewSummaryVec works like the function of the same name in the prometheus // package but it automatically registers the SummaryVec with the // prometheus.DefaultRegisterer. If the registration fails, NewSummaryVec // panics. func NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec { return With(prometheus.DefaultRegisterer).NewSummaryVec(opts, labelNames) } // NewHistogram works like the function of the same name in the prometheus // package but it automatically registers the Histogram with the // prometheus.DefaultRegisterer. If the registration fails, NewHistogram panics. 
func NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram { return With(prometheus.DefaultRegisterer).NewHistogram(opts) } // NewHistogramVec works like the function of the same name in the prometheus // package but it automatically registers the HistogramVec with the // prometheus.DefaultRegisterer. If the registration fails, NewHistogramVec // panics. func NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec { return With(prometheus.DefaultRegisterer).NewHistogramVec(opts, labelNames) } // NewUntypedFunc works like the function of the same name in the prometheus // package but it automatically registers the UntypedFunc with the // prometheus.DefaultRegisterer. If the registration fails, NewUntypedFunc // panics. func NewUntypedFunc(opts prometheus.UntypedOpts, function func() float64) prometheus.UntypedFunc { return With(prometheus.DefaultRegisterer).NewUntypedFunc(opts, function) } // Factory provides factory methods to create Collectors that are automatically // registered with a Registerer. Create a Factory with the With function, // providing a Registerer to auto-register created Collectors with. The zero // value of a Factory creates Collectors that are not registered with any // Registerer. All methods of the Factory panic if the registration fails. type Factory struct { r prometheus.Registerer } // With creates a Factory using the provided Registerer for registration of the // created Collectors. If the provided Registerer is nil, the returned Factory // creates Collectors that are not registered with any Registerer. func With(r prometheus.Registerer) Factory { return Factory{r} } // NewCounter works like the function of the same name in the prometheus package // but it automatically registers the Counter with the Factory's Registerer. func (f Factory) NewCounter(opts prometheus.CounterOpts) prometheus.Counter { c := prometheus.NewCounter(opts) if f.r != nil { f.r.MustRegister(c) } return c } // NewCounterVec works like the function of the same name in the prometheus // package but it automatically registers the CounterVec with the Factory's // Registerer. func (f Factory) NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec { c := prometheus.NewCounterVec(opts, labelNames) if f.r != nil { f.r.MustRegister(c) } return c } // NewCounterFunc works like the function of the same name in the prometheus // package but it automatically registers the CounterFunc with the Factory's // Registerer. func (f Factory) NewCounterFunc(opts prometheus.CounterOpts, function func() float64) prometheus.CounterFunc { c := prometheus.NewCounterFunc(opts, function) if f.r != nil { f.r.MustRegister(c) } return c } // NewGauge works like the function of the same name in the prometheus package // but it automatically registers the Gauge with the Factory's Registerer. func (f Factory) NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge { g := prometheus.NewGauge(opts) if f.r != nil { f.r.MustRegister(g) } return g } // NewGaugeVec works like the function of the same name in the prometheus // package but it automatically registers the GaugeVec with the Factory's // Registerer. 
func (f Factory) NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec { g := prometheus.NewGaugeVec(opts, labelNames) if f.r != nil { f.r.MustRegister(g) } return g } // NewGaugeFunc works like the function of the same name in the prometheus // package but it automatically registers the GaugeFunc with the Factory's // Registerer. func (f Factory) NewGaugeFunc(opts prometheus.GaugeOpts, function func() float64) prometheus.GaugeFunc { g := prometheus.NewGaugeFunc(opts, function) if f.r != nil { f.r.MustRegister(g) } return g } // NewSummary works like the function of the same name in the prometheus package // but it automatically registers the Summary with the Factory's Registerer. func (f Factory) NewSummary(opts prometheus.SummaryOpts) prometheus.Summary { s := prometheus.NewSummary(opts) if f.r != nil { f.r.MustRegister(s) } return s } // NewSummaryVec works like the function of the same name in the prometheus // package but it automatically registers the SummaryVec with the Factory's // Registerer. func (f Factory) NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec { s := prometheus.NewSummaryVec(opts, labelNames) if f.r != nil { f.r.MustRegister(s) } return s } // NewHistogram works like the function of the same name in the prometheus // package but it automatically registers the Histogram with the Factory's // Registerer. func (f Factory) NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram { h := prometheus.NewHistogram(opts) if f.r != nil { f.r.MustRegister(h) } return h } // NewHistogramVec works like the function of the same name in the prometheus // package but it automatically registers the HistogramVec with the Factory's // Registerer. func (f Factory) NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec { h := prometheus.NewHistogramVec(opts, labelNames) if f.r != nil { f.r.MustRegister(h) } return h } // NewUntypedFunc works like the function of the same name in the prometheus // package but it automatically registers the UntypedFunc with the Factory's // Registerer. func (f Factory) NewUntypedFunc(opts prometheus.UntypedOpts, function func() float64) prometheus.UntypedFunc { u := prometheus.NewUntypedFunc(opts, function) if f.r != nil { f.r.MustRegister(u) } return u } client_golang-1.11.0/prometheus/promauto/auto_test.go000066400000000000000000000015151405741072000230000ustar00rootroot00000000000000// Copyright 2020 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package promauto import ( "testing" "github.com/prometheus/client_golang/prometheus" ) func TestNil(t *testing.T) { // A nil registerer should be treated as a no-op by promauto. 
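	// With(nil) yields a Factory whose Registerer is nil, so the created
	// Counter is never registered anywhere; constructing and incrementing it
	// must therefore not panic.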
With(nil).NewCounter(prometheus.CounterOpts{Name: "test"}).Inc() } client_golang-1.11.0/prometheus/promhttp/000077500000000000000000000000001405741072000204475ustar00rootroot00000000000000client_golang-1.11.0/prometheus/promhttp/delegator.go000066400000000000000000000266411405741072000227550ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package promhttp import ( "bufio" "io" "net" "net/http" ) const ( closeNotifier = 1 << iota flusher hijacker readerFrom pusher ) type delegator interface { http.ResponseWriter Status() int Written() int64 } type responseWriterDelegator struct { http.ResponseWriter status int written int64 wroteHeader bool observeWriteHeader func(int) } func (r *responseWriterDelegator) Status() int { return r.status } func (r *responseWriterDelegator) Written() int64 { return r.written } func (r *responseWriterDelegator) WriteHeader(code int) { if r.observeWriteHeader != nil && !r.wroteHeader { // Only call observeWriteHeader for the 1st time. It's a bug if // WriteHeader is called more than once, but we want to protect // against it here. Note that we still delegate the WriteHeader // to the original ResponseWriter to not mask the bug from it. r.observeWriteHeader(code) } r.status = code r.wroteHeader = true r.ResponseWriter.WriteHeader(code) } func (r *responseWriterDelegator) Write(b []byte) (int, error) { // If applicable, call WriteHeader here so that observeWriteHeader is // handled appropriately. if !r.wroteHeader { r.WriteHeader(http.StatusOK) } n, err := r.ResponseWriter.Write(b) r.written += int64(n) return n, err } type closeNotifierDelegator struct{ *responseWriterDelegator } type flusherDelegator struct{ *responseWriterDelegator } type hijackerDelegator struct{ *responseWriterDelegator } type readerFromDelegator struct{ *responseWriterDelegator } type pusherDelegator struct{ *responseWriterDelegator } func (d closeNotifierDelegator) CloseNotify() <-chan bool { //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users. return d.ResponseWriter.(http.CloseNotifier).CloseNotify() } func (d flusherDelegator) Flush() { // If applicable, call WriteHeader here so that observeWriteHeader is // handled appropriately. if !d.wroteHeader { d.WriteHeader(http.StatusOK) } d.ResponseWriter.(http.Flusher).Flush() } func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { return d.ResponseWriter.(http.Hijacker).Hijack() } func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { // If applicable, call WriteHeader here so that observeWriteHeader is // handled appropriately. 
if !d.wroteHeader { d.WriteHeader(http.StatusOK) } n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re) d.written += n return n, err } func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { return d.ResponseWriter.(http.Pusher).Push(target, opts) } var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32) func init() { // TODO(beorn7): Code generation would help here. pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0 return d } pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1 return closeNotifierDelegator{d} } pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2 return flusherDelegator{d} } pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3 return struct { *responseWriterDelegator http.Flusher http.CloseNotifier }{d, flusherDelegator{d}, closeNotifierDelegator{d}} } pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4 return hijackerDelegator{d} } pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5 return struct { *responseWriterDelegator http.Hijacker http.CloseNotifier }{d, hijackerDelegator{d}, closeNotifierDelegator{d}} } pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6 return struct { *responseWriterDelegator http.Hijacker http.Flusher }{d, hijackerDelegator{d}, flusherDelegator{d}} } pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7 return struct { *responseWriterDelegator http.Hijacker http.Flusher http.CloseNotifier }{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} } pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8 return readerFromDelegator{d} } pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9 return struct { *responseWriterDelegator io.ReaderFrom http.CloseNotifier }{d, readerFromDelegator{d}, closeNotifierDelegator{d}} } pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10 return struct { *responseWriterDelegator io.ReaderFrom http.Flusher }{d, readerFromDelegator{d}, flusherDelegator{d}} } pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11 return struct { *responseWriterDelegator io.ReaderFrom http.Flusher http.CloseNotifier }{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} } pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12 return struct { *responseWriterDelegator io.ReaderFrom http.Hijacker }{d, readerFromDelegator{d}, hijackerDelegator{d}} } pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13 return struct { *responseWriterDelegator io.ReaderFrom http.Hijacker http.CloseNotifier }{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} } pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14 return struct { *responseWriterDelegator io.ReaderFrom http.Hijacker http.Flusher }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} } pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15 return struct { *responseWriterDelegator io.ReaderFrom http.Hijacker http.Flusher http.CloseNotifier }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, 
closeNotifierDelegator{d}} } pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 return pusherDelegator{d} } pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 return struct { *responseWriterDelegator http.Pusher http.CloseNotifier }{d, pusherDelegator{d}, closeNotifierDelegator{d}} } pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 return struct { *responseWriterDelegator http.Pusher http.Flusher }{d, pusherDelegator{d}, flusherDelegator{d}} } pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 return struct { *responseWriterDelegator http.Pusher http.Flusher http.CloseNotifier }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} } pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 return struct { *responseWriterDelegator http.Pusher http.Hijacker }{d, pusherDelegator{d}, hijackerDelegator{d}} } pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 return struct { *responseWriterDelegator http.Pusher http.Hijacker http.CloseNotifier }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} } pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 return struct { *responseWriterDelegator http.Pusher http.Hijacker http.Flusher }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} } pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 return struct { *responseWriterDelegator http.Pusher http.Hijacker http.Flusher http.CloseNotifier }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} } pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 return struct { *responseWriterDelegator http.Pusher io.ReaderFrom }{d, pusherDelegator{d}, readerFromDelegator{d}} } pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 return struct { *responseWriterDelegator http.Pusher io.ReaderFrom http.CloseNotifier }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}} } pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 return struct { *responseWriterDelegator http.Pusher io.ReaderFrom http.Flusher }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}} } pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 return struct { *responseWriterDelegator http.Pusher io.ReaderFrom http.Flusher http.CloseNotifier }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} } pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 return struct { *responseWriterDelegator http.Pusher io.ReaderFrom http.Hijacker }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}} } pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 return struct { *responseWriterDelegator http.Pusher io.ReaderFrom http.Hijacker http.CloseNotifier }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} } pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 return struct { *responseWriterDelegator http.Pusher io.ReaderFrom 
http.Hijacker http.Flusher }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} } pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 return struct { *responseWriterDelegator http.Pusher io.ReaderFrom http.Hijacker http.Flusher http.CloseNotifier }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} } } func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { d := &responseWriterDelegator{ ResponseWriter: w, observeWriteHeader: observeWriteHeaderFunc, } id := 0 //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users. if _, ok := w.(http.CloseNotifier); ok { id += closeNotifier } if _, ok := w.(http.Flusher); ok { id += flusher } if _, ok := w.(http.Hijacker); ok { id += hijacker } if _, ok := w.(io.ReaderFrom); ok { id += readerFrom } if _, ok := w.(http.Pusher); ok { id += pusher } return pickDelegator[id](d) } client_golang-1.11.0/prometheus/promhttp/http.go000066400000000000000000000350041405741072000217570ustar00rootroot00000000000000// Copyright 2016 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package promhttp provides tooling around HTTP servers and clients. // // First, the package allows the creation of http.Handler instances to expose // Prometheus metrics via HTTP. promhttp.Handler acts on the // prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a // custom registry or anything that implements the Gatherer interface. It also // allows the creation of handlers that act differently on errors or allow to // log errors. // // Second, the package provides tooling to instrument instances of http.Handler // via middleware. Middleware wrappers follow the naming scheme // InstrumentHandlerX, where X describes the intended use of the middleware. // See each function's doc comment for specific details. // // Finally, the package allows for an http.RoundTripper to be instrumented via // middleware. Middleware wrappers follow the naming scheme // InstrumentRoundTripperX, where X describes the intended use of the // middleware. See each function's doc comment for specific details. package promhttp import ( "compress/gzip" "fmt" "io" "net/http" "strings" "sync" "time" "github.com/prometheus/common/expfmt" "github.com/prometheus/client_golang/prometheus" ) const ( contentTypeHeader = "Content-Type" contentEncodingHeader = "Content-Encoding" acceptEncodingHeader = "Accept-Encoding" ) var gzipPool = sync.Pool{ New: func() interface{} { return gzip.NewWriter(nil) }, } // Handler returns an http.Handler for the prometheus.DefaultGatherer, using // default HandlerOpts, i.e. it reports the first error as an HTTP error, it has // no error logging, and it applies compression if requested by the client. 
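// For the common case, the wiring from an importing program is just (a minimal
// sketch; the listen address is illustrative):
//
//	http.Handle("/metrics", promhttp.Handler())
//	log.Fatal(http.ListenAndServe(":8080", nil))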
// // The returned http.Handler is already instrumented using the // InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you // create multiple http.Handlers by separate calls of the Handler function, the // metrics used for instrumentation will be shared between them, providing // global scrape counts. // // This function is meant to cover the bulk of basic use cases. If you are doing // anything that requires more customization (including using a non-default // Gatherer, different instrumentation, and non-default HandlerOpts), use the // HandlerFor function. See there for details. func Handler() http.Handler { return InstrumentMetricHandler( prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}), ) } // HandlerFor returns an uninstrumented http.Handler for the provided // Gatherer. The behavior of the Handler is defined by the provided // HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom // Gatherers, with non-default HandlerOpts, and/or with custom (or no) // instrumentation. Use the InstrumentMetricHandler function to apply the same // kind of instrumentation as it is used by the Handler function. func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { var ( inFlightSem chan struct{} errCnt = prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "promhttp_metric_handler_errors_total", Help: "Total number of internal errors encountered by the promhttp metric handler.", }, []string{"cause"}, ) ) if opts.MaxRequestsInFlight > 0 { inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight) } if opts.Registry != nil { // Initialize all possibilities that can occur below. errCnt.WithLabelValues("gathering") errCnt.WithLabelValues("encoding") if err := opts.Registry.Register(errCnt); err != nil { if are, ok := err.(prometheus.AlreadyRegisteredError); ok { errCnt = are.ExistingCollector.(*prometheus.CounterVec) } else { panic(err) } } } h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { if inFlightSem != nil { select { case inFlightSem <- struct{}{}: // All good, carry on. defer func() { <-inFlightSem }() default: http.Error(rsp, fmt.Sprintf( "Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight, ), http.StatusServiceUnavailable) return } } mfs, err := reg.Gather() if err != nil { if opts.ErrorLog != nil { opts.ErrorLog.Println("error gathering metrics:", err) } errCnt.WithLabelValues("gathering").Inc() switch opts.ErrorHandling { case PanicOnError: panic(err) case ContinueOnError: if len(mfs) == 0 { // Still report the error if no metrics have been gathered. httpError(rsp, err) return } case HTTPErrorOnError: httpError(rsp, err) return } } var contentType expfmt.Format if opts.EnableOpenMetrics { contentType = expfmt.NegotiateIncludingOpenMetrics(req.Header) } else { contentType = expfmt.Negotiate(req.Header) } header := rsp.Header() header.Set(contentTypeHeader, string(contentType)) w := io.Writer(rsp) if !opts.DisableCompression && gzipAccepted(req.Header) { header.Set(contentEncodingHeader, "gzip") gz := gzipPool.Get().(*gzip.Writer) defer gzipPool.Put(gz) gz.Reset(w) defer gz.Close() w = gz } enc := expfmt.NewEncoder(w, contentType) // handleError handles the error according to opts.ErrorHandling // and returns true if we have to abort after the handling. 
handleError := func(err error) bool { if err == nil { return false } if opts.ErrorLog != nil { opts.ErrorLog.Println("error encoding and sending metric family:", err) } errCnt.WithLabelValues("encoding").Inc() switch opts.ErrorHandling { case PanicOnError: panic(err) case HTTPErrorOnError: // We cannot really send an HTTP error at this // point because we most likely have written // something to rsp already. But at least we can // stop sending. return true } // Do nothing in all other cases, including ContinueOnError. return false } for _, mf := range mfs { if handleError(enc.Encode(mf)) { return } } if closer, ok := enc.(expfmt.Closer); ok { // This in particular takes care of the final "# EOF\n" line for OpenMetrics. if handleError(closer.Close()) { return } } }) if opts.Timeout <= 0 { return h } return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf( "Exceeded configured timeout of %v.\n", opts.Timeout, )) } // InstrumentMetricHandler is usually used with an http.Handler returned by the // HandlerFor function. It instruments the provided http.Handler with two // metrics: A counter vector "promhttp_metric_handler_requests_total" to count // scrapes partitioned by HTTP status code, and a gauge // "promhttp_metric_handler_requests_in_flight" to track the number of // simultaneous scrapes. This function idempotently registers collectors for // both metrics with the provided Registerer. It panics if the registration // fails. The provided metrics are useful to see how many scrapes hit the // monitored target (which could be from different Prometheus servers or other // scrapers), and how often they overlap (which would result in more than one // scrape in flight at the same time). Note that the scrapes-in-flight gauge // will contain the scrape by which it is exposed, while the scrape counter will // only get incremented after the scrape is complete (as only then the status // code is known). For tracking scrape durations, use the // "scrape_duration_seconds" gauge created by the Prometheus server upon each // scrape. func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler { cnt := prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "promhttp_metric_handler_requests_total", Help: "Total number of scrapes by HTTP status code.", }, []string{"code"}, ) // Initialize the most likely HTTP status codes. cnt.WithLabelValues("200") cnt.WithLabelValues("500") cnt.WithLabelValues("503") if err := reg.Register(cnt); err != nil { if are, ok := err.(prometheus.AlreadyRegisteredError); ok { cnt = are.ExistingCollector.(*prometheus.CounterVec) } else { panic(err) } } gge := prometheus.NewGauge(prometheus.GaugeOpts{ Name: "promhttp_metric_handler_requests_in_flight", Help: "Current number of scrapes being served.", }) if err := reg.Register(gge); err != nil { if are, ok := err.(prometheus.AlreadyRegisteredError); ok { gge = are.ExistingCollector.(prometheus.Gauge) } else { panic(err) } } return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler)) } // HandlerErrorHandling defines how a Handler serving metrics will handle // errors. type HandlerErrorHandling int // These constants cause handlers serving metrics to behave as described if // errors are encountered. const ( // Serve an HTTP status code 500 upon the first error // encountered. Report the error message in the body. Note that HTTP // errors cannot be served anymore once the beginning of a regular // payload has been sent. 
Thus, in the (unlikely) case that encoding the // payload into the negotiated wire format fails, serving the response // will simply be aborted. Set an ErrorLog in HandlerOpts to detect // those errors. HTTPErrorOnError HandlerErrorHandling = iota // Ignore errors and try to serve as many metrics as possible. However, // if no metrics can be served, serve an HTTP status code 500 and the // last error message in the body. Only use this in deliberate "best // effort" metrics collection scenarios. In this case, it is highly // recommended to provide other means of detecting errors: By setting an // ErrorLog in HandlerOpts, the errors are logged. By providing a // Registry in HandlerOpts, the exposed metrics include an error counter // "promhttp_metric_handler_errors_total", which can be used for // alerts. ContinueOnError // Panic upon the first error encountered (useful for "crash only" apps). PanicOnError ) // Logger is the minimal interface HandlerOpts needs for logging. Note that // log.Logger from the standard library implements this interface, and it is // easy to implement by custom loggers, if they don't do so already anyway. type Logger interface { Println(v ...interface{}) } // HandlerOpts specifies options how to serve metrics via an http.Handler. The // zero value of HandlerOpts is a reasonable default. type HandlerOpts struct { // ErrorLog specifies an optional Logger for errors collecting and // serving metrics. If nil, errors are not logged at all. Note that the // type of a reported error is often prometheus.MultiError, which // formats into a multi-line error string. If you want to avoid the // latter, create a Logger implementation that detects a // prometheus.MultiError and formats the contained errors into one line. ErrorLog Logger // ErrorHandling defines how errors are handled. Note that errors are // logged regardless of the configured ErrorHandling provided ErrorLog // is not nil. ErrorHandling HandlerErrorHandling // If Registry is not nil, it is used to register a metric // "promhttp_metric_handler_errors_total", partitioned by "cause". A // failed registration causes a panic. Note that this error counter is // different from the instrumentation you get from the various // InstrumentHandler... helpers. It counts errors that don't necessarily // result in a non-2xx HTTP status code. There are two typical cases: // (1) Encoding errors that only happen after streaming of the HTTP body // has already started (and the status code 200 has been sent). This // should only happen with custom collectors. (2) Collection errors with // no effect on the HTTP status code because ErrorHandling is set to // ContinueOnError. Registry prometheus.Registerer // If DisableCompression is true, the handler will never compress the // response, even if requested by the client. DisableCompression bool // The number of concurrent HTTP requests is limited to // MaxRequestsInFlight. Additional requests are responded to with 503 // Service Unavailable and a suitable message in the body. If // MaxRequestsInFlight is 0 or negative, no limit is applied. MaxRequestsInFlight int // If handling a request takes longer than Timeout, it is responded to // with 503 ServiceUnavailable and a suitable Message. No timeout is // applied if Timeout is 0 or negative. 
Note that with the current // implementation, reaching the timeout simply ends the HTTP requests as // described above (and even that only if sending of the body hasn't // started yet), while the bulk work of gathering all the metrics keeps // running in the background (with the eventual result to be thrown // away). Until the implementation is improved, it is recommended to // implement a separate timeout in potentially slow Collectors. Timeout time.Duration // If true, the experimental OpenMetrics encoding is added to the // possible options during content negotiation. Note that Prometheus // 2.5.0+ will negotiate OpenMetrics as first priority. OpenMetrics is // the only way to transmit exemplars. However, the move to OpenMetrics // is not completely transparent. Most notably, the values of "quantile" // labels of Summaries and "le" labels of Histograms are formatted with // a trailing ".0" if they would otherwise look like integer numbers // (which changes the identity of the resulting series on the Prometheus // server). EnableOpenMetrics bool } // gzipAccepted returns whether the client will accept gzip-encoded content. func gzipAccepted(header http.Header) bool { a := header.Get(acceptEncodingHeader) parts := strings.Split(a, ",") for _, part := range parts { part = strings.TrimSpace(part) if part == "gzip" || strings.HasPrefix(part, "gzip;") { return true } } return false } // httpError removes any content-encoding header and then calls http.Error with // the provided error and http.StatusInternalServerError. Error contents is // supposed to be uncompressed plain text. Same as with a plain http.Error, this // must not be called if the header or any payload has already been sent. func httpError(rsp http.ResponseWriter, err error) { rsp.Header().Del(contentEncodingHeader) http.Error( rsp, "An error has occurred while serving metrics:\n\n"+err.Error(), http.StatusInternalServerError, ) } client_golang-1.11.0/prometheus/promhttp/http_test.go000066400000000000000000000212251405741072000230160ustar00rootroot00000000000000// Copyright 2016 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package promhttp import ( "bytes" "errors" "log" "net/http" "net/http/httptest" "strings" "testing" "time" "github.com/prometheus/client_golang/prometheus" ) type errorCollector struct{} func (e errorCollector) Describe(ch chan<- *prometheus.Desc) { ch <- prometheus.NewDesc("invalid_metric", "not helpful", nil, nil) } func (e errorCollector) Collect(ch chan<- prometheus.Metric) { ch <- prometheus.NewInvalidMetric( prometheus.NewDesc("invalid_metric", "not helpful", nil, nil), errors.New("collect error"), ) } type blockingCollector struct { CollectStarted, Block chan struct{} } func (b blockingCollector) Describe(ch chan<- *prometheus.Desc) { ch <- prometheus.NewDesc("dummy_desc", "not helpful", nil, nil) } func (b blockingCollector) Collect(ch chan<- prometheus.Metric) { select { case b.CollectStarted <- struct{}{}: default: } // Collects nothing, just waits for a channel receive. 
<-b.Block } func TestHandlerErrorHandling(t *testing.T) { // Create a registry that collects a MetricFamily with two elements, // another with one, and reports an error. Further down, we'll use the // same registry in the HandlerOpts. reg := prometheus.NewRegistry() cnt := prometheus.NewCounter(prometheus.CounterOpts{ Name: "the_count", Help: "Ah-ah-ah! Thunder and lightning!", }) reg.MustRegister(cnt) cntVec := prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "name", Help: "docstring", ConstLabels: prometheus.Labels{"constname": "constvalue"}, }, []string{"labelname"}, ) cntVec.WithLabelValues("val1").Inc() cntVec.WithLabelValues("val2").Inc() reg.MustRegister(cntVec) reg.MustRegister(errorCollector{}) logBuf := &bytes.Buffer{} logger := log.New(logBuf, "", 0) writer := httptest.NewRecorder() request, _ := http.NewRequest("GET", "/", nil) request.Header.Add("Accept", "test/plain") errorHandler := HandlerFor(reg, HandlerOpts{ ErrorLog: logger, ErrorHandling: HTTPErrorOnError, Registry: reg, }) continueHandler := HandlerFor(reg, HandlerOpts{ ErrorLog: logger, ErrorHandling: ContinueOnError, Registry: reg, }) panicHandler := HandlerFor(reg, HandlerOpts{ ErrorLog: logger, ErrorHandling: PanicOnError, Registry: reg, }) wantMsg := `error gathering metrics: error collecting metric Desc{fqName: "invalid_metric", help: "not helpful", constLabels: {}, variableLabels: []}: collect error ` wantErrorBody := `An error has occurred while serving metrics: error collecting metric Desc{fqName: "invalid_metric", help: "not helpful", constLabels: {}, variableLabels: []}: collect error ` wantOKBody1 := `# HELP name docstring # TYPE name counter name{constname="constvalue",labelname="val1"} 1 name{constname="constvalue",labelname="val2"} 1 # HELP promhttp_metric_handler_errors_total Total number of internal errors encountered by the promhttp metric handler. # TYPE promhttp_metric_handler_errors_total counter promhttp_metric_handler_errors_total{cause="encoding"} 0 promhttp_metric_handler_errors_total{cause="gathering"} 1 # HELP the_count Ah-ah-ah! Thunder and lightning! # TYPE the_count counter the_count 0 ` // It might happen that counting the gathering error makes it to the // promhttp_metric_handler_errors_total counter before it is gathered // itself. Thus, we have to bodies that are acceptable for the test. wantOKBody2 := `# HELP name docstring # TYPE name counter name{constname="constvalue",labelname="val1"} 1 name{constname="constvalue",labelname="val2"} 1 # HELP promhttp_metric_handler_errors_total Total number of internal errors encountered by the promhttp metric handler. # TYPE promhttp_metric_handler_errors_total counter promhttp_metric_handler_errors_total{cause="encoding"} 0 promhttp_metric_handler_errors_total{cause="gathering"} 2 # HELP the_count Ah-ah-ah! Thunder and lightning! 
# TYPE the_count counter the_count 0 ` errorHandler.ServeHTTP(writer, request) if got, want := writer.Code, http.StatusInternalServerError; got != want { t.Errorf("got HTTP status code %d, want %d", got, want) } if got := logBuf.String(); got != wantMsg { t.Errorf("got log message:\n%s\nwant log message:\n%s\n", got, wantMsg) } if got := writer.Body.String(); got != wantErrorBody { t.Errorf("got body:\n%s\nwant body:\n%s\n", got, wantErrorBody) } logBuf.Reset() writer.Body.Reset() writer.Code = http.StatusOK continueHandler.ServeHTTP(writer, request) if got, want := writer.Code, http.StatusOK; got != want { t.Errorf("got HTTP status code %d, want %d", got, want) } if got := logBuf.String(); got != wantMsg { t.Errorf("got log message %q, want %q", got, wantMsg) } if got := writer.Body.String(); got != wantOKBody1 && got != wantOKBody2 { t.Errorf("got body %q, want either %q or %q", got, wantOKBody1, wantOKBody2) } defer func() { if err := recover(); err == nil { t.Error("expected panic from panicHandler") } }() panicHandler.ServeHTTP(writer, request) } func TestInstrumentMetricHandler(t *testing.T) { reg := prometheus.NewRegistry() handler := InstrumentMetricHandler(reg, HandlerFor(reg, HandlerOpts{})) // Do it again to test idempotency. InstrumentMetricHandler(reg, HandlerFor(reg, HandlerOpts{})) writer := httptest.NewRecorder() request, _ := http.NewRequest("GET", "/", nil) request.Header.Add("Accept", "test/plain") handler.ServeHTTP(writer, request) if got, want := writer.Code, http.StatusOK; got != want { t.Errorf("got HTTP status code %d, want %d", got, want) } want := "promhttp_metric_handler_requests_in_flight 1\n" if got := writer.Body.String(); !strings.Contains(got, want) { t.Errorf("got body %q, does not contain %q", got, want) } want = "promhttp_metric_handler_requests_total{code=\"200\"} 0\n" if got := writer.Body.String(); !strings.Contains(got, want) { t.Errorf("got body %q, does not contain %q", got, want) } writer.Body.Reset() handler.ServeHTTP(writer, request) if got, want := writer.Code, http.StatusOK; got != want { t.Errorf("got HTTP status code %d, want %d", got, want) } want = "promhttp_metric_handler_requests_in_flight 1\n" if got := writer.Body.String(); !strings.Contains(got, want) { t.Errorf("got body %q, does not contain %q", got, want) } want = "promhttp_metric_handler_requests_total{code=\"200\"} 1\n" if got := writer.Body.String(); !strings.Contains(got, want) { t.Errorf("got body %q, does not contain %q", got, want) } } func TestHandlerMaxRequestsInFlight(t *testing.T) { reg := prometheus.NewRegistry() handler := HandlerFor(reg, HandlerOpts{MaxRequestsInFlight: 1}) w1 := httptest.NewRecorder() w2 := httptest.NewRecorder() w3 := httptest.NewRecorder() request, _ := http.NewRequest("GET", "/", nil) request.Header.Add("Accept", "test/plain") c := blockingCollector{Block: make(chan struct{}), CollectStarted: make(chan struct{}, 1)} reg.MustRegister(c) rq1Done := make(chan struct{}) go func() { handler.ServeHTTP(w1, request) close(rq1Done) }() <-c.CollectStarted handler.ServeHTTP(w2, request) if got, want := w2.Code, http.StatusServiceUnavailable; got != want { t.Errorf("got HTTP status code %d, want %d", got, want) } if got, want := w2.Body.String(), "Limit of concurrent requests reached (1), try again later.\n"; got != want { t.Errorf("got body %q, want %q", got, want) } close(c.Block) <-rq1Done handler.ServeHTTP(w3, request) if got, want := w3.Code, http.StatusOK; got != want { t.Errorf("got HTTP status code %d, want %d", got, want) } } func TestHandlerTimeout(t 
*testing.T) { reg := prometheus.NewRegistry() handler := HandlerFor(reg, HandlerOpts{Timeout: time.Millisecond}) w := httptest.NewRecorder() request, _ := http.NewRequest("GET", "/", nil) request.Header.Add("Accept", "test/plain") c := blockingCollector{Block: make(chan struct{}), CollectStarted: make(chan struct{}, 1)} reg.MustRegister(c) handler.ServeHTTP(w, request) if got, want := w.Code, http.StatusServiceUnavailable; got != want { t.Errorf("got HTTP status code %d, want %d", got, want) } if got, want := w.Body.String(), "Exceeded configured timeout of 1ms.\n"; got != want { t.Errorf("got body %q, want %q", got, want) } close(c.Block) // To not leak a goroutine. } client_golang-1.11.0/prometheus/promhttp/instrument_client.go000066400000000000000000000174531405741072000245560ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package promhttp import ( "crypto/tls" "net/http" "net/http/httptrace" "time" "github.com/prometheus/client_golang/prometheus" ) // The RoundTripperFunc type is an adapter to allow the use of ordinary // functions as RoundTrippers. If f is a function with the appropriate // signature, RountTripperFunc(f) is a RoundTripper that calls f. type RoundTripperFunc func(req *http.Request) (*http.Response, error) // RoundTrip implements the RoundTripper interface. func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { return rt(r) } // InstrumentRoundTripperInFlight is a middleware that wraps the provided // http.RoundTripper. It sets the provided prometheus.Gauge to the number of // requests currently handled by the wrapped http.RoundTripper. // // See the example for ExampleInstrumentRoundTripperDuration for example usage. func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc { return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { gauge.Inc() defer gauge.Dec() return next.RoundTrip(r) }) } // InstrumentRoundTripperCounter is a middleware that wraps the provided // http.RoundTripper to observe the request result with the provided CounterVec. // The CounterVec must have zero, one, or two non-const non-curried labels. For // those, the only allowed label names are "code" and "method". The function // panics otherwise. Partitioning of the CounterVec happens by HTTP status code // and/or HTTP method if the respective instance label names are present in the // CounterVec. For unpartitioned counting, use a CounterVec with zero labels. // // If the wrapped RoundTripper panics or returns a non-nil error, the Counter // is not incremented. // // See the example for ExampleInstrumentRoundTripperDuration for example usage. 
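//
// A minimal sketch (the metric name and help text are assumptions for
// illustration):
//
//	apiCounter := prometheus.NewCounterVec(
//		prometheus.CounterOpts{
//			Name: "client_api_requests_total",
//			Help: "Requests from the wrapped client by code and method.",
//		},
//		[]string{"code", "method"},
//	)
//	prometheus.MustRegister(apiCounter)
//	client := &http.Client{
//		Transport: InstrumentRoundTripperCounter(apiCounter, http.DefaultTransport),
//	}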
func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc { code, method := checkLabels(counter) return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { resp, err := next.RoundTrip(r) if err == nil { counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc() } return resp, err }) } // InstrumentRoundTripperDuration is a middleware that wraps the provided // http.RoundTripper to observe the request duration with the provided // ObserverVec. The ObserverVec must have zero, one, or two non-const // non-curried labels. For those, the only allowed label names are "code" and // "method". The function panics otherwise. The Observe method of the Observer // in the ObserverVec is called with the request duration in // seconds. Partitioning happens by HTTP status code and/or HTTP method if the // respective instance label names are present in the ObserverVec. For // unpartitioned observations, use an ObserverVec with zero labels. Note that // partitioning of Histograms is expensive and should be used judiciously. // // If the wrapped RoundTripper panics or returns a non-nil error, no values are // reported. // // Note that this method is only guaranteed to never observe negative durations // if used with Go1.9+. func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc { code, method := checkLabels(obs) return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { start := time.Now() resp, err := next.RoundTrip(r) if err == nil { obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds()) } return resp, err }) } // InstrumentTrace is used to offer flexibility in instrumenting the available // httptrace.ClientTrace hook functions. Each function is passed a float64 // representing the time in seconds since the start of the http request. A user // may choose to use separately buckets Histograms, or implement custom // instance labels on a per function basis. type InstrumentTrace struct { GotConn func(float64) PutIdleConn func(float64) GotFirstResponseByte func(float64) Got100Continue func(float64) DNSStart func(float64) DNSDone func(float64) ConnectStart func(float64) ConnectDone func(float64) TLSHandshakeStart func(float64) TLSHandshakeDone func(float64) WroteHeaders func(float64) Wait100Continue func(float64) WroteRequest func(float64) } // InstrumentRoundTripperTrace is a middleware that wraps the provided // RoundTripper and reports times to hook functions provided in the // InstrumentTrace struct. Hook functions that are not present in the provided // InstrumentTrace struct are ignored. Times reported to the hook functions are // time since the start of the request. Only with Go1.9+, those times are // guaranteed to never be negative. (Earlier Go versions are not using a // monotonic clock.) Note that partitioning of Histograms is expensive and // should be used judiciously. // // For hook functions that receive an error as an argument, no observations are // made in the event of a non-nil error value. // // See the example for ExampleInstrumentRoundTripperDuration for example usage. 
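//
// A minimal sketch observing only DNS resolution time (the histogram name
// and help text are assumptions for illustration):
//
//	dnsHist := prometheus.NewHistogram(prometheus.HistogramOpts{
//		Name: "dns_lookup_duration_seconds",
//		Help: "Time from request start until DNS resolution completed.",
//	})
//	prometheus.MustRegister(dnsHist)
//	trace := &InstrumentTrace{
//		DNSDone: func(t float64) { dnsHist.Observe(t) },
//	}
//	client := &http.Client{
//		Transport: InstrumentRoundTripperTrace(trace, http.DefaultTransport),
//	}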
func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { start := time.Now() trace := &httptrace.ClientTrace{ GotConn: func(_ httptrace.GotConnInfo) { if it.GotConn != nil { it.GotConn(time.Since(start).Seconds()) } }, PutIdleConn: func(err error) { if err != nil { return } if it.PutIdleConn != nil { it.PutIdleConn(time.Since(start).Seconds()) } }, DNSStart: func(_ httptrace.DNSStartInfo) { if it.DNSStart != nil { it.DNSStart(time.Since(start).Seconds()) } }, DNSDone: func(_ httptrace.DNSDoneInfo) { if it.DNSDone != nil { it.DNSDone(time.Since(start).Seconds()) } }, ConnectStart: func(_, _ string) { if it.ConnectStart != nil { it.ConnectStart(time.Since(start).Seconds()) } }, ConnectDone: func(_, _ string, err error) { if err != nil { return } if it.ConnectDone != nil { it.ConnectDone(time.Since(start).Seconds()) } }, GotFirstResponseByte: func() { if it.GotFirstResponseByte != nil { it.GotFirstResponseByte(time.Since(start).Seconds()) } }, Got100Continue: func() { if it.Got100Continue != nil { it.Got100Continue(time.Since(start).Seconds()) } }, TLSHandshakeStart: func() { if it.TLSHandshakeStart != nil { it.TLSHandshakeStart(time.Since(start).Seconds()) } }, TLSHandshakeDone: func(_ tls.ConnectionState, err error) { if err != nil { return } if it.TLSHandshakeDone != nil { it.TLSHandshakeDone(time.Since(start).Seconds()) } }, WroteHeaders: func() { if it.WroteHeaders != nil { it.WroteHeaders(time.Since(start).Seconds()) } }, Wait100Continue: func() { if it.Wait100Continue != nil { it.Wait100Continue(time.Since(start).Seconds()) } }, WroteRequest: func(_ httptrace.WroteRequestInfo) { if it.WroteRequest != nil { it.WroteRequest(time.Since(start).Seconds()) } }, } r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace)) return next.RoundTrip(r) }) } client_golang-1.11.0/prometheus/promhttp/instrument_client_test.go000066400000000000000000000175371405741072000256200ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package promhttp import ( "context" "log" "net/http" "net/http/httptest" "strings" "testing" "time" "github.com/prometheus/client_golang/prometheus" ) func makeInstrumentedClient() (*http.Client, *prometheus.Registry) { client := http.DefaultClient client.Timeout = 1 * time.Second reg := prometheus.NewRegistry() inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{ Name: "client_in_flight_requests", Help: "A gauge of in-flight requests for the wrapped client.", }) counter := prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "client_api_requests_total", Help: "A counter for requests from the wrapped client.", }, []string{"code", "method"}, ) dnsLatencyVec := prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "dns_duration_seconds", Help: "Trace dns latency histogram.", Buckets: []float64{.005, .01, .025, .05}, }, []string{"event"}, ) tlsLatencyVec := prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "tls_duration_seconds", Help: "Trace tls latency histogram.", Buckets: []float64{.05, .1, .25, .5}, }, []string{"event"}, ) histVec := prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "request_duration_seconds", Help: "A histogram of request latencies.", Buckets: prometheus.DefBuckets, }, []string{"method"}, ) reg.MustRegister(counter, tlsLatencyVec, dnsLatencyVec, histVec, inFlightGauge) trace := &InstrumentTrace{ DNSStart: func(t float64) { dnsLatencyVec.WithLabelValues("dns_start").Observe(t) }, DNSDone: func(t float64) { dnsLatencyVec.WithLabelValues("dns_done").Observe(t) }, TLSHandshakeStart: func(t float64) { tlsLatencyVec.WithLabelValues("tls_handshake_start").Observe(t) }, TLSHandshakeDone: func(t float64) { tlsLatencyVec.WithLabelValues("tls_handshake_done").Observe(t) }, } client.Transport = InstrumentRoundTripperInFlight(inFlightGauge, InstrumentRoundTripperCounter(counter, InstrumentRoundTripperTrace(trace, InstrumentRoundTripperDuration(histVec, http.DefaultTransport), ), ), ) return client, reg } func TestClientMiddlewareAPI(t *testing.T) { client, reg := makeInstrumentedClient() backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) })) defer backend.Close() resp, err := client.Get(backend.URL) if err != nil { t.Fatal(err) } defer resp.Body.Close() mfs, err := reg.Gather() if err != nil { t.Fatal(err) } if want, got := 3, len(mfs); want != got { t.Fatalf("unexpected number of metric families gathered, want %d, got %d", want, got) } for _, mf := range mfs { if len(mf.Metric) == 0 { t.Errorf("metric family %s must not be empty", mf.GetName()) } } } func TestClientMiddlewareAPIWithRequestContext(t *testing.T) { client, reg := makeInstrumentedClient() backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) })) defer backend.Close() req, err := http.NewRequest("GET", backend.URL, nil) if err != nil { t.Fatalf("%v", err) } // Set a context with a long timeout. 
ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) defer cancel() req = req.WithContext(ctx) resp, err := client.Do(req) if err != nil { t.Fatal(err) } defer resp.Body.Close() mfs, err := reg.Gather() if err != nil { t.Fatal(err) } if want, got := 3, len(mfs); want != got { t.Fatalf("unexpected number of metric families gathered, want %d, got %d", want, got) } for _, mf := range mfs { if len(mf.Metric) == 0 { t.Errorf("metric family %s must not be empty", mf.GetName()) } } } func TestClientMiddlewareAPIWithRequestContextTimeout(t *testing.T) { client, _ := makeInstrumentedClient() // Slow testserver responding in 100ms. backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { time.Sleep(100 * time.Millisecond) w.WriteHeader(http.StatusOK) })) defer backend.Close() req, err := http.NewRequest("GET", backend.URL, nil) if err != nil { t.Fatalf("%v", err) } // Set a context with a short timeout. ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) defer cancel() req = req.WithContext(ctx) _, err = client.Do(req) if err == nil { t.Fatal("did not get timeout error") } expectedMsg := "context deadline exceeded" if !strings.Contains(err.Error(), expectedMsg) { t.Fatalf("unexpected error: %q, expect error: %q", err.Error(), expectedMsg) } } func ExampleInstrumentRoundTripperDuration() { client := http.DefaultClient client.Timeout = 1 * time.Second inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{ Name: "client_in_flight_requests", Help: "A gauge of in-flight requests for the wrapped client.", }) counter := prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "client_api_requests_total", Help: "A counter for requests from the wrapped client.", }, []string{"code", "method"}, ) // dnsLatencyVec uses custom buckets based on expected dns durations. // It has an instance label "event", which is set in the // DNSStart and DNSDonehook functions defined in the // InstrumentTrace struct below. dnsLatencyVec := prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "dns_duration_seconds", Help: "Trace dns latency histogram.", Buckets: []float64{.005, .01, .025, .05}, }, []string{"event"}, ) // tlsLatencyVec uses custom buckets based on expected tls durations. // It has an instance label "event", which is set in the // TLSHandshakeStart and TLSHandshakeDone hook functions defined in the // InstrumentTrace struct below. tlsLatencyVec := prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "tls_duration_seconds", Help: "Trace tls latency histogram.", Buckets: []float64{.05, .1, .25, .5}, }, []string{"event"}, ) // histVec has no labels, making it a zero-dimensional ObserverVec. histVec := prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "request_duration_seconds", Help: "A histogram of request latencies.", Buckets: prometheus.DefBuckets, }, []string{}, ) // Register all of the metrics in the standard registry. prometheus.MustRegister(counter, tlsLatencyVec, dnsLatencyVec, histVec, inFlightGauge) // Define functions for the available httptrace.ClientTrace hook // functions that we want to instrument. 
trace := &InstrumentTrace{ DNSStart: func(t float64) { dnsLatencyVec.WithLabelValues("dns_start").Observe(t) }, DNSDone: func(t float64) { dnsLatencyVec.WithLabelValues("dns_done").Observe(t) }, TLSHandshakeStart: func(t float64) { tlsLatencyVec.WithLabelValues("tls_handshake_start").Observe(t) }, TLSHandshakeDone: func(t float64) { tlsLatencyVec.WithLabelValues("tls_handshake_done").Observe(t) }, } // Wrap the default RoundTripper with middleware. roundTripper := InstrumentRoundTripperInFlight(inFlightGauge, InstrumentRoundTripperCounter(counter, InstrumentRoundTripperTrace(trace, InstrumentRoundTripperDuration(histVec, http.DefaultTransport), ), ), ) // Set the RoundTripper on our client. client.Transport = roundTripper resp, err := client.Get("http://google.com") if err != nil { log.Printf("error: %v", err) } defer resp.Body.Close() } client_golang-1.11.0/prometheus/promhttp/instrument_server.go000066400000000000000000000335441405741072000246050ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package promhttp import ( "errors" "net/http" "strconv" "strings" "time" dto "github.com/prometheus/client_model/go" "github.com/prometheus/client_golang/prometheus" ) // magicString is used for the hacky label test in checkLabels. Remove once fixed. const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" // InstrumentHandlerInFlight is a middleware that wraps the provided // http.Handler. It sets the provided prometheus.Gauge to the number of // requests currently handled by the wrapped http.Handler. // // See the example for InstrumentHandlerDuration for example usage. func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { g.Inc() defer g.Dec() next.ServeHTTP(w, r) }) } // InstrumentHandlerDuration is a middleware that wraps the provided // http.Handler to observe the request duration with the provided ObserverVec. // The ObserverVec must have valid metric and label names and must have zero, // one, or two non-const non-curried labels. For those, the only allowed label // names are "code" and "method". The function panics otherwise. The Observe // method of the Observer in the ObserverVec is called with the request duration // in seconds. Partitioning happens by HTTP status code and/or HTTP method if // the respective instance label names are present in the ObserverVec. For // unpartitioned observations, use an ObserverVec with zero labels. Note that // partitioning of Histograms is expensive and should be used judiciously. // // If the wrapped Handler does not set a status code, a status code of 200 is assumed. // // If the wrapped Handler panics, no values are reported. // // Note that this method is only guaranteed to never observe negative durations // if used with Go1.9+. 
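//
// A minimal sketch (the metric name, buckets, and myHandler are assumptions
// for illustration):
//
//	durations := prometheus.NewHistogramVec(
//		prometheus.HistogramOpts{
//			Name:    "http_request_duration_seconds",
//			Help:    "A histogram of request latencies.",
//			Buckets: prometheus.DefBuckets,
//		},
//		[]string{"code", "method"},
//	)
//	prometheus.MustRegister(durations)
//	http.Handle("/", InstrumentHandlerDuration(durations, myHandler))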
func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { code, method := checkLabels(obs) if code { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { now := time.Now() d := newDelegator(w, nil) next.ServeHTTP(d, r) obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds()) }) } return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { now := time.Now() next.ServeHTTP(w, r) obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds()) }) } // InstrumentHandlerCounter is a middleware that wraps the provided http.Handler // to observe the request result with the provided CounterVec. The CounterVec // must have valid metric and label names and must have zero, one, or two // non-const non-curried labels. For those, the only allowed label names are // "code" and "method". The function panics otherwise. Partitioning of the // CounterVec happens by HTTP status code and/or HTTP method if the respective // instance label names are present in the CounterVec. For unpartitioned // counting, use a CounterVec with zero labels. // // If the wrapped Handler does not set a status code, a status code of 200 is assumed. // // If the wrapped Handler panics, the Counter is not incremented. // // See the example for InstrumentHandlerDuration for example usage. func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc { code, method := checkLabels(counter) if code { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) counter.With(labels(code, method, r.Method, d.Status())).Inc() }) } return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) counter.With(labels(code, method, r.Method, 0)).Inc() }) } // InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided // http.Handler to observe with the provided ObserverVec the request duration // until the response headers are written. The ObserverVec must have valid // metric and label names and must have zero, one, or two non-const non-curried // labels. For those, the only allowed label names are "code" and "method". The // function panics otherwise. The Observe method of the Observer in the // ObserverVec is called with the request duration in seconds. Partitioning // happens by HTTP status code and/or HTTP method if the respective instance // label names are present in the ObserverVec. For unpartitioned observations, // use an ObserverVec with zero labels. Note that partitioning of Histograms is // expensive and should be used judiciously. // // If the wrapped Handler panics before calling WriteHeader, no value is // reported. // // Note that this method is only guaranteed to never observe negative durations // if used with Go1.9+. // // See the example for InstrumentHandlerDuration for example usage. func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { code, method := checkLabels(obs) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { now := time.Now() d := newDelegator(w, func(status int) { obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds()) }) next.ServeHTTP(d, r) }) } // InstrumentHandlerRequestSize is a middleware that wraps the provided // http.Handler to observe the request size with the provided ObserverVec. 
The // ObserverVec must have valid metric and label names and must have zero, one, // or two non-const non-curried labels. For those, the only allowed label names // are "code" and "method". The function panics otherwise. The Observe method of // the Observer in the ObserverVec is called with the request size in // bytes. Partitioning happens by HTTP status code and/or HTTP method if the // respective instance label names are present in the ObserverVec. For // unpartitioned observations, use an ObserverVec with zero labels. Note that // partitioning of Histograms is expensive and should be used judiciously. // // If the wrapped Handler does not set a status code, a status code of 200 is assumed. // // If the wrapped Handler panics, no values are reported. // // See the example for InstrumentHandlerDuration for example usage. func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { code, method := checkLabels(obs) if code { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) size := computeApproximateRequestSize(r) obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size)) }) } return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) size := computeApproximateRequestSize(r) obs.With(labels(code, method, r.Method, 0)).Observe(float64(size)) }) } // InstrumentHandlerResponseSize is a middleware that wraps the provided // http.Handler to observe the response size with the provided ObserverVec. The // ObserverVec must have valid metric and label names and must have zero, one, // or two non-const non-curried labels. For those, the only allowed label names // are "code" and "method". The function panics otherwise. The Observe method of // the Observer in the ObserverVec is called with the response size in // bytes. Partitioning happens by HTTP status code and/or HTTP method if the // respective instance label names are present in the ObserverVec. For // unpartitioned observations, use an ObserverVec with zero labels. Note that // partitioning of Histograms is expensive and should be used judiciously. // // If the wrapped Handler does not set a status code, a status code of 200 is assumed. // // If the wrapped Handler panics, no values are reported. // // See the example for InstrumentHandlerDuration for example usage. func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler { code, method := checkLabels(obs) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written())) }) } // checkLabels returns whether the provided Collector has a non-const, // non-curried label named "code" and/or "method". It panics if the provided // Collector does not have a Desc or has more than one Desc or its Desc is // invalid. It also panics if the Collector has any non-const, non-curried // labels that are not named "code" or "method". func checkLabels(c prometheus.Collector) (code bool, method bool) { // TODO(beorn7): Remove this hacky way to check for instance labels // once Descriptors can have their dimensionality queried. var ( desc *prometheus.Desc m prometheus.Metric pm dto.Metric lvs []string ) // Get the Desc from the Collector. 
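// A buffered channel of capacity one receives whatever Describe sends;
// the two selects below panic if the Collector provides no Desc or more
// than one Desc, respectively.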
descc := make(chan *prometheus.Desc, 1) c.Describe(descc) select { case desc = <-descc: default: panic("no description provided by collector") } select { case <-descc: panic("more than one description provided by collector") default: } close(descc) // Make sure the Collector has a valid Desc by registering it with a // temporary registry. prometheus.NewRegistry().MustRegister(c) // Create a ConstMetric with the Desc. Since we don't know how many // variable labels there are, try for as long as it needs. for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) { m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...) } // Write out the metric into a proto message and look at the labels. // If the value is not the magicString, it is a constLabel, which doesn't interest us. // If the label is curried, it doesn't interest us. // In all other cases, only "code" or "method" is allowed. if err := m.Write(&pm); err != nil { panic("error checking metric for labels") } for _, label := range pm.Label { name, value := label.GetName(), label.GetValue() if value != magicString || isLabelCurried(c, name) { continue } switch name { case "code": code = true case "method": method = true default: panic("metric partitioned with non-supported labels") } } return } func isLabelCurried(c prometheus.Collector, label string) bool { // This is even hackier than the label test above. // We essentially try to curry again and see if it works. // But for that, we need to type-convert to the two // types we use here, ObserverVec or *CounterVec. switch v := c.(type) { case *prometheus.CounterVec: if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { return false } case prometheus.ObserverVec: if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { return false } default: panic("unsupported metric vec type") } return true } // emptyLabels is a one-time allocation for non-partitioned metrics to avoid // unnecessary allocations on each request. var emptyLabels = prometheus.Labels{} func labels(code, method bool, reqMethod string, status int) prometheus.Labels { if !(code || method) { return emptyLabels } labels := prometheus.Labels{} if code { labels["code"] = sanitizeCode(status) } if method { labels["method"] = sanitizeMethod(reqMethod) } return labels } func computeApproximateRequestSize(r *http.Request) int { s := 0 if r.URL != nil { s += len(r.URL.String()) } s += len(r.Method) s += len(r.Proto) for name, values := range r.Header { s += len(name) for _, value := range values { s += len(value) } } s += len(r.Host) // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. if r.ContentLength != -1 { s += int(r.ContentLength) } return s } func sanitizeMethod(m string) string { switch m { case "GET", "get": return "get" case "PUT", "put": return "put" case "HEAD", "head": return "head" case "POST", "post": return "post" case "DELETE", "delete": return "delete" case "CONNECT", "connect": return "connect" case "OPTIONS", "options": return "options" case "NOTIFY", "notify": return "notify" default: return strings.ToLower(m) } } // If the wrapped http.Handler has not set a status code, i.e. the value is // currently 0, santizeCode will return 200, for consistency with behavior in // the stdlib. 
func sanitizeCode(s int) string { switch s { case 100: return "100" case 101: return "101" case 200, 0: return "200" case 201: return "201" case 202: return "202" case 203: return "203" case 204: return "204" case 205: return "205" case 206: return "206" case 300: return "300" case 301: return "301" case 302: return "302" case 304: return "304" case 305: return "305" case 307: return "307" case 400: return "400" case 401: return "401" case 402: return "402" case 403: return "403" case 404: return "404" case 405: return "405" case 406: return "406" case 407: return "407" case 408: return "408" case 409: return "409" case 410: return "410" case 411: return "411" case 412: return "412" case 413: return "413" case 414: return "414" case 415: return "415" case 416: return "416" case 417: return "417" case 418: return "418" case 500: return "500" case 501: return "501" case 502: return "502" case 503: return "503" case 504: return "504" case 505: return "505" case 428: return "428" case 429: return "429" case 431: return "431" case 511: return "511" default: return strconv.Itoa(s) } } client_golang-1.11.0/prometheus/promhttp/instrument_server_test.go000066400000000000000000000301661405741072000256410ustar00rootroot00000000000000// Copyright 2017 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package promhttp import ( "io" "log" "net/http" "net/http/httptest" "testing" "github.com/prometheus/client_golang/prometheus" ) func TestLabelCheck(t *testing.T) { scenarios := map[string]struct { metricName string // Defaults to "c". 
varLabels []string constLabels []string curriedLabels []string ok bool }{ "empty": { varLabels: []string{}, constLabels: []string{}, curriedLabels: []string{}, ok: true, }, "code as single var label": { varLabels: []string{"code"}, constLabels: []string{}, curriedLabels: []string{}, ok: true, }, "method as single var label": { varLabels: []string{"method"}, constLabels: []string{}, curriedLabels: []string{}, ok: true, }, "code and method as var labels": { varLabels: []string{"method", "code"}, constLabels: []string{}, curriedLabels: []string{}, ok: true, }, "valid case with all labels used": { varLabels: []string{"code", "method"}, constLabels: []string{"foo", "bar"}, curriedLabels: []string{"dings", "bums"}, ok: true, }, "all labels used with an invalid const label name": { varLabels: []string{"code", "method"}, constLabels: []string{"in-valid", "bar"}, curriedLabels: []string{"dings", "bums"}, ok: false, }, "unsupported var label": { varLabels: []string{"foo"}, constLabels: []string{}, curriedLabels: []string{}, ok: false, }, "mixed var labels": { varLabels: []string{"method", "foo", "code"}, constLabels: []string{}, curriedLabels: []string{}, ok: false, }, "unsupported var label but curried": { varLabels: []string{}, constLabels: []string{}, curriedLabels: []string{"foo"}, ok: true, }, "mixed var labels but unsupported curried": { varLabels: []string{"code", "method"}, constLabels: []string{}, curriedLabels: []string{"foo"}, ok: true, }, "supported label as const and curry": { varLabels: []string{}, constLabels: []string{"code"}, curriedLabels: []string{"method"}, ok: true, }, "supported label as const and curry with unsupported as var": { varLabels: []string{"foo"}, constLabels: []string{"code"}, curriedLabels: []string{"method"}, ok: false, }, "invalid name and otherwise empty": { metricName: "in-valid", varLabels: []string{}, constLabels: []string{}, curriedLabels: []string{}, ok: false, }, "invalid name with all the otherwise valid labels": { metricName: "in-valid", varLabels: []string{"code", "method"}, constLabels: []string{"foo", "bar"}, curriedLabels: []string{"dings", "bums"}, ok: false, }, } for name, sc := range scenarios { t.Run(name, func(t *testing.T) { metricName := sc.metricName if metricName == "" { metricName = "c" } constLabels := prometheus.Labels{} for _, l := range sc.constLabels { constLabels[l] = "dummy" } c := prometheus.NewCounterVec( prometheus.CounterOpts{ Name: metricName, Help: "c help", ConstLabels: constLabels, }, append(sc.varLabels, sc.curriedLabels...), ) o := prometheus.ObserverVec(prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: metricName, Help: "c help", ConstLabels: constLabels, }, append(sc.varLabels, sc.curriedLabels...), )) for _, l := range sc.curriedLabels { c = c.MustCurryWith(prometheus.Labels{l: "dummy"}) o = o.MustCurryWith(prometheus.Labels{l: "dummy"}) } func() { defer func() { if err := recover(); err != nil { if sc.ok { t.Error("unexpected panic:", err) } } else if !sc.ok { t.Error("expected panic") } }() InstrumentHandlerCounter(c, nil) }() func() { defer func() { if err := recover(); err != nil { if sc.ok { t.Error("unexpected panic:", err) } } else if !sc.ok { t.Error("expected panic") } }() InstrumentHandlerDuration(o, nil) }() if sc.ok { // Test if wantCode and wantMethod were detected correctly. 
var wantCode, wantMethod bool for _, l := range sc.varLabels { if l == "code" { wantCode = true } if l == "method" { wantMethod = true } } gotCode, gotMethod := checkLabels(c) if gotCode != wantCode { t.Errorf("wanted code=%t for counter, got code=%t", wantCode, gotCode) } if gotMethod != wantMethod { t.Errorf("wanted method=%t for counter, got method=%t", wantMethod, gotMethod) } gotCode, gotMethod = checkLabels(o) if gotCode != wantCode { t.Errorf("wanted code=%t for observer, got code=%t", wantCode, gotCode) } if gotMethod != wantMethod { t.Errorf("wanted method=%t for observer, got method=%t", wantMethod, gotMethod) } } }) } } func TestMiddlewareAPI(t *testing.T) { reg := prometheus.NewRegistry() inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{ Name: "in_flight_requests", Help: "A gauge of requests currently being served by the wrapped handler.", }) counter := prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "api_requests_total", Help: "A counter for requests to the wrapped handler.", }, []string{"code", "method"}, ) histVec := prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "response_duration_seconds", Help: "A histogram of request latencies.", Buckets: prometheus.DefBuckets, ConstLabels: prometheus.Labels{"handler": "api"}, }, []string{"method"}, ) writeHeaderVec := prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "write_header_duration_seconds", Help: "A histogram of time to first write latencies.", Buckets: prometheus.DefBuckets, ConstLabels: prometheus.Labels{"handler": "api"}, }, []string{}, ) responseSize := prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "push_request_size_bytes", Help: "A histogram of request sizes for requests.", Buckets: []float64{200, 500, 900, 1500}, }, []string{}, ) handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("OK")) }) reg.MustRegister(inFlightGauge, counter, histVec, responseSize, writeHeaderVec) chain := InstrumentHandlerInFlight(inFlightGauge, InstrumentHandlerCounter(counter, InstrumentHandlerDuration(histVec, InstrumentHandlerTimeToWriteHeader(writeHeaderVec, InstrumentHandlerResponseSize(responseSize, handler), ), ), ), ) r, _ := http.NewRequest("GET", "www.example.com", nil) w := httptest.NewRecorder() chain.ServeHTTP(w, r) } func TestInstrumentTimeToFirstWrite(t *testing.T) { var i int dobs := &responseWriterDelegator{ ResponseWriter: httptest.NewRecorder(), observeWriteHeader: func(status int) { i = status }, } d := newDelegator(dobs, nil) d.WriteHeader(http.StatusOK) if i != http.StatusOK { t.Fatalf("failed to execute observeWriteHeader") } } // testResponseWriter is an http.ResponseWriter that also implements // http.CloseNotifier, http.Flusher, and io.ReaderFrom. type testResponseWriter struct { closeNotifyCalled, flushCalled, readFromCalled bool } func (t *testResponseWriter) Header() http.Header { return nil } func (t *testResponseWriter) Write([]byte) (int, error) { return 0, nil } func (t *testResponseWriter) WriteHeader(int) {} func (t *testResponseWriter) CloseNotify() <-chan bool { t.closeNotifyCalled = true return nil } func (t *testResponseWriter) Flush() { t.flushCalled = true } func (t *testResponseWriter) ReadFrom(io.Reader) (int64, error) { t.readFromCalled = true return 0, nil } // testFlusher is an http.ResponseWriter that also implements http.Flusher. 
type testFlusher struct { flushCalled bool } func (t *testFlusher) Header() http.Header { return nil } func (t *testFlusher) Write([]byte) (int, error) { return 0, nil } func (t *testFlusher) WriteHeader(int) {} func (t *testFlusher) Flush() { t.flushCalled = true } func TestInterfaceUpgrade(t *testing.T) { w := &testResponseWriter{} d := newDelegator(w, nil) //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users. d.(http.CloseNotifier).CloseNotify() if !w.closeNotifyCalled { t.Error("CloseNotify not called") } d.(http.Flusher).Flush() if !w.flushCalled { t.Error("Flush not called") } d.(io.ReaderFrom).ReadFrom(nil) if !w.readFromCalled { t.Error("ReadFrom not called") } if _, ok := d.(http.Hijacker); ok { t.Error("delegator unexpectedly implements http.Hijacker") } f := &testFlusher{} d = newDelegator(f, nil) //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users. if _, ok := d.(http.CloseNotifier); ok { t.Error("delegator unexpectedly implements http.CloseNotifier") } d.(http.Flusher).Flush() if !w.flushCalled { t.Error("Flush not called") } if _, ok := d.(io.ReaderFrom); ok { t.Error("delegator unexpectedly implements io.ReaderFrom") } if _, ok := d.(http.Hijacker); ok { t.Error("delegator unexpectedly implements http.Hijacker") } } func ExampleInstrumentHandlerDuration() { inFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{ Name: "in_flight_requests", Help: "A gauge of requests currently being served by the wrapped handler.", }) counter := prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "api_requests_total", Help: "A counter for requests to the wrapped handler.", }, []string{"code", "method"}, ) // duration is partitioned by the HTTP method and handler. It uses custom // buckets based on the expected request duration. duration := prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "request_duration_seconds", Help: "A histogram of latencies for requests.", Buckets: []float64{.25, .5, 1, 2.5, 5, 10}, }, []string{"handler", "method"}, ) // responseSize has no labels, making it a zero-dimensional // ObserverVec. responseSize := prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "response_size_bytes", Help: "A histogram of response sizes for requests.", Buckets: []float64{200, 500, 900, 1500}, }, []string{}, ) // Create the handlers that will be wrapped by the middleware. pushHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("Push")) }) pullHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("Pull")) }) // Register all of the metrics in the standard registry. prometheus.MustRegister(inFlightGauge, counter, duration, responseSize) // Instrument the handlers with all the metrics, injecting the "handler" // label by currying. 
pushChain := InstrumentHandlerInFlight(inFlightGauge, InstrumentHandlerDuration(duration.MustCurryWith(prometheus.Labels{"handler": "push"}), InstrumentHandlerCounter(counter, InstrumentHandlerResponseSize(responseSize, pushHandler), ), ), ) pullChain := InstrumentHandlerInFlight(inFlightGauge, InstrumentHandlerDuration(duration.MustCurryWith(prometheus.Labels{"handler": "pull"}), InstrumentHandlerCounter(counter, InstrumentHandlerResponseSize(responseSize, pullHandler), ), ), ) http.Handle("/metrics", Handler()) http.Handle("/push", pushChain) http.Handle("/pull", pullChain) if err := http.ListenAndServe(":3000", nil); err != nil { log.Fatal(err) } } client_golang-1.11.0/prometheus/push/000077500000000000000000000000001405741072000175515ustar00rootroot00000000000000client_golang-1.11.0/prometheus/push/example_add_from_gatherer_test.go000066400000000000000000000053211405741072000263070ustar00rootroot00000000000000// Copyright 2016 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package push_test import ( "fmt" "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/push" ) var ( completionTime = prometheus.NewGauge(prometheus.GaugeOpts{ Name: "db_backup_last_completion_timestamp_seconds", Help: "The timestamp of the last completion of a DB backup, successful or not.", }) successTime = prometheus.NewGauge(prometheus.GaugeOpts{ Name: "db_backup_last_success_timestamp_seconds", Help: "The timestamp of the last successful completion of a DB backup.", }) duration = prometheus.NewGauge(prometheus.GaugeOpts{ Name: "db_backup_duration_seconds", Help: "The duration of the last DB backup in seconds.", }) records = prometheus.NewGauge(prometheus.GaugeOpts{ Name: "db_backup_records_processed", Help: "The number of records processed in the last DB backup.", }) ) func performBackup() (int, error) { // Perform the backup and return the number of backed up records and any // applicable error. // ... return 42, nil } func ExamplePusher_Add() { // We use a registry here to benefit from the consistency checks that // happen during registration. registry := prometheus.NewRegistry() registry.MustRegister(completionTime, duration, records) // Note that successTime is not registered. pusher := push.New("http://pushgateway:9091", "db_backup").Gatherer(registry) start := time.Now() n, err := performBackup() records.Set(float64(n)) // Note that time.Since only uses a monotonic clock in Go1.9+. duration.Set(time.Since(start).Seconds()) completionTime.SetToCurrentTime() if err != nil { fmt.Println("DB backup failed:", err) } else { // Add successTime to pusher only in case of success. // We could as well register it with the registry. // This example, however, demonstrates that you can // mix Gatherers and Collectors when handling a Pusher. pusher.Collector(successTime) successTime.SetToCurrentTime() } // Add is used here rather than Push to not delete a previously pushed // success timestamp in case of a failure of this backup. 
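// (Push would use HTTP PUT and replace every metric in the group, whereas Add
// uses HTTP POST and only replaces metrics with the same name.)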
if err := pusher.Add(); err != nil { fmt.Println("Could not push to Pushgateway:", err) } } client_golang-1.11.0/prometheus/push/examples_test.go000066400000000000000000000022751405741072000227630ustar00rootroot00000000000000// Copyright 2016 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package push_test import ( "fmt" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/push" ) func ExamplePusher_Push() { completionTime := prometheus.NewGauge(prometheus.GaugeOpts{ Name: "db_backup_last_completion_timestamp_seconds", Help: "The timestamp of the last successful completion of a DB backup.", }) completionTime.SetToCurrentTime() if err := push.New("http://pushgateway:9091", "db_backup"). Collector(completionTime). Grouping("db", "customers"). Push(); err != nil { fmt.Println("Could not push completion time to Pushgateway:", err) } } client_golang-1.11.0/prometheus/push/push.go000066400000000000000000000251671405741072000210720ustar00rootroot00000000000000// Copyright 2015 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package push provides functions to push metrics to a Pushgateway. It uses a // builder approach. Create a Pusher with New and then add the various options // by using its methods, finally calling Add or Push, like this: // // // Easy case: // push.New("http://example.org/metrics", "my_job").Gatherer(myRegistry).Push() // // // Complex case: // push.New("http://example.org/metrics", "my_job"). // Collector(myCollector1). // Collector(myCollector2). // Grouping("zone", "xy"). // Client(&myHTTPClient). // BasicAuth("top", "secret"). // Add() // // See the examples section for more detailed examples. // // See the documentation of the Pushgateway to understand the meaning of // the grouping key and the differences between Push and Add: // https://github.com/prometheus/pushgateway package push import ( "bytes" "encoding/base64" "errors" "fmt" "io/ioutil" "net/http" "net/url" "strings" "github.com/prometheus/common/expfmt" "github.com/prometheus/common/model" "github.com/prometheus/client_golang/prometheus" ) const ( contentTypeHeader = "Content-Type" // base64Suffix is appended to a label name in the request URL path to // mark the following label value as base64 encoded. 
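// For example, the job name "test/job" contains a '/' and is therefore sent
// as the URL path components "job@base64/dGVzdC9qb2I".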
base64Suffix = "@base64" ) var errJobEmpty = errors.New("job name is empty") // HTTPDoer is an interface for the one method of http.Client that is used by Pusher type HTTPDoer interface { Do(*http.Request) (*http.Response, error) } // Pusher manages a push to the Pushgateway. Use New to create one, configure it // with its methods, and finally use the Add or Push method to push. type Pusher struct { error error url, job string grouping map[string]string gatherers prometheus.Gatherers registerer prometheus.Registerer client HTTPDoer useBasicAuth bool username, password string expfmt expfmt.Format } // New creates a new Pusher to push to the provided URL with the provided job // name (which must not be empty). You can use just host:port or ip:port as url, // in which case “http://” is added automatically. Alternatively, include the // schema in the URL. However, do not include the “/metrics/jobs/…” part. func New(url, job string) *Pusher { var ( reg = prometheus.NewRegistry() err error ) if job == "" { err = errJobEmpty } if !strings.Contains(url, "://") { url = "http://" + url } if strings.HasSuffix(url, "/") { url = url[:len(url)-1] } return &Pusher{ error: err, url: url, job: job, grouping: map[string]string{}, gatherers: prometheus.Gatherers{reg}, registerer: reg, client: &http.Client{}, expfmt: expfmt.FmtProtoDelim, } } // Push collects/gathers all metrics from all Collectors and Gatherers added to // this Pusher. Then, it pushes them to the Pushgateway configured while // creating this Pusher, using the configured job name and any added grouping // labels as grouping key. All previously pushed metrics with the same job and // other grouping labels will be replaced with the metrics pushed by this // call. (It uses HTTP method “PUT” to push to the Pushgateway.) // // Push returns the first error encountered by any method call (including this // one) in the lifetime of the Pusher. func (p *Pusher) Push() error { return p.push(http.MethodPut) } // Add works like push, but only previously pushed metrics with the same name // (and the same job and other grouping labels) will be replaced. (It uses HTTP // method “POST” to push to the Pushgateway.) func (p *Pusher) Add() error { return p.push(http.MethodPost) } // Gatherer adds a Gatherer to the Pusher, from which metrics will be gathered // to push them to the Pushgateway. The gathered metrics must not contain a job // label of their own. // // For convenience, this method returns a pointer to the Pusher itself. func (p *Pusher) Gatherer(g prometheus.Gatherer) *Pusher { p.gatherers = append(p.gatherers, g) return p } // Collector adds a Collector to the Pusher, from which metrics will be // collected to push them to the Pushgateway. The collected metrics must not // contain a job label of their own. // // For convenience, this method returns a pointer to the Pusher itself. func (p *Pusher) Collector(c prometheus.Collector) *Pusher { if p.error == nil { p.error = p.registerer.Register(c) } return p } // Grouping adds a label pair to the grouping key of the Pusher, replacing any // previously added label pair with the same label name. Note that setting any // labels in the grouping key that are already contained in the metrics to push // will lead to an error. // // For convenience, this method returns a pointer to the Pusher itself. 
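// For example, a Pusher that groups backup metrics per database instance
// could be configured as
//
//	push.New("http://pushgateway:9091", "db_backup").
//		Grouping("db", "customers").
//		Push()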
func (p *Pusher) Grouping(name, value string) *Pusher { if p.error == nil { if !model.LabelName(name).IsValid() { p.error = fmt.Errorf("grouping label has invalid name: %s", name) return p } p.grouping[name] = value } return p } // Client sets a custom HTTP client for the Pusher. For convenience, this method // returns a pointer to the Pusher itself. // Pusher only needs one method of the custom HTTP client: Do(*http.Request). // Thus, rather than requiring a fully fledged http.Client, // the provided client only needs to implement the HTTPDoer interface. // Since *http.Client naturally implements that interface, it can still be used normally. func (p *Pusher) Client(c HTTPDoer) *Pusher { p.client = c return p } // BasicAuth configures the Pusher to use HTTP Basic Authentication with the // provided username and password. For convenience, this method returns a // pointer to the Pusher itself. func (p *Pusher) BasicAuth(username, password string) *Pusher { p.useBasicAuth = true p.username = username p.password = password return p } // Format configures the Pusher to use an encoding format given by the // provided expfmt.Format. The default format is expfmt.FmtProtoDelim and // should be used with the standard Prometheus Pushgateway. Custom // implementations may require different formats. For convenience, this // method returns a pointer to the Pusher itself. func (p *Pusher) Format(format expfmt.Format) *Pusher { p.expfmt = format return p } // Delete sends a “DELETE” request to the Pushgateway configured while creating // this Pusher, using the configured job name and any added grouping labels as // grouping key. Any added Gatherers and Collectors added to this Pusher are // ignored by this method. // // Delete returns the first error encountered by any method call (including this // one) in the lifetime of the Pusher. func (p *Pusher) Delete() error { if p.error != nil { return p.error } req, err := http.NewRequest(http.MethodDelete, p.fullURL(), nil) if err != nil { return err } if p.useBasicAuth { req.SetBasicAuth(p.username, p.password) } resp, err := p.client.Do(req) if err != nil { return err } defer resp.Body.Close() if resp.StatusCode != http.StatusAccepted { body, _ := ioutil.ReadAll(resp.Body) // Ignore any further error as this is for an error message only. return fmt.Errorf("unexpected status code %d while deleting %s: %s", resp.StatusCode, p.fullURL(), body) } return nil } func (p *Pusher) push(method string) error { if p.error != nil { return p.error } mfs, err := p.gatherers.Gather() if err != nil { return err } buf := &bytes.Buffer{} enc := expfmt.NewEncoder(buf, p.expfmt) // Check for pre-existing grouping labels: for _, mf := range mfs { for _, m := range mf.GetMetric() { for _, l := range m.GetLabel() { if l.GetName() == "job" { return fmt.Errorf("pushed metric %s (%s) already contains a job label", mf.GetName(), m) } if _, ok := p.grouping[l.GetName()]; ok { return fmt.Errorf( "pushed metric %s (%s) already contains grouping label %s", mf.GetName(), m, l.GetName(), ) } } } enc.Encode(mf) } req, err := http.NewRequest(method, p.fullURL(), buf) if err != nil { return err } if p.useBasicAuth { req.SetBasicAuth(p.username, p.password) } req.Header.Set(contentTypeHeader, string(p.expfmt)) resp, err := p.client.Do(req) if err != nil { return err } defer resp.Body.Close() // Depending on version and configuration of the PGW, StatusOK or StatusAccepted may be returned. 
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusAccepted { body, _ := ioutil.ReadAll(resp.Body) // Ignore any further error as this is for an error message only. return fmt.Errorf("unexpected status code %d while pushing to %s: %s", resp.StatusCode, p.fullURL(), body) } return nil } // fullURL assembles the URL used to push/delete metrics and returns it as a // string. The job name and any grouping label values containing a '/' will // trigger a base64 encoding of the affected component and proper suffixing of // the preceding component. Similarly, an empty grouping label value will be // encoded as base64 just with a single `=` padding character (to avoid an empty // path component). If the component does not contain a '/' but other special // characters, the usual url.QueryEscape is used for compatibility with older // versions of the Pushgateway and for better readability. func (p *Pusher) fullURL() string { urlComponents := []string{} if encodedJob, base64 := encodeComponent(p.job); base64 { urlComponents = append(urlComponents, "job"+base64Suffix, encodedJob) } else { urlComponents = append(urlComponents, "job", encodedJob) } for ln, lv := range p.grouping { if encodedLV, base64 := encodeComponent(lv); base64 { urlComponents = append(urlComponents, ln+base64Suffix, encodedLV) } else { urlComponents = append(urlComponents, ln, encodedLV) } } return fmt.Sprintf("%s/metrics/%s", p.url, strings.Join(urlComponents, "/")) } // encodeComponent encodes the provided string with base64.RawURLEncoding in // case it contains '/' and as "=" in case it is empty. If neither is the case, // it uses url.QueryEscape instead. It returns true in the former two cases. func encodeComponent(s string) (string, bool) { if s == "" { return "=", true } if strings.Contains(s, "/") { return base64.RawURLEncoding.EncodeToString([]byte(s)), true } return url.QueryEscape(s), false } client_golang-1.11.0/prometheus/push/push_test.go000066400000000000000000000171071405741072000221240ustar00rootroot00000000000000// Copyright 2016 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package push import ( "bytes" "io/ioutil" "net/http" "net/http/httptest" "testing" "github.com/prometheus/common/expfmt" "github.com/prometheus/client_golang/prometheus" ) func TestPush(t *testing.T) { var ( lastMethod string lastBody []byte lastPath string ) // Fake a Pushgateway that responds with 202 to DELETE and with 200 in // all other cases. pgwOK := httptest.NewServer( http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { lastMethod = r.Method var err error lastBody, err = ioutil.ReadAll(r.Body) if err != nil { t.Fatal(err) } lastPath = r.URL.EscapedPath() w.Header().Set("Content-Type", `text/plain; charset=utf-8`) if r.Method == http.MethodDelete { w.WriteHeader(http.StatusAccepted) return } w.WriteHeader(http.StatusOK) }), ) defer pgwOK.Close() // Fake a Pushgateway that always responds with 500. 
pgwErr := httptest.NewServer( http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { http.Error(w, "fake error", http.StatusInternalServerError) }), ) defer pgwErr.Close() metric1 := prometheus.NewCounter(prometheus.CounterOpts{ Name: "testname1", Help: "testhelp1", }) metric2 := prometheus.NewGauge(prometheus.GaugeOpts{ Name: "testname2", Help: "testhelp2", ConstLabels: prometheus.Labels{"foo": "bar", "dings": "bums"}, }) reg := prometheus.NewRegistry() reg.MustRegister(metric1) reg.MustRegister(metric2) mfs, err := reg.Gather() if err != nil { t.Fatal(err) } buf := &bytes.Buffer{} enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim) for _, mf := range mfs { if err := enc.Encode(mf); err != nil { t.Fatal(err) } } wantBody := buf.Bytes() // Push some Collectors, all good. if err := New(pgwOK.URL, "testjob"). Collector(metric1). Collector(metric2). Push(); err != nil { t.Fatal(err) } if lastMethod != http.MethodPut { t.Errorf("got method %q for Push, want %q", lastMethod, http.MethodPut) } if !bytes.Equal(lastBody, wantBody) { t.Errorf("got body %v, want %v", lastBody, wantBody) } if lastPath != "/metrics/job/testjob" { t.Error("unexpected path:", lastPath) } // Add some Collectors, with nil grouping, all good. if err := New(pgwOK.URL, "testjob"). Collector(metric1). Collector(metric2). Add(); err != nil { t.Fatal(err) } if lastMethod != http.MethodPost { t.Errorf("got method %q for Add, want %q", lastMethod, http.MethodPost) } if !bytes.Equal(lastBody, wantBody) { t.Errorf("got body %v, want %v", lastBody, wantBody) } if lastPath != "/metrics/job/testjob" { t.Error("unexpected path:", lastPath) } // Pushes that require base64 encoding. if err := New(pgwOK.URL, "test/job"). Collector(metric1). Collector(metric2). Push(); err != nil { t.Fatal(err) } if lastMethod != http.MethodPut { t.Errorf("got method %q for Push, want %q", lastMethod, http.MethodPut) } if !bytes.Equal(lastBody, wantBody) { t.Errorf("got body %v, want %v", lastBody, wantBody) } if lastPath != "/metrics/job@base64/dGVzdC9qb2I" { t.Error("unexpected path:", lastPath) } if err := New(pgwOK.URL, "testjob"). Grouping("foobar", "bu/ms"). Collector(metric1). Collector(metric2). Push(); err != nil { t.Fatal(err) } if lastMethod != http.MethodPut { t.Errorf("got method %q for Push, want %q", lastMethod, http.MethodPut) } if !bytes.Equal(lastBody, wantBody) { t.Errorf("got body %v, want %v", lastBody, wantBody) } if lastPath != "/metrics/job/testjob/foobar@base64/YnUvbXM" { t.Error("unexpected path:", lastPath) } // Push that requires URL encoding. if err := New(pgwOK.URL, "testjob"). Grouping("titan", "Προμηθεύς"). Collector(metric1). Collector(metric2). Push(); err != nil { t.Fatal(err) } if lastMethod != http.MethodPut { t.Errorf("got method %q for Push, want %q", lastMethod, http.MethodPut) } if !bytes.Equal(lastBody, wantBody) { t.Errorf("got body %v, want %v", lastBody, wantBody) } if lastPath != "/metrics/job/testjob/titan/%CE%A0%CF%81%CE%BF%CE%BC%CE%B7%CE%B8%CE%B5%CF%8D%CF%82" { t.Error("unexpected path:", lastPath) } // Empty label value triggers special base64 encoding. if err := New(pgwOK.URL, "testjob"). Grouping("empty", ""). Collector(metric1). Collector(metric2). 
Push(); err != nil { t.Fatal(err) } if lastMethod != http.MethodPut { t.Errorf("got method %q for Push, want %q", lastMethod, http.MethodPut) } if !bytes.Equal(lastBody, wantBody) { t.Errorf("got body %v, want %v", lastBody, wantBody) } if lastPath != "/metrics/job/testjob/empty@base64/=" { t.Error("unexpected path:", lastPath) } // Empty job name results in error. if err := New(pgwErr.URL, ""). Collector(metric1). Collector(metric2). Push(); err == nil { t.Error("push with empty job succeded") } else { if got, want := err, errJobEmpty; got != want { t.Errorf("got error %q, want %q", got, want) } } // Push some Collectors with a broken PGW. if err := New(pgwErr.URL, "testjob"). Collector(metric1). Collector(metric2). Push(); err == nil { t.Error("push to broken Pushgateway succeeded") } else { if got, want := err.Error(), "unexpected status code 500 while pushing to "+pgwErr.URL+"/metrics/job/testjob: fake error\n"; got != want { t.Errorf("got error %q, want %q", got, want) } } // Push some Collectors with invalid grouping or job. if err := New(pgwOK.URL, "testjob"). Grouping("foo", "bums"). Collector(metric1). Collector(metric2). Push(); err == nil { t.Error("push with grouping contained in metrics succeeded") } if err := New(pgwOK.URL, "testjob"). Grouping("foo-bar", "bums"). Collector(metric1). Collector(metric2). Push(); err == nil { t.Error("push with invalid grouping succeeded") } // Push registry, all good. if err := New(pgwOK.URL, "testjob"). Gatherer(reg). Push(); err != nil { t.Fatal(err) } if lastMethod != http.MethodPut { t.Errorf("got method %q for Push, want %q", lastMethod, http.MethodPut) } if !bytes.Equal(lastBody, wantBody) { t.Errorf("got body %v, want %v", lastBody, wantBody) } // Add registry, all good. if err := New(pgwOK.URL, "testjob"). Grouping("a", "x"). Grouping("b", "y"). Gatherer(reg). Add(); err != nil { t.Fatal(err) } if lastMethod != http.MethodPost { t.Errorf("got method %q for Add, want %q", lastMethod, http.MethodPost) } if !bytes.Equal(lastBody, wantBody) { t.Errorf("got body %v, want %v", lastBody, wantBody) } if lastPath != "/metrics/job/testjob/a/x/b/y" && lastPath != "/metrics/job/testjob/b/y/a/x" { t.Error("unexpected path:", lastPath) } // Delete, all good. if err := New(pgwOK.URL, "testjob"). Grouping("a", "x"). Grouping("b", "y"). Delete(); err != nil { t.Fatal(err) } if lastMethod != http.MethodDelete { t.Errorf("got method %q for Delete, want %q", lastMethod, http.MethodDelete) } if len(lastBody) != 0 { t.Errorf("got body of length %d, want empty body", len(lastBody)) } if lastPath != "/metrics/job/testjob/a/x/b/y" && lastPath != "/metrics/job/testjob/b/y/a/x" { t.Error("unexpected path:", lastPath) } } client_golang-1.11.0/prometheus/registry.go000066400000000000000000000762711405741072000210060ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package prometheus import ( "bytes" "fmt" "io/ioutil" "os" "path/filepath" "runtime" "sort" "strings" "sync" "unicode/utf8" "github.com/cespare/xxhash/v2" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" "github.com/prometheus/common/expfmt" dto "github.com/prometheus/client_model/go" "github.com/prometheus/client_golang/prometheus/internal" ) const ( // Capacity for the channel to collect metrics and descriptors. capMetricChan = 1000 capDescChan = 10 ) // DefaultRegisterer and DefaultGatherer are the implementations of the // Registerer and Gatherer interface a number of convenience functions in this // package act on. Initially, both variables point to the same Registry, which // has a process collector (currently on Linux only, see NewProcessCollector) // and a Go collector (see NewGoCollector, in particular the note about // stop-the-world implication with Go versions older than 1.9) already // registered. This approach to keep default instances as global state mirrors // the approach of other packages in the Go standard library. Note that there // are caveats. Change the variables with caution and only if you understand the // consequences. Users who want to avoid global state altogether should not use // the convenience functions and act on custom instances instead. var ( defaultRegistry = NewRegistry() DefaultRegisterer Registerer = defaultRegistry DefaultGatherer Gatherer = defaultRegistry ) func init() { MustRegister(NewProcessCollector(ProcessCollectorOpts{})) MustRegister(NewGoCollector()) } // NewRegistry creates a new vanilla Registry without any Collectors // pre-registered. func NewRegistry() *Registry { return &Registry{ collectorsByID: map[uint64]Collector{}, descIDs: map[uint64]struct{}{}, dimHashesByName: map[string]uint64{}, } } // NewPedanticRegistry returns a registry that checks during collection if each // collected Metric is consistent with its reported Desc, and if the Desc has // actually been registered with the registry. Unchecked Collectors (those whose // Describe method does not yield any descriptors) are excluded from the check. // // Usually, a Registry will be happy as long as the union of all collected // Metrics is consistent and valid even if some metrics are not consistent with // their own Desc or a Desc provided by their registered Collector. Well-behaved // Collectors and Metrics will only provide consistent Descs. This Registry is // useful to test the implementation of Collectors and Metrics. func NewPedanticRegistry() *Registry { r := NewRegistry() r.pedanticChecksEnabled = true return r } // Registerer is the interface for the part of a registry in charge of // registering and unregistering. Users of custom registries should use // Registerer as type for registration purposes (rather than the Registry type // directly). In that way, they are free to use custom Registerer implementation // (e.g. for testing purposes). type Registerer interface { // Register registers a new Collector to be included in metrics // collection. It returns an error if the descriptors provided by the // Collector are invalid or if they — in combination with descriptors of // already registered Collectors — do not fulfill the consistency and // uniqueness criteria described in the documentation of metric.Desc. 
// // If the provided Collector is equal to a Collector already registered // (which includes the case of re-registering the same Collector), the // returned error is an instance of AlreadyRegisteredError, which // contains the previously registered Collector. // // A Collector whose Describe method does not yield any Desc is treated // as unchecked. Registration will always succeed. No check for // re-registering (see previous paragraph) is performed. Thus, the // caller is responsible for not double-registering the same unchecked // Collector, and for providing a Collector that will not cause // inconsistent metrics on collection. (This would lead to scrape // errors.) Register(Collector) error // MustRegister works like Register but registers any number of // Collectors and panics upon the first registration that causes an // error. MustRegister(...Collector) // Unregister unregisters the Collector that equals the Collector passed // in as an argument. (Two Collectors are considered equal if their // Describe method yields the same set of descriptors.) The function // returns whether a Collector was unregistered. Note that an unchecked // Collector cannot be unregistered (as its Describe method does not // yield any descriptor). // // Note that even after unregistering, it will not be possible to // register a new Collector that is inconsistent with the unregistered // Collector, e.g. a Collector collecting metrics with the same name but // a different help string. The rationale here is that the same registry // instance must only collect consistent metrics throughout its // lifetime. Unregister(Collector) bool } // Gatherer is the interface for the part of a registry in charge of gathering // the collected metrics into a number of MetricFamilies. The Gatherer interface // comes with the same general implication as described for the Registerer // interface. type Gatherer interface { // Gather calls the Collect method of the registered Collectors and then // gathers the collected metrics into a lexicographically sorted slice // of uniquely named MetricFamily protobufs. Gather ensures that the // returned slice is valid and self-consistent so that it can be used // for valid exposition. As an exception to the strict consistency // requirements described for metric.Desc, Gather will tolerate // different sets of label names for metrics of the same metric family. // // Even if an error occurs, Gather attempts to gather as many metrics as // possible. Hence, if a non-nil error is returned, the returned // MetricFamily slice could be nil (in case of a fatal error that // prevented any meaningful metric collection) or contain a number of // MetricFamily protobufs, some of which might be incomplete, and some // might be missing altogether. The returned error (which might be a // MultiError) explains the details. Note that this is mostly useful for // debugging purposes. If the gathered protobufs are to be used for // exposition in actual monitoring, it is almost always better to not // expose an incomplete result and instead disregard the returned // MetricFamily protobufs in case the returned error is non-nil. Gather() ([]*dto.MetricFamily, error) } // Register registers the provided Collector with the DefaultRegisterer. // // Register is a shortcut for DefaultRegisterer.Register(c). See there for more // details. 
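// A caller that wants to reuse an already registered Collector can check for
// AlreadyRegisteredError, roughly like this (requestCount is an illustrative
// variable):
//
//	if err := prometheus.Register(requestCount); err != nil {
//		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
//			requestCount = are.ExistingCollector.(prometheus.Counter)
//		} else {
//			panic(err)
//		}
//	}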
func Register(c Collector) error { return DefaultRegisterer.Register(c) } // MustRegister registers the provided Collectors with the DefaultRegisterer and // panics if any error occurs. // // MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See // there for more details. func MustRegister(cs ...Collector) { DefaultRegisterer.MustRegister(cs...) } // Unregister removes the registration of the provided Collector from the // DefaultRegisterer. // // Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for // more details. func Unregister(c Collector) bool { return DefaultRegisterer.Unregister(c) } // GathererFunc turns a function into a Gatherer. type GathererFunc func() ([]*dto.MetricFamily, error) // Gather implements Gatherer. func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { return gf() } // AlreadyRegisteredError is returned by the Register method if the Collector to // be registered has already been registered before, or a different Collector // that collects the same metrics has been registered before. Registration fails // in that case, but you can detect from the kind of error what has // happened. The error contains fields for the existing Collector and the // (rejected) new Collector that equals the existing one. This can be used to // find out if an equal Collector has been registered before and switch over to // using the old one, as demonstrated in the example. type AlreadyRegisteredError struct { ExistingCollector, NewCollector Collector } func (err AlreadyRegisteredError) Error() string { return "duplicate metrics collector registration attempted" } // MultiError is a slice of errors implementing the error interface. It is used // by a Gatherer to report multiple errors during MetricFamily gathering. type MultiError []error // Error formats the contained errors as a bullet point list, preceded by the // total number of errors. Note that this results in a multi-line string. func (errs MultiError) Error() string { if len(errs) == 0 { return "" } buf := &bytes.Buffer{} fmt.Fprintf(buf, "%d error(s) occurred:", len(errs)) for _, err := range errs { fmt.Fprintf(buf, "\n* %s", err) } return buf.String() } // Append appends the provided error if it is not nil. func (errs *MultiError) Append(err error) { if err != nil { *errs = append(*errs, err) } } // MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only // contained error as error if len(errs is 1). In all other cases, it returns // the MultiError directly. This is helpful for returning a MultiError in a way // that only uses the MultiError if needed. func (errs MultiError) MaybeUnwrap() error { switch len(errs) { case 0: return nil case 1: return errs[0] default: return errs } } // Registry registers Prometheus collectors, collects their metrics, and gathers // them into MetricFamilies for exposition. It implements both Registerer and // Gatherer. The zero value is not usable. Create instances with NewRegistry or // NewPedanticRegistry. type Registry struct { mtx sync.RWMutex collectorsByID map[uint64]Collector // ID is a hash of the descIDs. descIDs map[uint64]struct{} dimHashesByName map[string]uint64 uncheckedCollectors []Collector pedanticChecksEnabled bool } // Register implements Registerer. func (r *Registry) Register(c Collector) error { var ( descChan = make(chan *Desc, capDescChan) newDescIDs = map[uint64]struct{}{} newDimHashesByName = map[string]uint64{} collectorID uint64 // All desc IDs XOR'd together. 
duplicateDescErr error ) go func() { c.Describe(descChan) close(descChan) }() r.mtx.Lock() defer func() { // Drain channel in case of premature return to not leak a goroutine. for range descChan { } r.mtx.Unlock() }() // Conduct various tests... for desc := range descChan { // Is the descriptor valid at all? if desc.err != nil { return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err) } // Is the descID unique? // (In other words: Is the fqName + constLabel combination unique?) if _, exists := r.descIDs[desc.id]; exists { duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc) } // If it is not a duplicate desc in this collector, XOR it to // the collectorID. (We allow duplicate descs within the same // collector, but their existence must be a no-op.) if _, exists := newDescIDs[desc.id]; !exists { newDescIDs[desc.id] = struct{}{} collectorID ^= desc.id } // Are all the label names and the help string consistent with // previous descriptors of the same name? // First check existing descriptors... if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { if dimHash != desc.dimHash { return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) } } else { // ...then check the new descriptors already seen. if dimHash, exists := newDimHashesByName[desc.fqName]; exists { if dimHash != desc.dimHash { return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) } } else { newDimHashesByName[desc.fqName] = desc.dimHash } } } // A Collector yielding no Desc at all is considered unchecked. if len(newDescIDs) == 0 { r.uncheckedCollectors = append(r.uncheckedCollectors, c) return nil } if existing, exists := r.collectorsByID[collectorID]; exists { switch e := existing.(type) { case *wrappingCollector: return AlreadyRegisteredError{ ExistingCollector: e.unwrapRecursively(), NewCollector: c, } default: return AlreadyRegisteredError{ ExistingCollector: e, NewCollector: c, } } } // If the collectorID is new, but at least one of the descs existed // before, we are in trouble. if duplicateDescErr != nil { return duplicateDescErr } // Only after all tests have passed, actually register. r.collectorsByID[collectorID] = c for hash := range newDescIDs { r.descIDs[hash] = struct{}{} } for name, dimHash := range newDimHashesByName { r.dimHashesByName[name] = dimHash } return nil } // Unregister implements Registerer. func (r *Registry) Unregister(c Collector) bool { var ( descChan = make(chan *Desc, capDescChan) descIDs = map[uint64]struct{}{} collectorID uint64 // All desc IDs XOR'd together. ) go func() { c.Describe(descChan) close(descChan) }() for desc := range descChan { if _, exists := descIDs[desc.id]; !exists { collectorID ^= desc.id descIDs[desc.id] = struct{}{} } } r.mtx.RLock() if _, exists := r.collectorsByID[collectorID]; !exists { r.mtx.RUnlock() return false } r.mtx.RUnlock() r.mtx.Lock() defer r.mtx.Unlock() delete(r.collectorsByID, collectorID) for id := range descIDs { delete(r.descIDs, id) } // dimHashesByName is left untouched as those must be consistent // throughout the lifetime of a program. return true } // MustRegister implements Registerer. func (r *Registry) MustRegister(cs ...Collector) { for _, c := range cs { if err := r.Register(c); err != nil { panic(err) } } } // Gather implements Gatherer. 
func (r *Registry) Gather() ([]*dto.MetricFamily, error) { var ( checkedMetricChan = make(chan Metric, capMetricChan) uncheckedMetricChan = make(chan Metric, capMetricChan) metricHashes = map[uint64]struct{}{} wg sync.WaitGroup errs MultiError // The collected errors to return in the end. registeredDescIDs map[uint64]struct{} // Only used for pedantic checks ) r.mtx.RLock() goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors) metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) checkedCollectors := make(chan Collector, len(r.collectorsByID)) uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors)) for _, collector := range r.collectorsByID { checkedCollectors <- collector } for _, collector := range r.uncheckedCollectors { uncheckedCollectors <- collector } // In case pedantic checks are enabled, we have to copy the map before // giving up the RLock. if r.pedanticChecksEnabled { registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs)) for id := range r.descIDs { registeredDescIDs[id] = struct{}{} } } r.mtx.RUnlock() wg.Add(goroutineBudget) collectWorker := func() { for { select { case collector := <-checkedCollectors: collector.Collect(checkedMetricChan) case collector := <-uncheckedCollectors: collector.Collect(uncheckedMetricChan) default: return } wg.Done() } } // Start the first worker now to make sure at least one is running. go collectWorker() goroutineBudget-- // Close checkedMetricChan and uncheckedMetricChan once all collectors // are collected. go func() { wg.Wait() close(checkedMetricChan) close(uncheckedMetricChan) }() // Drain checkedMetricChan and uncheckedMetricChan in case of premature return. defer func() { if checkedMetricChan != nil { for range checkedMetricChan { } } if uncheckedMetricChan != nil { for range uncheckedMetricChan { } } }() // Copy the channel references so we can nil them out later to remove // them from the select statements below. cmc := checkedMetricChan umc := uncheckedMetricChan for { select { case metric, ok := <-cmc: if !ok { cmc = nil break } errs.Append(processMetric( metric, metricFamiliesByName, metricHashes, registeredDescIDs, )) case metric, ok := <-umc: if !ok { umc = nil break } errs.Append(processMetric( metric, metricFamiliesByName, metricHashes, nil, )) default: if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 { // All collectors are already being worked on or // we have already as many goroutines started as // there are collectors. Do the same as above, // just without the default. select { case metric, ok := <-cmc: if !ok { cmc = nil break } errs.Append(processMetric( metric, metricFamiliesByName, metricHashes, registeredDescIDs, )) case metric, ok := <-umc: if !ok { umc = nil break } errs.Append(processMetric( metric, metricFamiliesByName, metricHashes, nil, )) } break } // Start more workers. go collectWorker() goroutineBudget-- runtime.Gosched() } // Once both checkedMetricChan and uncheckdMetricChan are closed // and drained, the contraption above will nil out cmc and umc, // and then we can leave the collect loop here. if cmc == nil && umc == nil { break } } return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() } // WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the // Prometheus text format, and writes it to a temporary file. Upon success, the // temporary file is renamed to the provided filename. // // This is intended for use with the textfile collector of the node exporter. 
// Note that the node exporter expects the filename to be suffixed with ".prom". func WriteToTextfile(filename string, g Gatherer) error { tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)) if err != nil { return err } defer os.Remove(tmp.Name()) mfs, err := g.Gather() if err != nil { return err } for _, mf := range mfs { if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil { return err } } if err := tmp.Close(); err != nil { return err } if err := os.Chmod(tmp.Name(), 0644); err != nil { return err } return os.Rename(tmp.Name(), filename) } // processMetric is an internal helper method only used by the Gather method. func processMetric( metric Metric, metricFamiliesByName map[string]*dto.MetricFamily, metricHashes map[uint64]struct{}, registeredDescIDs map[uint64]struct{}, ) error { desc := metric.Desc() // Wrapped metrics collected by an unchecked Collector can have an // invalid Desc. if desc.err != nil { return desc.err } dtoMetric := &dto.Metric{} if err := metric.Write(dtoMetric); err != nil { return fmt.Errorf("error collecting metric %v: %s", desc, err) } metricFamily, ok := metricFamiliesByName[desc.fqName] if ok { // Existing name. if metricFamily.GetHelp() != desc.help { return fmt.Errorf( "collected metric %s %s has help %q but should have %q", desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), ) } // TODO(beorn7): Simplify switch once Desc has type. switch metricFamily.GetType() { case dto.MetricType_COUNTER: if dtoMetric.Counter == nil { return fmt.Errorf( "collected metric %s %s should be a Counter", desc.fqName, dtoMetric, ) } case dto.MetricType_GAUGE: if dtoMetric.Gauge == nil { return fmt.Errorf( "collected metric %s %s should be a Gauge", desc.fqName, dtoMetric, ) } case dto.MetricType_SUMMARY: if dtoMetric.Summary == nil { return fmt.Errorf( "collected metric %s %s should be a Summary", desc.fqName, dtoMetric, ) } case dto.MetricType_UNTYPED: if dtoMetric.Untyped == nil { return fmt.Errorf( "collected metric %s %s should be Untyped", desc.fqName, dtoMetric, ) } case dto.MetricType_HISTOGRAM: if dtoMetric.Histogram == nil { return fmt.Errorf( "collected metric %s %s should be a Histogram", desc.fqName, dtoMetric, ) } default: panic("encountered MetricFamily with invalid type") } } else { // New name. metricFamily = &dto.MetricFamily{} metricFamily.Name = proto.String(desc.fqName) metricFamily.Help = proto.String(desc.help) // TODO(beorn7): Simplify switch once Desc has type. switch { case dtoMetric.Gauge != nil: metricFamily.Type = dto.MetricType_GAUGE.Enum() case dtoMetric.Counter != nil: metricFamily.Type = dto.MetricType_COUNTER.Enum() case dtoMetric.Summary != nil: metricFamily.Type = dto.MetricType_SUMMARY.Enum() case dtoMetric.Untyped != nil: metricFamily.Type = dto.MetricType_UNTYPED.Enum() case dtoMetric.Histogram != nil: metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() default: return fmt.Errorf("empty metric collected: %s", dtoMetric) } if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil { return err } metricFamiliesByName[desc.fqName] = metricFamily } if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil { return err } if registeredDescIDs != nil { // Is the desc registered at all? 
if _, exist := registeredDescIDs[desc.id]; !exist { return fmt.Errorf( "collected metric %s %s with unregistered descriptor %s", metricFamily.GetName(), dtoMetric, desc, ) } if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { return err } } metricFamily.Metric = append(metricFamily.Metric, dtoMetric) return nil } // Gatherers is a slice of Gatherer instances that implements the Gatherer // interface itself. Its Gather method calls Gather on all Gatherers in the // slice in order and returns the merged results. Errors returned from the // Gather calls are all returned in a flattened MultiError. Duplicate and // inconsistent Metrics are skipped (first occurrence in slice order wins) and // reported in the returned error. // // Gatherers can be used to merge the Gather results from multiple // Registries. It also provides a way to directly inject existing MetricFamily // protobufs into the gathering by creating a custom Gatherer with a Gather // method that simply returns the existing MetricFamily protobufs. Note that no // registration is involved (in contrast to Collector registration), so // obviously registration-time checks cannot happen. Any inconsistencies between // the gathered MetricFamilies are reported as errors by the Gather method, and // inconsistent Metrics are dropped. Invalid parts of the MetricFamilies // (e.g. syntactically invalid metric or label names) will go undetected. type Gatherers []Gatherer // Gather implements Gatherer. func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { var ( metricFamiliesByName = map[string]*dto.MetricFamily{} metricHashes = map[uint64]struct{}{} errs MultiError // The collected errors to return in the end. ) for i, g := range gs { mfs, err := g.Gather() if err != nil { if multiErr, ok := err.(MultiError); ok { for _, err := range multiErr { errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) } } else { errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) } } for _, mf := range mfs { existingMF, exists := metricFamiliesByName[mf.GetName()] if exists { if existingMF.GetHelp() != mf.GetHelp() { errs = append(errs, fmt.Errorf( "gathered metric family %s has help %q but should have %q", mf.GetName(), mf.GetHelp(), existingMF.GetHelp(), )) continue } if existingMF.GetType() != mf.GetType() { errs = append(errs, fmt.Errorf( "gathered metric family %s has type %s but should have %s", mf.GetName(), mf.GetType(), existingMF.GetType(), )) continue } } else { existingMF = &dto.MetricFamily{} existingMF.Name = mf.Name existingMF.Help = mf.Help existingMF.Type = mf.Type if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil { errs = append(errs, err) continue } metricFamiliesByName[mf.GetName()] = existingMF } for _, m := range mf.Metric { if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil { errs = append(errs, err) continue } existingMF.Metric = append(existingMF.Metric, m) } } } return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() } // checkSuffixCollisions checks for collisions with the “magic” suffixes the // Prometheus text format and the internal metric representation of the // Prometheus server add while flattening Summaries and Histograms. 
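// For example, collecting a counter named "complex_bucket" after a histogram
// named "complex" is rejected, because the histogram already implies the
// flattened series complex_bucket, complex_sum, and complex_count.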
func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error { var ( newName = mf.GetName() newType = mf.GetType() newNameWithoutSuffix = "" ) switch { case strings.HasSuffix(newName, "_count"): newNameWithoutSuffix = newName[:len(newName)-6] case strings.HasSuffix(newName, "_sum"): newNameWithoutSuffix = newName[:len(newName)-4] case strings.HasSuffix(newName, "_bucket"): newNameWithoutSuffix = newName[:len(newName)-7] } if newNameWithoutSuffix != "" { if existingMF, ok := mfs[newNameWithoutSuffix]; ok { switch existingMF.GetType() { case dto.MetricType_SUMMARY: if !strings.HasSuffix(newName, "_bucket") { return fmt.Errorf( "collected metric named %q collides with previously collected summary named %q", newName, newNameWithoutSuffix, ) } case dto.MetricType_HISTOGRAM: return fmt.Errorf( "collected metric named %q collides with previously collected histogram named %q", newName, newNameWithoutSuffix, ) } } } if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM { if _, ok := mfs[newName+"_count"]; ok { return fmt.Errorf( "collected histogram or summary named %q collides with previously collected metric named %q", newName, newName+"_count", ) } if _, ok := mfs[newName+"_sum"]; ok { return fmt.Errorf( "collected histogram or summary named %q collides with previously collected metric named %q", newName, newName+"_sum", ) } } if newType == dto.MetricType_HISTOGRAM { if _, ok := mfs[newName+"_bucket"]; ok { return fmt.Errorf( "collected histogram named %q collides with previously collected metric named %q", newName, newName+"_bucket", ) } } return nil } // checkMetricConsistency checks if the provided Metric is consistent with the // provided MetricFamily. It also hashes the Metric labels and the MetricFamily // name. If the resulting hash is already in the provided metricHashes, an error // is returned. If not, it is added to metricHashes. func checkMetricConsistency( metricFamily *dto.MetricFamily, dtoMetric *dto.Metric, metricHashes map[uint64]struct{}, ) error { name := metricFamily.GetName() // Type consistency with metric family. if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil || metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { return fmt.Errorf( "collected metric %q { %s} is not a %s", name, dtoMetric, metricFamily.GetType(), ) } previousLabelName := "" for _, labelPair := range dtoMetric.GetLabel() { labelName := labelPair.GetName() if labelName == previousLabelName { return fmt.Errorf( "collected metric %q { %s} has two or more labels with the same name: %s", name, dtoMetric, labelName, ) } if !checkLabelName(labelName) { return fmt.Errorf( "collected metric %q { %s} has a label with an invalid name: %s", name, dtoMetric, labelName, ) } if dtoMetric.Summary != nil && labelName == quantileLabel { return fmt.Errorf( "collected metric %q { %s} must not have an explicit %q label", name, dtoMetric, quantileLabel, ) } if !utf8.ValidString(labelPair.GetValue()) { return fmt.Errorf( "collected metric %q { %s} has a label named %q whose value is not utf8: %#v", name, dtoMetric, labelName, labelPair.GetValue()) } previousLabelName = labelName } // Is the metric unique (i.e. no other metric with the same name and the same labels)? 
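// The code below hashes the metric family name together with each label name
// and value (joined by a separator byte) and rejects the metric if the same
// hash has already been seen during this gathering.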
h := xxhash.New() h.WriteString(name) h.Write(separatorByteSlice) // Make sure label pairs are sorted. We depend on it for the consistency // check. if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) { // We cannot sort dtoMetric.Label in place as it is immutable by contract. copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label)) copy(copiedLabels, dtoMetric.Label) sort.Sort(labelPairSorter(copiedLabels)) dtoMetric.Label = copiedLabels } for _, lp := range dtoMetric.Label { h.WriteString(lp.GetName()) h.Write(separatorByteSlice) h.WriteString(lp.GetValue()) h.Write(separatorByteSlice) } hSum := h.Sum64() if _, exists := metricHashes[hSum]; exists { return fmt.Errorf( "collected metric %q { %s} was collected before with the same name and label values", name, dtoMetric, ) } metricHashes[hSum] = struct{}{} return nil } func checkDescConsistency( metricFamily *dto.MetricFamily, dtoMetric *dto.Metric, desc *Desc, ) error { // Desc help consistency with metric family help. if metricFamily.GetHelp() != desc.help { return fmt.Errorf( "collected metric %s %s has help %q but should have %q", metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, ) } // Is the desc consistent with the content of the metric? lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label)) copy(lpsFromDesc, desc.constLabelPairs) for _, l := range desc.variableLabels { lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ Name: proto.String(l), }) } if len(lpsFromDesc) != len(dtoMetric.Label) { return fmt.Errorf( "labels in collected metric %s %s are inconsistent with descriptor %s", metricFamily.GetName(), dtoMetric, desc, ) } sort.Sort(labelPairSorter(lpsFromDesc)) for i, lpFromDesc := range lpsFromDesc { lpFromMetric := dtoMetric.Label[i] if lpFromDesc.GetName() != lpFromMetric.GetName() || lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { return fmt.Errorf( "labels in collected metric %s %s are inconsistent with descriptor %s", metricFamily.GetName(), dtoMetric, desc, ) } } return nil } client_golang-1.11.0/prometheus/registry_test.go000066400000000000000000000771431405741072000220440ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Copyright (c) 2013, The Prometheus Authors // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file. package prometheus_test import ( "bytes" "fmt" "io/ioutil" "math/rand" "net/http" "net/http/httptest" "os" "sync" "testing" "time" dto "github.com/prometheus/client_model/go" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" "github.com/prometheus/common/expfmt" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" ) // uncheckedCollector wraps a Collector but its Describe method yields no Desc. 
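// It is therefore registered as an unchecked Collector: registration always
// succeeds, and its metrics bypass the per-descriptor consistency checks
// during gathering (see the Registerer documentation in registry.go).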
type uncheckedCollector struct { c prometheus.Collector } func (u uncheckedCollector) Describe(_ chan<- *prometheus.Desc) {} func (u uncheckedCollector) Collect(c chan<- prometheus.Metric) { u.c.Collect(c) } func testHandler(t testing.TB) { // TODO(beorn7): This test is a bit too "end-to-end". It tests quite a // few moving parts that are not strongly coupled. They could/should be // tested separately. However, the changes planned for v2 will // require a major rework of this test anyway, at which time I will // structure it in a better way. metricVec := prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "name", Help: "docstring", ConstLabels: prometheus.Labels{"constname": "constvalue"}, }, []string{"labelname"}, ) metricVec.WithLabelValues("val1").Inc() metricVec.WithLabelValues("val2").Inc() externalMetricFamily := &dto.MetricFamily{ Name: proto.String("externalname"), Help: proto.String("externaldocstring"), Type: dto.MetricType_COUNTER.Enum(), Metric: []*dto.Metric{ { Label: []*dto.LabelPair{ { Name: proto.String("externalconstname"), Value: proto.String("externalconstvalue"), }, { Name: proto.String("externallabelname"), Value: proto.String("externalval1"), }, }, Counter: &dto.Counter{ Value: proto.Float64(1), }, }, }, } externalBuf := &bytes.Buffer{} enc := expfmt.NewEncoder(externalBuf, expfmt.FmtProtoDelim) if err := enc.Encode(externalMetricFamily); err != nil { t.Fatal(err) } externalMetricFamilyAsBytes := externalBuf.Bytes() externalMetricFamilyAsText := []byte(`# HELP externalname externaldocstring # TYPE externalname counter externalname{externalconstname="externalconstvalue",externallabelname="externalval1"} 1 `) externalMetricFamilyAsProtoText := []byte(`name: "externalname" help: "externaldocstring" type: COUNTER metric: < label: < name: "externalconstname" value: "externalconstvalue" > label: < name: "externallabelname" value: "externalval1" > counter: < value: 1 > > `) externalMetricFamilyAsProtoCompactText := []byte(`name:"externalname" help:"externaldocstring" type:COUNTER metric: label: counter: > `) expectedMetricFamily := &dto.MetricFamily{ Name: proto.String("name"), Help: proto.String("docstring"), Type: dto.MetricType_COUNTER.Enum(), Metric: []*dto.Metric{ { Label: []*dto.LabelPair{ { Name: proto.String("constname"), Value: proto.String("constvalue"), }, { Name: proto.String("labelname"), Value: proto.String("val1"), }, }, Counter: &dto.Counter{ Value: proto.Float64(1), }, }, { Label: []*dto.LabelPair{ { Name: proto.String("constname"), Value: proto.String("constvalue"), }, { Name: proto.String("labelname"), Value: proto.String("val2"), }, }, Counter: &dto.Counter{ Value: proto.Float64(1), }, }, }, } buf := &bytes.Buffer{} enc = expfmt.NewEncoder(buf, expfmt.FmtProtoDelim) if err := enc.Encode(expectedMetricFamily); err != nil { t.Fatal(err) } expectedMetricFamilyAsBytes := buf.Bytes() expectedMetricFamilyAsText := []byte(`# HELP name docstring # TYPE name counter name{constname="constvalue",labelname="val1"} 1 name{constname="constvalue",labelname="val2"} 1 `) expectedMetricFamilyAsProtoText := []byte(`name: "name" help: "docstring" type: COUNTER metric: < label: < name: "constname" value: "constvalue" > label: < name: "labelname" value: "val1" > counter: < value: 1 > > metric: < label: < name: "constname" value: "constvalue" > label: < name: "labelname" value: "val2" > counter: < value: 1 > > `) expectedMetricFamilyAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric: label: counter: > metric: label: counter: > `) 
externalMetricFamilyWithSameName := &dto.MetricFamily{ Name: proto.String("name"), Help: proto.String("docstring"), Type: dto.MetricType_COUNTER.Enum(), Metric: []*dto.Metric{ { Label: []*dto.LabelPair{ { Name: proto.String("constname"), Value: proto.String("constvalue"), }, { Name: proto.String("labelname"), Value: proto.String("different_val"), }, }, Counter: &dto.Counter{ Value: proto.Float64(42), }, }, }, } expectedMetricFamilyMergedWithExternalAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric: label: counter: > metric: label: counter: > metric: label: counter: > `) externalMetricFamilyWithInvalidLabelValue := &dto.MetricFamily{ Name: proto.String("name"), Help: proto.String("docstring"), Type: dto.MetricType_COUNTER.Enum(), Metric: []*dto.Metric{ { Label: []*dto.LabelPair{ { Name: proto.String("constname"), Value: proto.String("\xFF"), }, { Name: proto.String("labelname"), Value: proto.String("different_val"), }, }, Counter: &dto.Counter{ Value: proto.Float64(42), }, }, }, } expectedMetricFamilyInvalidLabelValueAsText := []byte(`An error has occurred while serving metrics: collected metric "name" { label: label: counter: } has a label named "constname" whose value is not utf8: "\xff" `) summary := prometheus.NewSummary(prometheus.SummaryOpts{ Name: "complex", Help: "A metric to check collisions with _sum and _count.", Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, }) summaryAsText := []byte(`# HELP complex A metric to check collisions with _sum and _count. # TYPE complex summary complex{quantile="0.5"} NaN complex{quantile="0.9"} NaN complex{quantile="0.99"} NaN complex_sum 0 complex_count 0 `) histogram := prometheus.NewHistogram(prometheus.HistogramOpts{ Name: "complex", Help: "A metric to check collisions with _sun, _count, and _bucket.", }) externalMetricFamilyWithBucketSuffix := &dto.MetricFamily{ Name: proto.String("complex_bucket"), Help: proto.String("externaldocstring"), Type: dto.MetricType_COUNTER.Enum(), Metric: []*dto.Metric{ { Counter: &dto.Counter{ Value: proto.Float64(1), }, }, }, } externalMetricFamilyWithBucketSuffixAsText := []byte(`# HELP complex_bucket externaldocstring # TYPE complex_bucket counter complex_bucket 1 `) externalMetricFamilyWithCountSuffix := &dto.MetricFamily{ Name: proto.String("complex_count"), Help: proto.String("externaldocstring"), Type: dto.MetricType_COUNTER.Enum(), Metric: []*dto.Metric{ { Counter: &dto.Counter{ Value: proto.Float64(1), }, }, }, } bucketCollisionMsg := []byte(`An error has occurred while serving metrics: collected metric named "complex_bucket" collides with previously collected histogram named "complex" `) summaryCountCollisionMsg := []byte(`An error has occurred while serving metrics: collected metric named "complex_count" collides with previously collected summary named "complex" `) histogramCountCollisionMsg := []byte(`An error has occurred while serving metrics: collected metric named "complex_count" collides with previously collected histogram named "complex" `) externalMetricFamilyWithDuplicateLabel := &dto.MetricFamily{ Name: proto.String("broken_metric"), Help: proto.String("The registry should detect the duplicate label."), Type: dto.MetricType_COUNTER.Enum(), Metric: []*dto.Metric{ { Label: []*dto.LabelPair{ { Name: proto.String("foo"), Value: proto.String("bar"), }, { Name: proto.String("foo"), Value: proto.String("baz"), }, }, Counter: &dto.Counter{ Value: proto.Float64(2.7), }, }, }, } duplicateLabelMsg := []byte(`An error has occurred while serving metrics: 
collected metric "broken_metric" { label: label: counter: } has two or more labels with the same name: foo `) type output struct { headers map[string]string body []byte } var scenarios = []struct { headers map[string]string out output collector prometheus.Collector externalMF []*dto.MetricFamily }{ { // 0 headers: map[string]string{ "Accept": "foo/bar;q=0.2, dings/bums;q=0.8", }, out: output{ headers: map[string]string{ "Content-Type": `text/plain; version=0.0.4; charset=utf-8`, }, body: []byte{}, }, }, { // 1 headers: map[string]string{ "Accept": "foo/bar;q=0.2, application/quark;q=0.8", }, out: output{ headers: map[string]string{ "Content-Type": `text/plain; version=0.0.4; charset=utf-8`, }, body: []byte{}, }, }, { // 2 headers: map[string]string{ "Accept": "foo/bar;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.8", }, out: output{ headers: map[string]string{ "Content-Type": `text/plain; version=0.0.4; charset=utf-8`, }, body: []byte{}, }, }, { // 3 headers: map[string]string{ "Accept": "text/plain;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.8", }, out: output{ headers: map[string]string{ "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, }, body: []byte{}, }, }, { // 4 headers: map[string]string{ "Accept": "application/json", }, out: output{ headers: map[string]string{ "Content-Type": `text/plain; version=0.0.4; charset=utf-8`, }, body: expectedMetricFamilyAsText, }, collector: metricVec, }, { // 5 headers: map[string]string{ "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", }, out: output{ headers: map[string]string{ "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, }, body: expectedMetricFamilyAsBytes, }, collector: metricVec, }, { // 6 headers: map[string]string{ "Accept": "application/json", }, out: output{ headers: map[string]string{ "Content-Type": `text/plain; version=0.0.4; charset=utf-8`, }, body: externalMetricFamilyAsText, }, externalMF: []*dto.MetricFamily{externalMetricFamily}, }, { // 7 headers: map[string]string{ "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", }, out: output{ headers: map[string]string{ "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, }, body: externalMetricFamilyAsBytes, }, externalMF: []*dto.MetricFamily{externalMetricFamily}, }, { // 8 headers: map[string]string{ "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", }, out: output{ headers: map[string]string{ "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, }, body: bytes.Join( [][]byte{ externalMetricFamilyAsBytes, expectedMetricFamilyAsBytes, }, []byte{}, ), }, collector: metricVec, externalMF: []*dto.MetricFamily{externalMetricFamily}, }, { // 9 headers: map[string]string{ "Accept": "text/plain", }, out: output{ headers: map[string]string{ "Content-Type": `text/plain; version=0.0.4; charset=utf-8`, }, body: []byte{}, }, }, { // 10 headers: map[string]string{ "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5", }, out: output{ headers: map[string]string{ "Content-Type": `text/plain; version=0.0.4; charset=utf-8`, }, body: 
expectedMetricFamilyAsText, }, collector: metricVec, }, { // 11 headers: map[string]string{ "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5;version=0.0.4", }, out: output{ headers: map[string]string{ "Content-Type": `text/plain; version=0.0.4; charset=utf-8`, }, body: bytes.Join( [][]byte{ externalMetricFamilyAsText, expectedMetricFamilyAsText, }, []byte{}, ), }, collector: metricVec, externalMF: []*dto.MetricFamily{externalMetricFamily}, }, { // 12 headers: map[string]string{ "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.2, text/plain;q=0.5;version=0.0.2", }, out: output{ headers: map[string]string{ "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, }, body: bytes.Join( [][]byte{ externalMetricFamilyAsBytes, expectedMetricFamilyAsBytes, }, []byte{}, ), }, collector: metricVec, externalMF: []*dto.MetricFamily{externalMetricFamily}, }, { // 13 headers: map[string]string{ "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=text;q=0.5, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.4", }, out: output{ headers: map[string]string{ "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text`, }, body: bytes.Join( [][]byte{ externalMetricFamilyAsProtoText, expectedMetricFamilyAsProtoText, }, []byte{}, ), }, collector: metricVec, externalMF: []*dto.MetricFamily{externalMetricFamily}, }, { // 14 headers: map[string]string{ "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text", }, out: output{ headers: map[string]string{ "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`, }, body: bytes.Join( [][]byte{ externalMetricFamilyAsProtoCompactText, expectedMetricFamilyAsProtoCompactText, }, []byte{}, ), }, collector: metricVec, externalMF: []*dto.MetricFamily{externalMetricFamily}, }, { // 15 headers: map[string]string{ "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text", }, out: output{ headers: map[string]string{ "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`, }, body: bytes.Join( [][]byte{ externalMetricFamilyAsProtoCompactText, expectedMetricFamilyMergedWithExternalAsProtoCompactText, }, []byte{}, ), }, collector: metricVec, externalMF: []*dto.MetricFamily{ externalMetricFamily, externalMetricFamilyWithSameName, }, }, { // 16 headers: map[string]string{ "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text", }, out: output{ headers: map[string]string{ "Content-Type": `text/plain; charset=utf-8`, }, body: expectedMetricFamilyInvalidLabelValueAsText, }, collector: metricVec, externalMF: []*dto.MetricFamily{ externalMetricFamily, externalMetricFamilyWithInvalidLabelValue, }, }, { // 17 headers: map[string]string{ "Accept": "text/plain", }, out: output{ headers: map[string]string{ "Content-Type": `text/plain; version=0.0.4; charset=utf-8`, }, body: expectedMetricFamilyAsText, }, collector: uncheckedCollector{metricVec}, }, { // 18 headers: map[string]string{ "Accept": "text/plain", }, out: output{ headers: map[string]string{ "Content-Type": `text/plain; charset=utf-8`, }, body: histogramCountCollisionMsg, 
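			// Added descriptive note: this scenario (18) and scenarios 19, 20, and 22
			// below exercise error reporting rather than normal exposition (suffix
			// collisions with a histogram or summary, and a duplicate label), so they
			// expect the plain "text/plain; charset=utf-8" error Content-Type rather
			// than a versioned exposition format. Scenario 21 covers the non-colliding
			// case, where a summary coexists with an external "complex_bucket" counter.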
}, collector: histogram, externalMF: []*dto.MetricFamily{ externalMetricFamilyWithCountSuffix, }, }, { // 19 headers: map[string]string{ "Accept": "text/plain", }, out: output{ headers: map[string]string{ "Content-Type": `text/plain; charset=utf-8`, }, body: bucketCollisionMsg, }, collector: histogram, externalMF: []*dto.MetricFamily{ externalMetricFamilyWithBucketSuffix, }, }, { // 20 headers: map[string]string{ "Accept": "text/plain", }, out: output{ headers: map[string]string{ "Content-Type": `text/plain; charset=utf-8`, }, body: summaryCountCollisionMsg, }, collector: summary, externalMF: []*dto.MetricFamily{ externalMetricFamilyWithCountSuffix, }, }, { // 21 headers: map[string]string{ "Accept": "text/plain", }, out: output{ headers: map[string]string{ "Content-Type": `text/plain; version=0.0.4; charset=utf-8`, }, body: bytes.Join( [][]byte{ summaryAsText, externalMetricFamilyWithBucketSuffixAsText, }, []byte{}, ), }, collector: summary, externalMF: []*dto.MetricFamily{ externalMetricFamilyWithBucketSuffix, }, }, { // 22 headers: map[string]string{ "Accept": "text/plain", }, out: output{ headers: map[string]string{ "Content-Type": `text/plain; charset=utf-8`, }, body: duplicateLabelMsg, }, externalMF: []*dto.MetricFamily{ externalMetricFamilyWithDuplicateLabel, }, }, } for i, scenario := range scenarios { registry := prometheus.NewPedanticRegistry() gatherer := prometheus.Gatherer(registry) if scenario.externalMF != nil { gatherer = prometheus.Gatherers{ registry, prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { return scenario.externalMF, nil }), } } if scenario.collector != nil { registry.MustRegister(scenario.collector) } writer := httptest.NewRecorder() handler := promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{}) request, _ := http.NewRequest("GET", "/", nil) for key, value := range scenario.headers { request.Header.Add(key, value) } handler.ServeHTTP(writer, request) for key, value := range scenario.out.headers { if writer.Header().Get(key) != value { t.Errorf( "%d. expected %q for header %q, got %q", i, value, key, writer.Header().Get(key), ) } } if !bytes.Equal(scenario.out.body, writer.Body.Bytes()) { t.Errorf( "%d. 
expected body:\n%s\ngot body:\n%s\n", i, scenario.out.body, writer.Body.Bytes(), ) } } } func TestHandler(t *testing.T) { testHandler(t) } func BenchmarkHandler(b *testing.B) { for i := 0; i < b.N; i++ { testHandler(b) } } func TestAlreadyRegistered(t *testing.T) { original := prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "test", Help: "help", ConstLabels: prometheus.Labels{"const": "label"}, }, []string{"foo", "bar"}, ) equalButNotSame := prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "test", Help: "help", ConstLabels: prometheus.Labels{"const": "label"}, }, []string{"foo", "bar"}, ) originalWithoutConstLabel := prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "test", Help: "help", }, []string{"foo", "bar"}, ) equalButNotSameWithoutConstLabel := prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "test", Help: "help", }, []string{"foo", "bar"}, ) scenarios := []struct { name string originalCollector prometheus.Collector registerWith func(prometheus.Registerer) prometheus.Registerer newCollector prometheus.Collector reRegisterWith func(prometheus.Registerer) prometheus.Registerer }{ { "RegisterNormallyReregisterNormally", original, func(r prometheus.Registerer) prometheus.Registerer { return r }, equalButNotSame, func(r prometheus.Registerer) prometheus.Registerer { return r }, }, { "RegisterNormallyReregisterWrapped", original, func(r prometheus.Registerer) prometheus.Registerer { return r }, equalButNotSameWithoutConstLabel, func(r prometheus.Registerer) prometheus.Registerer { return prometheus.WrapRegistererWith(prometheus.Labels{"const": "label"}, r) }, }, { "RegisterWrappedReregisterWrapped", originalWithoutConstLabel, func(r prometheus.Registerer) prometheus.Registerer { return prometheus.WrapRegistererWith(prometheus.Labels{"const": "label"}, r) }, equalButNotSameWithoutConstLabel, func(r prometheus.Registerer) prometheus.Registerer { return prometheus.WrapRegistererWith(prometheus.Labels{"const": "label"}, r) }, }, { "RegisterWrappedReregisterNormally", originalWithoutConstLabel, func(r prometheus.Registerer) prometheus.Registerer { return prometheus.WrapRegistererWith(prometheus.Labels{"const": "label"}, r) }, equalButNotSame, func(r prometheus.Registerer) prometheus.Registerer { return r }, }, { "RegisterDoublyWrappedReregisterDoublyWrapped", originalWithoutConstLabel, func(r prometheus.Registerer) prometheus.Registerer { return prometheus.WrapRegistererWithPrefix( "wrap_", prometheus.WrapRegistererWith(prometheus.Labels{"const": "label"}, r), ) }, equalButNotSameWithoutConstLabel, func(r prometheus.Registerer) prometheus.Registerer { return prometheus.WrapRegistererWithPrefix( "wrap_", prometheus.WrapRegistererWith(prometheus.Labels{"const": "label"}, r), ) }, }, } for _, s := range scenarios { t.Run(s.name, func(t *testing.T) { var err error reg := prometheus.NewRegistry() if err = s.registerWith(reg).Register(s.originalCollector); err != nil { t.Fatal(err) } if err = s.reRegisterWith(reg).Register(s.newCollector); err == nil { t.Fatal("expected error when registering new collector") } if are, ok := err.(prometheus.AlreadyRegisteredError); ok { if are.ExistingCollector != s.originalCollector { t.Error("expected original collector but got something else") } if are.ExistingCollector == s.newCollector { t.Error("expected original collector but got new one") } } else { t.Error("unexpected error:", err) } }) } } // TestRegisterUnregisterCollector ensures registering and unregistering a // collector doesn't leave any dangling metrics. 
// We use NewGoCollector as a nice concrete example of a collector with // multiple metrics. func TestRegisterUnregisterCollector(t *testing.T) { col := prometheus.NewGoCollector() reg := prometheus.NewRegistry() reg.MustRegister(col) reg.Unregister(col) if metrics, err := reg.Gather(); err != nil { t.Error("error gathering sample metric") } else if len(metrics) != 0 { t.Error("should have unregistered metric") } } // TestHistogramVecRegisterGatherConcurrency is an end-to-end test that // concurrently calls Observe on random elements of a HistogramVec while the // same HistogramVec is registered concurrently and the Gather method of the // registry is called concurrently. func TestHistogramVecRegisterGatherConcurrency(t *testing.T) { labelNames := make([]string, 16) // Need at least 13 to expose #512. for i := range labelNames { labelNames[i] = fmt.Sprint("label_", i) } var ( reg = prometheus.NewPedanticRegistry() hv = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "test_histogram", Help: "This helps testing.", ConstLabels: prometheus.Labels{"foo": "bar"}, }, labelNames, ) labelValues = []string{"a", "b", "c", "alpha", "beta", "gamma", "aleph", "beth", "gimel"} quit = make(chan struct{}) wg sync.WaitGroup ) observe := func() { defer wg.Done() for { select { case <-quit: return default: obs := rand.NormFloat64()*.1 + .2 values := make([]string, 0, len(labelNames)) for range labelNames { values = append(values, labelValues[rand.Intn(len(labelValues))]) } hv.WithLabelValues(values...).Observe(obs) } } } register := func() { defer wg.Done() for { select { case <-quit: return default: if err := reg.Register(hv); err != nil { if _, ok := err.(prometheus.AlreadyRegisteredError); !ok { t.Error("Registering failed:", err) } } time.Sleep(7 * time.Millisecond) } } } gather := func() { defer wg.Done() for { select { case <-quit: return default: if g, err := reg.Gather(); err != nil { t.Error("Gathering failed:", err) } else { if len(g) == 0 { continue } if len(g) != 1 { t.Error("Gathered unexpected number of metric families:", len(g)) } if len(g[0].Metric[0].Label) != len(labelNames)+1 { t.Error("Gathered unexpected number of label pairs:", len(g[0].Metric[0].Label)) } } time.Sleep(4 * time.Millisecond) } } } wg.Add(10) go observe() go observe() go register() go observe() go gather() go observe() go register() go observe() go gather() go observe() time.Sleep(time.Second) close(quit) wg.Wait() } func TestWriteToTextfile(t *testing.T) { expectedOut := `# HELP test_counter test counter # TYPE test_counter counter test_counter{name="qux"} 1 # HELP test_gauge test gauge # TYPE test_gauge gauge test_gauge{name="baz"} 1.1 # HELP test_hist test histogram # TYPE test_hist histogram test_hist_bucket{name="bar",le="0.005"} 0 test_hist_bucket{name="bar",le="0.01"} 0 test_hist_bucket{name="bar",le="0.025"} 0 test_hist_bucket{name="bar",le="0.05"} 0 test_hist_bucket{name="bar",le="0.1"} 0 test_hist_bucket{name="bar",le="0.25"} 0 test_hist_bucket{name="bar",le="0.5"} 0 test_hist_bucket{name="bar",le="1"} 1 test_hist_bucket{name="bar",le="2.5"} 1 test_hist_bucket{name="bar",le="5"} 2 test_hist_bucket{name="bar",le="10"} 2 test_hist_bucket{name="bar",le="+Inf"} 2 test_hist_sum{name="bar"} 3.64 test_hist_count{name="bar"} 2 # HELP test_summary test summary # TYPE test_summary summary test_summary{name="foo",quantile="0.5"} 10 test_summary{name="foo",quantile="0.9"} 20 test_summary{name="foo",quantile="0.99"} 20 test_summary_sum{name="foo"} 30 test_summary_count{name="foo"} 2 ` registry := 
prometheus.NewRegistry() summary := prometheus.NewSummaryVec( prometheus.SummaryOpts{ Name: "test_summary", Help: "test summary", Objectives: map[float64]float64{ 0.5: 0.05, 0.9: 0.01, 0.99: 0.001, }, }, []string{"name"}, ) histogram := prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "test_hist", Help: "test histogram", }, []string{"name"}, ) gauge := prometheus.NewGaugeVec( prometheus.GaugeOpts{ Name: "test_gauge", Help: "test gauge", }, []string{"name"}, ) counter := prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "test_counter", Help: "test counter", }, []string{"name"}, ) registry.MustRegister(summary) registry.MustRegister(histogram) registry.MustRegister(gauge) registry.MustRegister(counter) summary.With(prometheus.Labels{"name": "foo"}).Observe(10) summary.With(prometheus.Labels{"name": "foo"}).Observe(20) histogram.With(prometheus.Labels{"name": "bar"}).Observe(0.93) histogram.With(prometheus.Labels{"name": "bar"}).Observe(2.71) gauge.With(prometheus.Labels{"name": "baz"}).Set(1.1) counter.With(prometheus.Labels{"name": "qux"}).Inc() tmpfile, err := ioutil.TempFile("", "prom_registry_test") if err != nil { t.Fatal(err) } defer os.Remove(tmpfile.Name()) if err := prometheus.WriteToTextfile(tmpfile.Name(), registry); err != nil { t.Fatal(err) } fileBytes, err := ioutil.ReadFile(tmpfile.Name()) if err != nil { t.Fatal(err) } fileContents := string(fileBytes) if fileContents != expectedOut { t.Errorf( "files don't match, got:\n%s\nwant:\n%s", fileContents, expectedOut, ) } } // collidingCollector is a collection of prometheus.Collectors, // and is itself a prometheus.Collector. type collidingCollector struct { i int name string a, b, c, d prometheus.Collector } // Describe satisifies part of the prometheus.Collector interface. func (m *collidingCollector) Describe(desc chan<- *prometheus.Desc) { m.a.Describe(desc) m.b.Describe(desc) m.c.Describe(desc) m.d.Describe(desc) } // Collect satisifies part of the prometheus.Collector interface. func (m *collidingCollector) Collect(metric chan<- prometheus.Metric) { m.a.Collect(metric) m.b.Collect(metric) m.c.Collect(metric) m.d.Collect(metric) } // TestAlreadyRegistered will fail with the old, weaker hash function. It is // taken from https://play.golang.org/p/HpV7YE6LI_4 , authored by @awilliams. func TestAlreadyRegisteredCollision(t *testing.T) { reg := prometheus.NewRegistry() for i := 0; i < 10000; i++ { // A collector should be considered unique if its name and const // label values are unique. name := fmt.Sprintf("test-collector-%010d", i) collector := collidingCollector{ i: i, name: name, a: prometheus.NewCounter(prometheus.CounterOpts{ Name: "my_collector_a", ConstLabels: prometheus.Labels{ "name": name, "type": "test", }, }), b: prometheus.NewCounter(prometheus.CounterOpts{ Name: "my_collector_b", ConstLabels: prometheus.Labels{ "name": name, "type": "test", }, }), c: prometheus.NewCounter(prometheus.CounterOpts{ Name: "my_collector_c", ConstLabels: prometheus.Labels{ "name": name, "type": "test", }, }), d: prometheus.NewCounter(prometheus.CounterOpts{ Name: "my_collector_d", ConstLabels: prometheus.Labels{ "name": name, "type": "test", }, }), } // Register should not fail, since each collector has a unique // set of sub-collectors, determined by their names and const label values. 
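		// Added descriptive note (assuming the registry semantics this test
		// exercises): a descriptor's identity includes its fully-qualified name
		// and its const label values, so collectors that differ only in the
		// "name" const label must register cleanly. If the hash were weaker and
		// two of them collided, Register would return a
		// prometheus.AlreadyRegisteredError carrying both the existing and the
		// new collector, which is exactly what the error branch below reports
		// as a test failure.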
if err := reg.Register(&collector); err != nil { alreadyRegErr, ok := err.(prometheus.AlreadyRegisteredError) if !ok { t.Fatal(err) } previous := alreadyRegErr.ExistingCollector.(*collidingCollector) current := alreadyRegErr.NewCollector.(*collidingCollector) t.Errorf("Unexpected registration error: %q\nprevious collector: %s (i=%d)\ncurrent collector %s (i=%d)", alreadyRegErr, previous.name, previous.i, current.name, current.i) } } } client_golang-1.11.0/prometheus/summary.go000066400000000000000000000602711405741072000206240ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus import ( "fmt" "math" "runtime" "sort" "sync" "sync/atomic" "time" "github.com/beorn7/perks/quantile" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" dto "github.com/prometheus/client_model/go" ) // quantileLabel is used for the label that defines the quantile in a // summary. const quantileLabel = "quantile" // A Summary captures individual observations from an event or sample stream and // summarizes them in a manner similar to traditional summary statistics: 1. sum // of observations, 2. observation count, 3. rank estimations. // // A typical use-case is the observation of request latencies. By default, a // Summary provides the median, the 90th and the 99th percentile of the latency // as rank estimations. However, the default behavior will change in the // upcoming v1.0.0 of the library. There will be no rank estimations at all by // default. For a sane transition, it is recommended to set the desired rank // estimations explicitly. // // Note that the rank estimations cannot be aggregated in a meaningful way with // the Prometheus query language (i.e. you cannot average or add them). If you // need aggregatable quantiles (e.g. you want the 99th percentile latency of all // queries served across all instances of a service), consider the Histogram // metric type. See the Prometheus documentation for more details. // // To create Summary instances, use NewSummary. type Summary interface { Metric Collector // Observe adds a single observation to the summary. Observations are // usually positive or zero. Negative observations are accepted but // prevent current versions of Prometheus from properly detecting // counter resets in the sum of observations. See // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations // for details. Observe(float64) } var errQuantileLabelNotAllowed = fmt.Errorf( "%q is not allowed as label name in summaries", quantileLabel, ) // Default values for SummaryOpts. const ( // DefMaxAge is the default duration for which observations stay // relevant. DefMaxAge time.Duration = 10 * time.Minute // DefAgeBuckets is the default number of buckets used to calculate the // age of observations. DefAgeBuckets = 5 // DefBufCap is the standard buffer size for collecting Summary observations. 
DefBufCap = 500 ) // SummaryOpts bundles the options for creating a Summary metric. It is // mandatory to set Name to a non-empty string. While all other fields are // optional and can safely be left at their zero value, it is recommended to set // a help string and to explicitly set the Objectives field to the desired value // as the default value will change in the upcoming v1.0.0 of the library. type SummaryOpts struct { // Namespace, Subsystem, and Name are components of the fully-qualified // name of the Summary (created by joining these components with // "_"). Only Name is mandatory, the others merely help structuring the // name. Note that the fully-qualified name of the Summary must be a // valid Prometheus metric name. Namespace string Subsystem string Name string // Help provides information about this Summary. // // Metrics with the same fully-qualified name must have the same Help // string. Help string // ConstLabels are used to attach fixed labels to this metric. Metrics // with the same fully-qualified name must have the same label names in // their ConstLabels. // // Due to the way a Summary is represented in the Prometheus text format // and how it is handled by the Prometheus server internally, “quantile” // is an illegal label name. Construction of a Summary or SummaryVec // will panic if this label name is used in ConstLabels. // // ConstLabels are only used rarely. In particular, do not use them to // attach the same labels to all your metrics. Those use cases are // better covered by target labels set by the scraping Prometheus // server, or by one specific metric (e.g. a build_info or a // machine_role metric). See also // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels ConstLabels Labels // Objectives defines the quantile rank estimates with their respective // absolute error. If Objectives[q] = e, then the value reported for q // will be the φ-quantile value for some φ between q-e and q+e. The // default value is an empty map, resulting in a summary without // quantiles. Objectives map[float64]float64 // MaxAge defines the duration for which an observation stays relevant // for the summary. Only applies to pre-calculated quantiles, does not // apply to _sum and _count. Must be positive. The default value is // DefMaxAge. MaxAge time.Duration // AgeBuckets is the number of buckets used to exclude observations that // are older than MaxAge from the summary. A higher number has a // resource penalty, so only increase it if the higher resolution is // really required. For very high observation rates, you might want to // reduce the number of age buckets. With only one age bucket, you will // effectively see a complete reset of the summary each time MaxAge has // passed. The default value is DefAgeBuckets. AgeBuckets uint32 // BufCap defines the default sample stream buffer size. The default // value of DefBufCap should suffice for most uses. If there is a need // to increase the value, a multiple of 500 is recommended (because that // is the internal buffer size of the underlying package // "github.com/bmizerany/perks/quantile"). BufCap uint32 } // Problem with the sliding-window decay algorithm... The Merge method of // perk/quantile is actually not working as advertised - and it might be // unfixable, as the underlying algorithm is apparently not capable of merging // summaries in the first place. To avoid using Merge, we are currently adding // observations to _each_ age bucket, i.e. 
the effort to add a sample is // essentially multiplied by the number of age buckets. When rotating age // buckets, we empty the previous head stream. On scrape time, we simply take // the quantiles from the head stream (no merging required). Result: More effort // on observation time, less effort on scrape time, which is exactly the // opposite of what we try to accomplish, but at least the results are correct. // // The quite elegant previous contraption to merge the age buckets efficiently // on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0) // can't be used anymore. // NewSummary creates a new Summary based on the provided SummaryOpts. func NewSummary(opts SummaryOpts) Summary { return newSummary( NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, nil, opts.ConstLabels, ), opts, ) } func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { if len(desc.variableLabels) != len(labelValues) { panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) } for _, n := range desc.variableLabels { if n == quantileLabel { panic(errQuantileLabelNotAllowed) } } for _, lp := range desc.constLabelPairs { if lp.GetName() == quantileLabel { panic(errQuantileLabelNotAllowed) } } if opts.Objectives == nil { opts.Objectives = map[float64]float64{} } if opts.MaxAge < 0 { panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge)) } if opts.MaxAge == 0 { opts.MaxAge = DefMaxAge } if opts.AgeBuckets == 0 { opts.AgeBuckets = DefAgeBuckets } if opts.BufCap == 0 { opts.BufCap = DefBufCap } if len(opts.Objectives) == 0 { // Use the lock-free implementation of a Summary without objectives. s := &noObjectivesSummary{ desc: desc, labelPairs: MakeLabelPairs(desc, labelValues), counts: [2]*summaryCounts{{}, {}}, } s.init(s) // Init self-collection. return s } s := &summary{ desc: desc, objectives: opts.Objectives, sortedObjectives: make([]float64, 0, len(opts.Objectives)), labelPairs: MakeLabelPairs(desc, labelValues), hotBuf: make([]float64, 0, opts.BufCap), coldBuf: make([]float64, 0, opts.BufCap), streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), } s.headStreamExpTime = time.Now().Add(s.streamDuration) s.hotBufExpTime = s.headStreamExpTime for i := uint32(0); i < opts.AgeBuckets; i++ { s.streams = append(s.streams, s.newStream()) } s.headStream = s.streams[0] for qu := range s.objectives { s.sortedObjectives = append(s.sortedObjectives, qu) } sort.Float64s(s.sortedObjectives) s.init(s) // Init self-collection. return s } type summary struct { selfCollector bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. mtx sync.Mutex // Protects every other moving part. // Lock bufMtx before mtx if both are needed. desc *Desc objectives map[float64]float64 sortedObjectives []float64 labelPairs []*dto.LabelPair sum float64 cnt uint64 hotBuf, coldBuf []float64 streams []*quantile.Stream streamDuration time.Duration headStream *quantile.Stream headStreamIdx int headStreamExpTime, hotBufExpTime time.Time } func (s *summary) Desc() *Desc { return s.desc } func (s *summary) Observe(v float64) { s.bufMtx.Lock() defer s.bufMtx.Unlock() now := time.Now() if now.After(s.hotBufExpTime) { s.asyncFlush(now) } s.hotBuf = append(s.hotBuf, v) if len(s.hotBuf) == cap(s.hotBuf) { s.asyncFlush(now) } } func (s *summary) Write(out *dto.Metric) error { sum := &dto.Summary{} qs := make([]*dto.Quantile, 0, len(s.objectives)) s.bufMtx.Lock() s.mtx.Lock() // Swap bufs even if hotBuf is empty to set new hotBufExpTime. 
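	// Added descriptive note: with bufMtx and mtx both held, the hot and cold
	// buffers are swapped below (advancing hotBufExpTime even if hotBuf was
	// empty), the cold buffer is flushed into every age-bucket stream by
	// flushColdBuf, and the requested quantiles are then read from the current
	// head stream. This matches the "more effort on observation time, less
	// effort on scrape time" trade-off described in the comment above
	// NewSummary.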
s.swapBufs(time.Now()) s.bufMtx.Unlock() s.flushColdBuf() sum.SampleCount = proto.Uint64(s.cnt) sum.SampleSum = proto.Float64(s.sum) for _, rank := range s.sortedObjectives { var q float64 if s.headStream.Count() == 0 { q = math.NaN() } else { q = s.headStream.Query(rank) } qs = append(qs, &dto.Quantile{ Quantile: proto.Float64(rank), Value: proto.Float64(q), }) } s.mtx.Unlock() if len(qs) > 0 { sort.Sort(quantSort(qs)) } sum.Quantile = qs out.Summary = sum out.Label = s.labelPairs return nil } func (s *summary) newStream() *quantile.Stream { return quantile.NewTargeted(s.objectives) } // asyncFlush needs bufMtx locked. func (s *summary) asyncFlush(now time.Time) { s.mtx.Lock() s.swapBufs(now) // Unblock the original goroutine that was responsible for the mutation // that triggered the compaction. But hold onto the global non-buffer // state mutex until the operation finishes. go func() { s.flushColdBuf() s.mtx.Unlock() }() } // rotateStreams needs mtx AND bufMtx locked. func (s *summary) maybeRotateStreams() { for !s.hotBufExpTime.Equal(s.headStreamExpTime) { s.headStream.Reset() s.headStreamIdx++ if s.headStreamIdx >= len(s.streams) { s.headStreamIdx = 0 } s.headStream = s.streams[s.headStreamIdx] s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration) } } // flushColdBuf needs mtx locked. func (s *summary) flushColdBuf() { for _, v := range s.coldBuf { for _, stream := range s.streams { stream.Insert(v) } s.cnt++ s.sum += v } s.coldBuf = s.coldBuf[0:0] s.maybeRotateStreams() } // swapBufs needs mtx AND bufMtx locked, coldBuf must be empty. func (s *summary) swapBufs(now time.Time) { if len(s.coldBuf) != 0 { panic("coldBuf is not empty") } s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf // hotBuf is now empty and gets new expiration set. for now.After(s.hotBufExpTime) { s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration) } } type summaryCounts struct { // sumBits contains the bits of the float64 representing the sum of all // observations. sumBits and count have to go first in the struct to // guarantee alignment for atomic operations. // http://golang.org/pkg/sync/atomic/#pkg-note-BUG sumBits uint64 count uint64 } type noObjectivesSummary struct { // countAndHotIdx enables lock-free writes with use of atomic updates. // The most significant bit is the hot index [0 or 1] of the count field // below. Observe calls update the hot one. All remaining bits count the // number of Observe calls. Observe starts by incrementing this counter, // and finish by incrementing the count field in the respective // summaryCounts, as a marker for completion. // // Calls of the Write method (which are non-mutating reads from the // perspective of the summary) swap the hot–cold under the writeMtx // lock. A cooldown is awaited (while locked) by comparing the number of // observations with the initiation count. Once they match, then the // last observation on the now cool one has completed. All cool fields must // be merged into the new hot before releasing writeMtx. // Fields with atomic access first! See alignment constraint: // http://golang.org/pkg/sync/atomic/#pkg-note-BUG countAndHotIdx uint64 selfCollector desc *Desc writeMtx sync.Mutex // Only used in the Write method. // Two counts, one is "hot" for lock-free observations, the other is // "cold" for writing out a dto.Metric. It has to be an array of // pointers to guarantee 64bit alignment of the histogramCounts, see // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. 
counts [2]*summaryCounts labelPairs []*dto.LabelPair } func (s *noObjectivesSummary) Desc() *Desc { return s.desc } func (s *noObjectivesSummary) Observe(v float64) { // We increment h.countAndHotIdx so that the counter in the lower // 63 bits gets incremented. At the same time, we get the new value // back, which we can use to find the currently-hot counts. n := atomic.AddUint64(&s.countAndHotIdx, 1) hotCounts := s.counts[n>>63] for { oldBits := atomic.LoadUint64(&hotCounts.sumBits) newBits := math.Float64bits(math.Float64frombits(oldBits) + v) if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { break } } // Increment count last as we take it as a signal that the observation // is complete. atomic.AddUint64(&hotCounts.count, 1) } func (s *noObjectivesSummary) Write(out *dto.Metric) error { // For simplicity, we protect this whole method by a mutex. It is not in // the hot path, i.e. Observe is called much more often than Write. The // complication of making Write lock-free isn't worth it, if possible at // all. s.writeMtx.Lock() defer s.writeMtx.Unlock() // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) // without touching the count bits. See the struct comments for a full // description of the algorithm. n := atomic.AddUint64(&s.countAndHotIdx, 1<<63) // count is contained unchanged in the lower 63 bits. count := n & ((1 << 63) - 1) // The most significant bit tells us which counts is hot. The complement // is thus the cold one. hotCounts := s.counts[n>>63] coldCounts := s.counts[(^n)>>63] // Await cooldown. for count != atomic.LoadUint64(&coldCounts.count) { runtime.Gosched() // Let observations get work done. } sum := &dto.Summary{ SampleCount: proto.Uint64(count), SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), } out.Summary = sum out.Label = s.labelPairs // Finally add all the cold counts to the new hot counts and reset the cold counts. atomic.AddUint64(&hotCounts.count, count) atomic.StoreUint64(&coldCounts.count, 0) for { oldBits := atomic.LoadUint64(&hotCounts.sumBits) newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum()) if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { atomic.StoreUint64(&coldCounts.sumBits, 0) break } } return nil } type quantSort []*dto.Quantile func (s quantSort) Len() int { return len(s) } func (s quantSort) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s quantSort) Less(i, j int) bool { return s[i].GetQuantile() < s[j].GetQuantile() } // SummaryVec is a Collector that bundles a set of Summaries that all share the // same Desc, but have different values for their variable labels. This is used // if you want to count the same thing partitioned by various dimensions // (e.g. HTTP request latencies, partitioned by status code and method). Create // instances with NewSummaryVec. type SummaryVec struct { *MetricVec } // NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and // partitioned by the given label names. // // Due to the way a Summary is represented in the Prometheus text format and how // it is handled by the Prometheus server internally, “quantile” is an illegal // label name. NewSummaryVec will panic if this label name is used. 
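//
// A hedged construction sketch (the metric name, label name, and tuning values
// are illustrative only):
//
//	v := prometheus.NewSummaryVec(prometheus.SummaryOpts{
//		Name:       "rpc_duration_seconds",
//		Help:       "RPC latency partitioned by method.",
//		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
//		MaxAge:     5 * time.Minute,
//		AgeBuckets: 5,
//	}, []string{"method"})
//	v.WithLabelValues("get").Observe(0.27)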
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { for _, ln := range labelNames { if ln == quantileLabel { panic(errQuantileLabelNotAllowed) } } desc := NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, labelNames, opts.ConstLabels, ) return &SummaryVec{ MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { return newSummary(desc, opts, lvs...) }), } } // GetMetricWithLabelValues returns the Summary for the given slice of label // values (same order as the variable labels in Desc). If that combination of // label values is accessed for the first time, a new Summary is created. // // It is possible to call this method without using the returned Summary to only // create the new Summary but leave it at its starting value, a Summary without // any observations. // // Keeping the Summary for later use is possible (and should be considered if // performance is critical), but keep in mind that Reset, DeleteLabelValues and // Delete can be used to delete the Summary from the SummaryVec. In that case, // the Summary will still exist, but it will not be exported anymore, even if a // Summary with the same label values is created later. See also the CounterVec // example. // // An error is returned if the number of label values is not the same as the // number of variable labels in Desc (minus any curried labels). // // Note that for more than one label value, this method is prone to mistakes // caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as // an alternative to avoid that type of mistake. For higher label numbers, the // latter has a much more readable (albeit more verbose) syntax, but it comes // with a performance overhead (for creating and processing the Labels map). // See also the GaugeVec example. func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) if metric != nil { return metric.(Observer), err } return nil, err } // GetMetricWith returns the Summary for the given Labels map (the label names // must match those of the variable labels in Desc). If that label map is // accessed for the first time, a new Summary is created. Implications of // creating a Summary without using it and keeping the Summary for later use are // the same as for GetMetricWithLabelValues. // // An error is returned if the number and names of the Labels are inconsistent // with those of the variable labels in Desc (minus any curried labels). // // This method is used for the same purpose as // GetMetricWithLabelValues(...string). See there for pros and cons of the two // methods. func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { metric, err := v.MetricVec.GetMetricWith(labels) if metric != nil { return metric.(Observer), err } return nil, err } // WithLabelValues works as GetMetricWithLabelValues, but panics where // GetMetricWithLabelValues would have returned an error. Not returning an // error allows shortcuts like // myVec.WithLabelValues("404", "GET").Observe(42.21) func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { s, err := v.GetMetricWithLabelValues(lvs...) if err != nil { panic(err) } return s } // With works as GetMetricWith, but panics where GetMetricWithLabels would have // returned an error. 
Not returning an error allows shortcuts like // myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) func (v *SummaryVec) With(labels Labels) Observer { s, err := v.GetMetricWith(labels) if err != nil { panic(err) } return s } // CurryWith returns a vector curried with the provided labels, i.e. the // returned vector has those labels pre-set for all labeled operations performed // on it. The cardinality of the curried vector is reduced accordingly. The // order of the remaining labels stays the same (just with the curried labels // taken out of the sequence – which is relevant for the // (GetMetric)WithLabelValues methods). It is possible to curry a curried // vector, but only with labels not yet used for currying before. // // The metrics contained in the SummaryVec are shared between the curried and // uncurried vectors. They are just accessed differently. Curried and uncurried // vectors behave identically in terms of collection. Only one must be // registered with a given registry (usually the uncurried version). The Reset // method deletes all metrics, even if called on a curried vector. func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) { vec, err := v.MetricVec.CurryWith(labels) if vec != nil { return &SummaryVec{vec}, err } return nil, err } // MustCurryWith works as CurryWith but panics where CurryWith would have // returned an error. func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec { vec, err := v.CurryWith(labels) if err != nil { panic(err) } return vec } type constSummary struct { desc *Desc count uint64 sum float64 quantiles map[float64]float64 labelPairs []*dto.LabelPair } func (s *constSummary) Desc() *Desc { return s.desc } func (s *constSummary) Write(out *dto.Metric) error { sum := &dto.Summary{} qs := make([]*dto.Quantile, 0, len(s.quantiles)) sum.SampleCount = proto.Uint64(s.count) sum.SampleSum = proto.Float64(s.sum) for rank, q := range s.quantiles { qs = append(qs, &dto.Quantile{ Quantile: proto.Float64(rank), Value: proto.Float64(q), }) } if len(qs) > 0 { sort.Sort(quantSort(qs)) } sum.Quantile = qs out.Summary = sum out.Label = s.labelPairs return nil } // NewConstSummary returns a metric representing a Prometheus summary with fixed // values for the count, sum, and quantiles. As those parameters cannot be // changed, the returned value does not implement the Summary interface (but // only the Metric interface). Users of this package will not have much use for // it in regular operations. However, when implementing custom Collectors, it is // useful as a throw-away metric that is generated on the fly to send it to // Prometheus in the Collect method. // // quantiles maps ranks to quantile values. For example, a median latency of // 0.23s and a 99th percentile latency of 0.56s would be expressed as: // map[float64]float64{0.5: 0.23, 0.99: 0.56} // // NewConstSummary returns an error if the length of labelValues is not // consistent with the variable labels in Desc or if Desc is invalid. func NewConstSummary( desc *Desc, count uint64, sum float64, quantiles map[float64]float64, labelValues ...string, ) (Metric, error) { if desc.err != nil { return nil, desc.err } if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { return nil, err } return &constSummary{ desc: desc, count: count, sum: sum, quantiles: quantiles, labelPairs: MakeLabelPairs(desc, labelValues), }, nil } // MustNewConstSummary is a version of NewConstSummary that panics where // NewConstMetric would have returned an error. 
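//
// A hedged usage sketch (the descriptor and the numbers are illustrative; this
// is typically done from within a custom Collector's Collect method, with ch
// being the channel passed to Collect):
//
//	desc := prometheus.NewDesc("external_rpc_duration_seconds", "Latency scraped from another system.", nil, nil)
//	ch <- prometheus.MustNewConstSummary(desc, 2, 30, map[float64]float64{0.5: 10, 0.99: 20})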
func MustNewConstSummary( desc *Desc, count uint64, sum float64, quantiles map[float64]float64, labelValues ...string, ) Metric { m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...) if err != nil { panic(err) } return m } client_golang-1.11.0/prometheus/summary_test.go000066400000000000000000000231041405741072000216550ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus import ( "math" "math/rand" "sort" "sync" "testing" "testing/quick" "time" dto "github.com/prometheus/client_model/go" ) func TestSummaryWithDefaultObjectives(t *testing.T) { reg := NewRegistry() summaryWithDefaultObjectives := NewSummary(SummaryOpts{ Name: "default_objectives", Help: "Test help.", }) if err := reg.Register(summaryWithDefaultObjectives); err != nil { t.Error(err) } m := &dto.Metric{} if err := summaryWithDefaultObjectives.Write(m); err != nil { t.Error(err) } if len(m.GetSummary().Quantile) != 0 { t.Error("expected no objectives in summary") } } func TestSummaryWithoutObjectives(t *testing.T) { reg := NewRegistry() summaryWithEmptyObjectives := NewSummary(SummaryOpts{ Name: "empty_objectives", Help: "Test help.", Objectives: map[float64]float64{}, }) if err := reg.Register(summaryWithEmptyObjectives); err != nil { t.Error(err) } summaryWithEmptyObjectives.Observe(3) summaryWithEmptyObjectives.Observe(0.14) m := &dto.Metric{} if err := summaryWithEmptyObjectives.Write(m); err != nil { t.Error(err) } if got, want := m.GetSummary().GetSampleSum(), 3.14; got != want { t.Errorf("got sample sum %f, want %f", got, want) } if got, want := m.GetSummary().GetSampleCount(), uint64(2); got != want { t.Errorf("got sample sum %d, want %d", got, want) } if len(m.GetSummary().Quantile) != 0 { t.Error("expected no objectives in summary") } } func TestSummaryWithQuantileLabel(t *testing.T) { defer func() { if r := recover(); r == nil { t.Error("Attempt to create Summary with 'quantile' label did not panic.") } }() _ = NewSummary(SummaryOpts{ Name: "test_summary", Help: "less", ConstLabels: Labels{"quantile": "test"}, }) } func TestSummaryVecWithQuantileLabel(t *testing.T) { defer func() { if r := recover(); r == nil { t.Error("Attempt to create SummaryVec with 'quantile' label did not panic.") } }() _ = NewSummaryVec(SummaryOpts{ Name: "test_summary", Help: "less", }, []string{"quantile"}) } func benchmarkSummaryObserve(w int, b *testing.B) { b.StopTimer() wg := new(sync.WaitGroup) wg.Add(w) g := new(sync.WaitGroup) g.Add(1) s := NewSummary(SummaryOpts{}) for i := 0; i < w; i++ { go func() { g.Wait() for i := 0; i < b.N; i++ { s.Observe(float64(i)) } wg.Done() }() } b.StartTimer() g.Done() wg.Wait() } func BenchmarkSummaryObserve1(b *testing.B) { benchmarkSummaryObserve(1, b) } func BenchmarkSummaryObserve2(b *testing.B) { benchmarkSummaryObserve(2, b) } func BenchmarkSummaryObserve4(b *testing.B) { benchmarkSummaryObserve(4, b) } func BenchmarkSummaryObserve8(b *testing.B) { benchmarkSummaryObserve(8, b) } func 
benchmarkSummaryWrite(w int, b *testing.B) { b.StopTimer() wg := new(sync.WaitGroup) wg.Add(w) g := new(sync.WaitGroup) g.Add(1) s := NewSummary(SummaryOpts{}) for i := 0; i < 1000000; i++ { s.Observe(float64(i)) } for j := 0; j < w; j++ { outs := make([]dto.Metric, b.N) go func(o []dto.Metric) { g.Wait() for i := 0; i < b.N; i++ { s.Write(&o[i]) } wg.Done() }(outs) } b.StartTimer() g.Done() wg.Wait() } func BenchmarkSummaryWrite1(b *testing.B) { benchmarkSummaryWrite(1, b) } func BenchmarkSummaryWrite2(b *testing.B) { benchmarkSummaryWrite(2, b) } func BenchmarkSummaryWrite4(b *testing.B) { benchmarkSummaryWrite(4, b) } func BenchmarkSummaryWrite8(b *testing.B) { benchmarkSummaryWrite(8, b) } func TestSummaryConcurrency(t *testing.T) { if testing.Short() { t.Skip("Skipping test in short mode.") } rand.Seed(42) objMap := map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} it := func(n uint32) bool { mutations := int(n%1e4 + 1e4) concLevel := int(n%5 + 1) total := mutations * concLevel var start, end sync.WaitGroup start.Add(1) end.Add(concLevel) sum := NewSummary(SummaryOpts{ Name: "test_summary", Help: "helpless", Objectives: objMap, }) allVars := make([]float64, total) var sampleSum float64 for i := 0; i < concLevel; i++ { vals := make([]float64, mutations) for j := 0; j < mutations; j++ { v := rand.NormFloat64() vals[j] = v allVars[i*mutations+j] = v sampleSum += v } go func(vals []float64) { start.Wait() for _, v := range vals { sum.Observe(v) } end.Done() }(vals) } sort.Float64s(allVars) start.Done() end.Wait() m := &dto.Metric{} sum.Write(m) if got, want := int(*m.Summary.SampleCount), total; got != want { t.Errorf("got sample count %d, want %d", got, want) } if got, want := *m.Summary.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 { t.Errorf("got sample sum %f, want %f", got, want) } objSlice := make([]float64, 0, len(objMap)) for qu := range objMap { objSlice = append(objSlice, qu) } sort.Float64s(objSlice) for i, wantQ := range objSlice { ε := objMap[wantQ] gotQ := *m.Summary.Quantile[i].Quantile gotV := *m.Summary.Quantile[i].Value min, max := getBounds(allVars, wantQ, ε) if gotQ != wantQ { t.Errorf("got quantile %f, want %f", gotQ, wantQ) } if gotV < min || gotV > max { t.Errorf("got %f for quantile %f, want [%f,%f]", gotV, gotQ, min, max) } } return true } if err := quick.Check(it, nil); err != nil { t.Error(err) } } func TestSummaryVecConcurrency(t *testing.T) { if testing.Short() { t.Skip("Skipping test in short mode.") } rand.Seed(42) objMap := map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} objSlice := make([]float64, 0, len(objMap)) for qu := range objMap { objSlice = append(objSlice, qu) } sort.Float64s(objSlice) it := func(n uint32) bool { mutations := int(n%1e4 + 1e4) concLevel := int(n%7 + 1) vecLength := int(n%3 + 1) var start, end sync.WaitGroup start.Add(1) end.Add(concLevel) sum := NewSummaryVec( SummaryOpts{ Name: "test_summary", Help: "helpless", Objectives: objMap, }, []string{"label"}, ) allVars := make([][]float64, vecLength) sampleSums := make([]float64, vecLength) for i := 0; i < concLevel; i++ { vals := make([]float64, mutations) picks := make([]int, mutations) for j := 0; j < mutations; j++ { v := rand.NormFloat64() vals[j] = v pick := rand.Intn(vecLength) picks[j] = pick allVars[pick] = append(allVars[pick], v) sampleSums[pick] += v } go func(vals []float64) { start.Wait() for i, v := range vals { sum.WithLabelValues(string('A' + rune(picks[i]))).Observe(v) } end.Done() }(vals) } for _, vars := range allVars { sort.Float64s(vars) } 
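		// Added descriptive note: the verification loop below re-reads each
		// per-label Summary via WithLabelValues, writes it into a dto.Metric,
		// and checks that the sample count and sum match the recorded
		// observations and that every reported quantile lies within the
		// tolerance band computed by getBounds.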
start.Done() end.Wait() for i := 0; i < vecLength; i++ { m := &dto.Metric{} s := sum.WithLabelValues(string('A' + rune(i))) s.(Summary).Write(m) if got, want := int(*m.Summary.SampleCount), len(allVars[i]); got != want { t.Errorf("got sample count %d for label %c, want %d", got, 'A'+i, want) } if got, want := *m.Summary.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 { t.Errorf("got sample sum %f for label %c, want %f", got, 'A'+i, want) } for j, wantQ := range objSlice { ε := objMap[wantQ] gotQ := *m.Summary.Quantile[j].Quantile gotV := *m.Summary.Quantile[j].Value min, max := getBounds(allVars[i], wantQ, ε) if gotQ != wantQ { t.Errorf("got quantile %f for label %c, want %f", gotQ, 'A'+i, wantQ) } if gotV < min || gotV > max { t.Errorf("got %f for quantile %f for label %c, want [%f,%f]", gotV, gotQ, 'A'+i, min, max) } } } return true } if err := quick.Check(it, nil); err != nil { t.Error(err) } } func TestSummaryDecay(t *testing.T) { if testing.Short() { t.Skip("Skipping test in short mode.") // More because it depends on timing than because it is particularly long... } sum := NewSummary(SummaryOpts{ Name: "test_summary", Help: "helpless", MaxAge: 100 * time.Millisecond, Objectives: map[float64]float64{0.1: 0.001}, AgeBuckets: 10, }) m := &dto.Metric{} i := 0 tick := time.NewTicker(time.Millisecond) for range tick.C { i++ sum.Observe(float64(i)) if i%10 == 0 { sum.Write(m) if got, want := *m.Summary.Quantile[0].Value, math.Max(float64(i)/10, float64(i-90)); math.Abs(got-want) > 20 { t.Errorf("%d. got %f, want %f", i, got, want) } m.Reset() } if i >= 1000 { break } } tick.Stop() // Wait for MaxAge without observations and make sure quantiles are NaN. time.Sleep(100 * time.Millisecond) sum.Write(m) if got := *m.Summary.Quantile[0].Value; !math.IsNaN(got) { t.Errorf("got %f, want NaN after expiration", got) } } func getBounds(vars []float64, q, ε float64) (min, max float64) { // TODO(beorn7): This currently tolerates an error of up to 2*ε. The // error must be at most ε, but for some reason, it's sometimes slightly // higher. That's a bug. n := float64(len(vars)) lower := int((q - 2*ε) * n) upper := int(math.Ceil((q + 2*ε) * n)) min = vars[0] if lower > 1 { min = vars[lower-1] } max = vars[len(vars)-1] if upper < len(vars) { max = vars[upper-1] } return } client_golang-1.11.0/prometheus/testutil/000077500000000000000000000000001405741072000204475ustar00rootroot00000000000000client_golang-1.11.0/prometheus/testutil/lint.go000066400000000000000000000033231405741072000217450ustar00rootroot00000000000000// Copyright 2020 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package testutil import ( "fmt" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil/promlint" ) // CollectAndLint registers the provided Collector with a newly created pedantic // Registry. It then calls GatherAndLint with that Registry and with the // provided metricNames. 
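//
// A hedged usage sketch in a test (the collector and metric name are
// illustrative):
//
//	problems, err := testutil.CollectAndLint(requestsTotal, "http_requests_total")
//	if err != nil {
//		t.Fatal(err)
//	}
//	if len(problems) != 0 {
//		t.Errorf("lint problems: %v", problems)
//	}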
func CollectAndLint(c prometheus.Collector, metricNames ...string) ([]promlint.Problem, error) { reg := prometheus.NewPedanticRegistry() if err := reg.Register(c); err != nil { return nil, fmt.Errorf("registering collector failed: %s", err) } return GatherAndLint(reg, metricNames...) } // GatherAndLint gathers all metrics from the provided Gatherer and checks them // with the linter in the promlint package. If any metricNames are provided, // only metrics with those names are checked. func GatherAndLint(g prometheus.Gatherer, metricNames ...string) ([]promlint.Problem, error) { got, err := g.Gather() if err != nil { return nil, fmt.Errorf("gathering metrics failed: %s", err) } if metricNames != nil { got = filterMetrics(got, metricNames) } return promlint.NewWithMetricFamilies(got).Lint() } client_golang-1.11.0/prometheus/testutil/lint_test.go000066400000000000000000000037001405741072000230030ustar00rootroot00000000000000// Copyright 2020 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package testutil import ( "testing" "github.com/prometheus/client_golang/prometheus" ) func TestCollectAndLintGood(t *testing.T) { cnt := prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "some_total", Help: "A value that represents a counter.", ConstLabels: prometheus.Labels{ "label1": "value1", }, }, []string{"foo"}, ) cnt.WithLabelValues("bar") cnt.WithLabelValues("baz") problems, err := CollectAndLint(cnt) if err != nil { t.Error("Unexpected error:", err) } if len(problems) > 0 { t.Error("Unexpected lint problems:", problems) } } func TestCollectAndLintBad(t *testing.T) { cnt := prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "someThing_ms", Help: "A value that represents a counter.", ConstLabels: prometheus.Labels{ "label1": "value1", }, }, []string{"fooBar"}, ) cnt.WithLabelValues("bar") cnt.WithLabelValues("baz") problems, err := CollectAndLint(cnt) if err != nil { t.Error("Unexpected error:", err) } if len(problems) < 5 { // The exact nature of the lint problems found is tested within // the promlint package itself. Here we only want to make sure // that the collector successfully hits the linter and that at // least the five problems that the linter could recognize at // the time of writing this test are flagged. t.Error("Not enough lint problems found.") } } client_golang-1.11.0/prometheus/testutil/promlint/000077500000000000000000000000001405741072000223135ustar00rootroot00000000000000client_golang-1.11.0/prometheus/testutil/promlint/promlint.go000066400000000000000000000241431405741072000245120ustar00rootroot00000000000000// Copyright 2020 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package promlint provides a linter for Prometheus metrics. package promlint import ( "fmt" "io" "regexp" "sort" "strings" "github.com/prometheus/common/expfmt" dto "github.com/prometheus/client_model/go" ) // A Linter is a Prometheus metrics linter. It identifies issues with metric // names, types, and metadata, and reports them to the caller. type Linter struct { // The linter will read metrics in the Prometheus text format from r and // then lint it, _and_ it will lint the metrics provided directly as // MetricFamily proto messages in mfs. Note, however, that the current // constructor functions New and NewWithMetricFamilies only ever set one // of them. r io.Reader mfs []*dto.MetricFamily } // A Problem is an issue detected by a Linter. type Problem struct { // The name of the metric indicated by this Problem. Metric string // A description of the issue for this Problem. Text string } // newProblem is helper function to create a Problem. func newProblem(mf *dto.MetricFamily, text string) Problem { return Problem{ Metric: mf.GetName(), Text: text, } } // New creates a new Linter that reads an input stream of Prometheus metrics in // the Prometheus text exposition format. func New(r io.Reader) *Linter { return &Linter{ r: r, } } // NewWithMetricFamilies creates a new Linter that reads from a slice of // MetricFamily protobuf messages. func NewWithMetricFamilies(mfs []*dto.MetricFamily) *Linter { return &Linter{ mfs: mfs, } } // Lint performs a linting pass, returning a slice of Problems indicating any // issues found in the metrics stream. The slice is sorted by metric name // and issue description. func (l *Linter) Lint() ([]Problem, error) { var problems []Problem if l.r != nil { d := expfmt.NewDecoder(l.r, expfmt.FmtText) mf := &dto.MetricFamily{} for { if err := d.Decode(mf); err != nil { if err == io.EOF { break } return nil, err } problems = append(problems, lint(mf)...) } } for _, mf := range l.mfs { problems = append(problems, lint(mf)...) } // Ensure deterministic output. sort.SliceStable(problems, func(i, j int) bool { if problems[i].Metric == problems[j].Metric { return problems[i].Text < problems[j].Text } return problems[i].Metric < problems[j].Metric }) return problems, nil } // lint is the entry point for linting a single metric. func lint(mf *dto.MetricFamily) []Problem { fns := []func(mf *dto.MetricFamily) []Problem{ lintHelp, lintMetricUnits, lintCounter, lintHistogramSummaryReserved, lintMetricTypeInName, lintReservedChars, lintCamelCase, lintUnitAbbreviations, } var problems []Problem for _, fn := range fns { problems = append(problems, fn(mf)...) } // TODO(mdlayher): lint rules for specific metrics types. return problems } // lintHelp detects issues related to the help text for a metric. func lintHelp(mf *dto.MetricFamily) []Problem { var problems []Problem // Expect all metrics to have help text available. if mf.Help == nil { problems = append(problems, newProblem(mf, "no help text")) } return problems } // lintMetricUnits detects issues with metric unit names. 
func lintMetricUnits(mf *dto.MetricFamily) []Problem { var problems []Problem unit, base, ok := metricUnits(*mf.Name) if !ok { // No known units detected. return nil } // Unit is already a base unit. if unit == base { return nil } problems = append(problems, newProblem(mf, fmt.Sprintf("use base unit %q instead of %q", base, unit))) return problems } // lintCounter detects issues specific to counters, as well as patterns that should // only be used with counters. func lintCounter(mf *dto.MetricFamily) []Problem { var problems []Problem isCounter := mf.GetType() == dto.MetricType_COUNTER isUntyped := mf.GetType() == dto.MetricType_UNTYPED hasTotalSuffix := strings.HasSuffix(mf.GetName(), "_total") switch { case isCounter && !hasTotalSuffix: problems = append(problems, newProblem(mf, `counter metrics should have "_total" suffix`)) case !isUntyped && !isCounter && hasTotalSuffix: problems = append(problems, newProblem(mf, `non-counter metrics should not have "_total" suffix`)) } return problems } // lintHistogramSummaryReserved detects when other types of metrics use names or labels // reserved for use by histograms and/or summaries. func lintHistogramSummaryReserved(mf *dto.MetricFamily) []Problem { // These rules do not apply to untyped metrics. t := mf.GetType() if t == dto.MetricType_UNTYPED { return nil } var problems []Problem isHistogram := t == dto.MetricType_HISTOGRAM isSummary := t == dto.MetricType_SUMMARY n := mf.GetName() if !isHistogram && strings.HasSuffix(n, "_bucket") { problems = append(problems, newProblem(mf, `non-histogram metrics should not have "_bucket" suffix`)) } if !isHistogram && !isSummary && strings.HasSuffix(n, "_count") { problems = append(problems, newProblem(mf, `non-histogram and non-summary metrics should not have "_count" suffix`)) } if !isHistogram && !isSummary && strings.HasSuffix(n, "_sum") { problems = append(problems, newProblem(mf, `non-histogram and non-summary metrics should not have "_sum" suffix`)) } for _, m := range mf.GetMetric() { for _, l := range m.GetLabel() { ln := l.GetName() if !isHistogram && ln == "le" { problems = append(problems, newProblem(mf, `non-histogram metrics should not have "le" label`)) } if !isSummary && ln == "quantile" { problems = append(problems, newProblem(mf, `non-summary metrics should not have "quantile" label`)) } } } return problems } // lintMetricTypeInName detects when metric types are included in the metric name. func lintMetricTypeInName(mf *dto.MetricFamily) []Problem { var problems []Problem n := strings.ToLower(mf.GetName()) for i, t := range dto.MetricType_name { if i == int32(dto.MetricType_UNTYPED) { continue } typename := strings.ToLower(t) if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) { problems = append(problems, newProblem(mf, fmt.Sprintf(`metric name should not include type '%s'`, typename))) } } return problems } // lintReservedChars detects colons in metric names. func lintReservedChars(mf *dto.MetricFamily) []Problem { var problems []Problem if strings.Contains(mf.GetName(), ":") { problems = append(problems, newProblem(mf, "metric names should not contain ':'")) } return problems } var camelCase = regexp.MustCompile(`[a-z][A-Z]`) // lintCamelCase detects metric names and label names written in camelCase. 
func lintCamelCase(mf *dto.MetricFamily) []Problem { var problems []Problem if camelCase.FindString(mf.GetName()) != "" { problems = append(problems, newProblem(mf, "metric names should be written in 'snake_case' not 'camelCase'")) } for _, m := range mf.GetMetric() { for _, l := range m.GetLabel() { if camelCase.FindString(l.GetName()) != "" { problems = append(problems, newProblem(mf, "label names should be written in 'snake_case' not 'camelCase'")) } } } return problems } // lintUnitAbbreviations detects abbreviated units in the metric name. func lintUnitAbbreviations(mf *dto.MetricFamily) []Problem { var problems []Problem n := strings.ToLower(mf.GetName()) for _, s := range unitAbbreviations { if strings.Contains(n, "_"+s+"_") || strings.HasSuffix(n, "_"+s) { problems = append(problems, newProblem(mf, "metric names should not contain abbreviated units")) } } return problems } // metricUnits attempts to detect known unit types used as part of a metric name, // e.g. "foo_bytes_total" or "bar_baz_milligrams". func metricUnits(m string) (unit string, base string, ok bool) { ss := strings.Split(m, "_") for unit, base := range units { // Also check for "no prefix". for _, p := range append(unitPrefixes, "") { for _, s := range ss { // Attempt to explicitly match a known unit with a known prefix, // as some words may look like "units" when matching suffix. // // As an example, "thermometers" should not match "meters", but // "kilometers" should. if s == p+unit { return p + unit, base, true } } } } return "", "", false } // Units and their possible prefixes recognized by this library. More can be // added over time as needed. var ( // map a unit to the appropriate base unit. units = map[string]string{ // Base units. "amperes": "amperes", "bytes": "bytes", "celsius": "celsius", // Also allow Celsius because it is common in typical Prometheus use cases. "grams": "grams", "joules": "joules", "kelvin": "kelvin", // SI base unit, used in special cases (e.g. color temperature, scientific measurements). "meters": "meters", // Both American and international spelling permitted. "metres": "metres", "seconds": "seconds", "volts": "volts", // Non base units. // Time. "minutes": "seconds", "hours": "seconds", "days": "seconds", "weeks": "seconds", // Temperature. "kelvins": "kelvin", "fahrenheit": "celsius", "rankine": "celsius", // Length. "inches": "meters", "yards": "meters", "miles": "meters", // Bytes. "bits": "bytes", // Energy. "calories": "joules", // Mass. "pounds": "grams", "ounces": "grams", } unitPrefixes = []string{ "pico", "nano", "micro", "milli", "centi", "deci", "deca", "hecto", "kilo", "kibi", "mega", "mibi", "giga", "gibi", "tera", "tebi", "peta", "pebi", } // Common abbreviations that we'd like to discourage. unitAbbreviations = []string{ "s", "ms", "us", "ns", "sec", "b", "kb", "mb", "gb", "tb", "pb", "m", "h", "d", } ) client_golang-1.11.0/prometheus/testutil/promlint/promlint_test.go000066400000000000000000000422241405741072000255510ustar00rootroot00000000000000// Copyright 2020 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
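// ---
// An illustrative sketch of feeding the linter a text-format exposition via
// New (not part of the original sources; the sample metric is made up and is
// expected to be flagged for its abbreviated "_kb" unit):
//
//     l := promlint.New(strings.NewReader(
//         "# HELP queue_length_kb Current queue length.\n" +
//             "# TYPE queue_length_kb gauge\n" +
//             "queue_length_kb 42\n"))
//     problems, err := l.Lint()
//     if err != nil {
//         // The decoder failed on malformed input.
//     }
//     for _, p := range problems {
//         fmt.Printf("%s: %s\n", p.Metric, p.Text)
//     }
// ---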
// See the License for the specific language governing permissions and // limitations under the License. package promlint_test import ( "fmt" "reflect" "strings" "testing" "github.com/prometheus/client_golang/prometheus/testutil/promlint" ) type test struct { name string in string problems []promlint.Problem } func TestLintNoHelpText(t *testing.T) { const msg = "no help text" tests := []test{ { name: "no help", in: ` # TYPE go_goroutines gauge go_goroutines 24 `, problems: []promlint.Problem{{ Metric: "go_goroutines", Text: msg, }}, }, { name: "empty help", in: ` # HELP go_goroutines # TYPE go_goroutines gauge go_goroutines 24 `, problems: []promlint.Problem{{ Metric: "go_goroutines", Text: msg, }}, }, { name: "no help and empty help", in: ` # HELP go_goroutines # TYPE go_goroutines gauge go_goroutines 24 # TYPE go_threads gauge go_threads 10 `, problems: []promlint.Problem{ { Metric: "go_goroutines", Text: msg, }, { Metric: "go_threads", Text: msg, }, }, }, { name: "OK", in: ` # HELP go_goroutines Number of goroutines that currently exist. # TYPE go_goroutines gauge go_goroutines 24 `, }, } runTests(t, tests) } func TestLintMetricUnits(t *testing.T) { tests := []struct { name string in string problems []promlint.Problem }{ // good cases. { name: "amperes", in: ` # HELP x_amperes Test metric. # TYPE x_amperes untyped x_amperes 10 `, }, { name: "bytes", in: ` # HELP x_bytes Test metric. # TYPE x_bytes untyped x_bytes 10 `, }, { name: "grams", in: ` # HELP x_grams Test metric. # TYPE x_grams untyped x_grams 10 `, }, { name: "celsius", in: ` # HELP x_celsius Test metric. # TYPE x_celsius untyped x_celsius 10 `, }, { name: "meters", in: ` # HELP x_meters Test metric. # TYPE x_meters untyped x_meters 10 `, }, { name: "metres", in: ` # HELP x_metres Test metric. # TYPE x_metres untyped x_metres 10 `, }, { name: "moles", in: ` # HELP x_moles Test metric. # TYPE x_moles untyped x_moles 10 `, }, { name: "seconds", in: ` # HELP x_seconds Test metric. # TYPE x_seconds untyped x_seconds 10 `, }, { name: "joules", in: ` # HELP x_joules Test metric. # TYPE x_joules untyped x_joules 10 `, }, { name: "kelvin", in: ` # HELP x_kelvin Test metric. # TYPE x_kelvin untyped x_kelvin 10 `, }, // bad cases. { name: "milliamperes", in: ` # HELP x_milliamperes Test metric. # TYPE x_milliamperes untyped x_milliamperes 10 `, problems: []promlint.Problem{{ Metric: "x_milliamperes", Text: `use base unit "amperes" instead of "milliamperes"`, }}, }, { name: "gigabytes", in: ` # HELP x_gigabytes Test metric. # TYPE x_gigabytes untyped x_gigabytes 10 `, problems: []promlint.Problem{{ Metric: "x_gigabytes", Text: `use base unit "bytes" instead of "gigabytes"`, }}, }, { name: "kilograms", in: ` # HELP x_kilograms Test metric. # TYPE x_kilograms untyped x_kilograms 10 `, problems: []promlint.Problem{{ Metric: "x_kilograms", Text: `use base unit "grams" instead of "kilograms"`, }}, }, { name: "nanocelsius", in: ` # HELP x_nanocelsius Test metric. # TYPE x_nanocelsius untyped x_nanocelsius 10 `, problems: []promlint.Problem{{ Metric: "x_nanocelsius", Text: `use base unit "celsius" instead of "nanocelsius"`, }}, }, { name: "kilometers", in: ` # HELP x_kilometers Test metric. # TYPE x_kilometers untyped x_kilometers 10 `, problems: []promlint.Problem{{ Metric: "x_kilometers", Text: `use base unit "meters" instead of "kilometers"`, }}, }, { name: "picometers", in: ` # HELP x_picometers Test metric. 
# TYPE x_picometers untyped x_picometers 10 `, problems: []promlint.Problem{{ Metric: "x_picometers", Text: `use base unit "meters" instead of "picometers"`, }}, }, { name: "microseconds", in: ` # HELP x_microseconds Test metric. # TYPE x_microseconds untyped x_microseconds 10 `, problems: []promlint.Problem{{ Metric: "x_microseconds", Text: `use base unit "seconds" instead of "microseconds"`, }}, }, { name: "minutes", in: ` # HELP x_minutes Test metric. # TYPE x_minutes untyped x_minutes 10 `, problems: []promlint.Problem{{ Metric: "x_minutes", Text: `use base unit "seconds" instead of "minutes"`, }}, }, { name: "hours", in: ` # HELP x_hours Test metric. # TYPE x_hours untyped x_hours 10 `, problems: []promlint.Problem{{ Metric: "x_hours", Text: `use base unit "seconds" instead of "hours"`, }}, }, { name: "days", in: ` # HELP x_days Test metric. # TYPE x_days untyped x_days 10 `, problems: []promlint.Problem{{ Metric: "x_days", Text: `use base unit "seconds" instead of "days"`, }}, }, { name: "kelvins", in: ` # HELP x_kelvins Test metric. # TYPE x_kelvins untyped x_kelvins 10 `, problems: []promlint.Problem{{ Metric: "x_kelvins", Text: `use base unit "kelvin" instead of "kelvins"`, }}, }, { name: "fahrenheit", in: ` # HELP thermometers_fahrenheit Test metric. # TYPE thermometers_fahrenheit untyped thermometers_fahrenheit 10 `, problems: []promlint.Problem{{ Metric: "thermometers_fahrenheit", Text: `use base unit "celsius" instead of "fahrenheit"`, }}, }, { name: "rankine", in: ` # HELP thermometers_rankine Test metric. # TYPE thermometers_rankine untyped thermometers_rankine 10 `, problems: []promlint.Problem{{ Metric: "thermometers_rankine", Text: `use base unit "celsius" instead of "rankine"`, }}, }, { name: "inches", in: ` # HELP x_inches Test metric. # TYPE x_inches untyped x_inches 10 `, problems: []promlint.Problem{{ Metric: "x_inches", Text: `use base unit "meters" instead of "inches"`, }}, }, { name: "yards", in: ` # HELP x_yards Test metric. # TYPE x_yards untyped x_yards 10 `, problems: []promlint.Problem{{ Metric: "x_yards", Text: `use base unit "meters" instead of "yards"`, }}, }, { name: "miles", in: ` # HELP x_miles Test metric. # TYPE x_miles untyped x_miles 10 `, problems: []promlint.Problem{{ Metric: "x_miles", Text: `use base unit "meters" instead of "miles"`, }}, }, { name: "bits", in: ` # HELP x_bits Test metric. # TYPE x_bits untyped x_bits 10 `, problems: []promlint.Problem{{ Metric: "x_bits", Text: `use base unit "bytes" instead of "bits"`, }}, }, { name: "calories", in: ` # HELP x_calories Test metric. # TYPE x_calories untyped x_calories 10 `, problems: []promlint.Problem{{ Metric: "x_calories", Text: `use base unit "joules" instead of "calories"`, }}, }, { name: "pounds", in: ` # HELP x_pounds Test metric. # TYPE x_pounds untyped x_pounds 10 `, problems: []promlint.Problem{{ Metric: "x_pounds", Text: `use base unit "grams" instead of "pounds"`, }}, }, { name: "ounces", in: ` # HELP x_ounces Test metric. 
# TYPE x_ounces untyped x_ounces 10 `, problems: []promlint.Problem{{ Metric: "x_ounces", Text: `use base unit "grams" instead of "ounces"`, }}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { l := promlint.New(strings.NewReader(tt.in)) problems, err := l.Lint() if err != nil { t.Fatalf("unexpected error: %v", err) } if want, got := tt.problems, problems; !reflect.DeepEqual(want, got) { t.Fatalf("unexpected problems:\n- want: %v\n- got: %v", want, got) } }) } } func TestLintCounter(t *testing.T) { tests := []test{ { name: "counter without _total suffix", in: ` # HELP x_bytes Test metric. # TYPE x_bytes counter x_bytes 10 `, problems: []promlint.Problem{{ Metric: "x_bytes", Text: `counter metrics should have "_total" suffix`, }}, }, { name: "gauge with _total suffix", in: ` # HELP x_bytes_total Test metric. # TYPE x_bytes_total gauge x_bytes_total 10 `, problems: []promlint.Problem{{ Metric: "x_bytes_total", Text: `non-counter metrics should not have "_total" suffix`, }}, }, { name: "counter with _total suffix", in: ` # HELP x_bytes_total Test metric. # TYPE x_bytes_total counter x_bytes_total 10 `, }, { name: "gauge without _total suffix", in: ` # HELP x_bytes Test metric. # TYPE x_bytes gauge x_bytes 10 `, }, { name: "untyped with _total suffix", in: ` # HELP x_bytes_total Test metric. # TYPE x_bytes_total untyped x_bytes_total 10 `, }, { name: "untyped without _total suffix", in: ` # HELP x_bytes Test metric. # TYPE x_bytes untyped x_bytes 10 `, }, } runTests(t, tests) } func TestLintHistogramSummaryReserved(t *testing.T) { tests := []test{ { name: "gauge with _bucket suffix", in: ` # HELP x_bytes_bucket Test metric. # TYPE x_bytes_bucket gauge x_bytes_bucket 10 `, problems: []promlint.Problem{{ Metric: "x_bytes_bucket", Text: `non-histogram metrics should not have "_bucket" suffix`, }}, }, { name: "gauge with _count suffix", in: ` # HELP x_bytes_count Test metric. # TYPE x_bytes_count gauge x_bytes_count 10 `, problems: []promlint.Problem{{ Metric: "x_bytes_count", Text: `non-histogram and non-summary metrics should not have "_count" suffix`, }}, }, { name: "gauge with _sum suffix", in: ` # HELP x_bytes_sum Test metric. # TYPE x_bytes_sum gauge x_bytes_sum 10 `, problems: []promlint.Problem{{ Metric: "x_bytes_sum", Text: `non-histogram and non-summary metrics should not have "_sum" suffix`, }}, }, { name: "gauge with le label", in: ` # HELP x_bytes Test metric. # TYPE x_bytes gauge x_bytes{le="1"} 10 `, problems: []promlint.Problem{{ Metric: "x_bytes", Text: `non-histogram metrics should not have "le" label`, }}, }, { name: "gauge with quantile label", in: ` # HELP x_bytes Test metric. # TYPE x_bytes gauge x_bytes{quantile="1"} 10 `, problems: []promlint.Problem{{ Metric: "x_bytes", Text: `non-summary metrics should not have "quantile" label`, }}, }, { name: "histogram with quantile label", in: ` # HELP tsdb_compaction_duration Duration of compaction runs. 
# TYPE tsdb_compaction_duration histogram tsdb_compaction_duration_bucket{le="0.005",quantile="0.01"} 0 tsdb_compaction_duration_bucket{le="0.01",quantile="0.01"} 0 tsdb_compaction_duration_bucket{le="0.025",quantile="0.01"} 0 tsdb_compaction_duration_bucket{le="0.05",quantile="0.01"} 0 tsdb_compaction_duration_bucket{le="0.1",quantile="0.01"} 0 tsdb_compaction_duration_bucket{le="0.25",quantile="0.01"} 0 tsdb_compaction_duration_bucket{le="0.5",quantile="0.01"} 57 tsdb_compaction_duration_bucket{le="1",quantile="0.01"} 68 tsdb_compaction_duration_bucket{le="2.5",quantile="0.01"} 69 tsdb_compaction_duration_bucket{le="5",quantile="0.01"} 69 tsdb_compaction_duration_bucket{le="10",quantile="0.01"} 69 tsdb_compaction_duration_bucket{le="+Inf",quantile="0.01"} 69 tsdb_compaction_duration_sum 28.740810936000006 tsdb_compaction_duration_count 69 `, problems: []promlint.Problem{{ Metric: "tsdb_compaction_duration", Text: `non-summary metrics should not have "quantile" label`, }}, }, { name: "summary with le label", in: ` # HELP go_gc_duration_seconds A summary of the GC invocation durations. # TYPE go_gc_duration_seconds summary go_gc_duration_seconds{quantile="0",le="0.01"} 4.2365e-05 go_gc_duration_seconds{quantile="0.25",le="0.01"} 8.1492e-05 go_gc_duration_seconds{quantile="0.5",le="0.01"} 0.000100656 go_gc_duration_seconds{quantile="0.75",le="0.01"} 0.000113913 go_gc_duration_seconds{quantile="1",le="0.01"} 0.021754305 go_gc_duration_seconds_sum 1.769429004 go_gc_duration_seconds_count 5962 `, problems: []promlint.Problem{{ Metric: "go_gc_duration_seconds", Text: `non-histogram metrics should not have "le" label`, }}, }, { name: "histogram OK", in: ` # HELP tsdb_compaction_duration Duration of compaction runs. # TYPE tsdb_compaction_duration histogram tsdb_compaction_duration_bucket{le="0.005"} 0 tsdb_compaction_duration_bucket{le="0.01"} 0 tsdb_compaction_duration_bucket{le="0.025"} 0 tsdb_compaction_duration_bucket{le="0.05"} 0 tsdb_compaction_duration_bucket{le="0.1"} 0 tsdb_compaction_duration_bucket{le="0.25"} 0 tsdb_compaction_duration_bucket{le="0.5"} 57 tsdb_compaction_duration_bucket{le="1"} 68 tsdb_compaction_duration_bucket{le="2.5"} 69 tsdb_compaction_duration_bucket{le="5"} 69 tsdb_compaction_duration_bucket{le="10"} 69 tsdb_compaction_duration_bucket{le="+Inf"} 69 tsdb_compaction_duration_sum 28.740810936000006 tsdb_compaction_duration_count 69 `, }, { name: "summary OK", in: ` # HELP go_gc_duration_seconds A summary of the GC invocation durations. # TYPE go_gc_duration_seconds summary go_gc_duration_seconds{quantile="0"} 4.2365e-05 go_gc_duration_seconds{quantile="0.25"} 8.1492e-05 go_gc_duration_seconds{quantile="0.5"} 0.000100656 go_gc_duration_seconds{quantile="0.75"} 0.000113913 go_gc_duration_seconds{quantile="1"} 0.021754305 go_gc_duration_seconds_sum 1.769429004 go_gc_duration_seconds_count 5962 `, }, } runTests(t, tests) } func TestLintMetricTypeInName(t *testing.T) { genTest := func(n, t, err string, problems ...promlint.Problem) test { return test{ name: fmt.Sprintf("%s with _%s suffix", t, t), in: fmt.Sprintf(` # HELP %s Test metric. 
# TYPE %s %s %s 10 `, n, n, t, n), problems: append(problems, promlint.Problem{ Metric: n, Text: fmt.Sprintf(`metric name should not include type '%s'`, err), }), } } twoProbTest := genTest("http_requests_counter", "counter", "counter", promlint.Problem{ Metric: "http_requests_counter", Text: `counter metrics should have "_total" suffix`, }) tests := []test{ twoProbTest, genTest("instance_memory_limit_bytes_gauge", "gauge", "gauge"), genTest("request_duration_seconds_summary", "summary", "summary"), genTest("request_duration_seconds_summary", "histogram", "summary"), genTest("request_duration_seconds_histogram", "histogram", "histogram"), genTest("request_duration_seconds_HISTOGRAM", "histogram", "histogram"), genTest("instance_memory_limit_gauge_bytes", "gauge", "gauge"), } runTests(t, tests) } func TestLintReservedChars(t *testing.T) { tests := []test{ { name: "request_duration::_seconds", in: ` # HELP request_duration::_seconds Test metric. # TYPE request_duration::_seconds histogram request_duration::_seconds 10 `, problems: []promlint.Problem{ { Metric: "request_duration::_seconds", Text: "metric names should not contain ':'", }, }, }, } runTests(t, tests) } func TestLintCamelCase(t *testing.T) { tests := []test{ { name: "requestDuration_seconds", in: ` # HELP requestDuration_seconds Test metric. # TYPE requestDuration_seconds histogram requestDuration_seconds 10 `, problems: []promlint.Problem{ { Metric: "requestDuration_seconds", Text: "metric names should be written in 'snake_case' not 'camelCase'", }, }, }, { name: "request_duration_seconds", in: ` # HELP request_duration_seconds Test metric. # TYPE request_duration_seconds histogram request_duration_seconds{httpService="foo"} 10 `, problems: []promlint.Problem{ { Metric: "request_duration_seconds", Text: "label names should be written in 'snake_case' not 'camelCase'", }, }, }, } runTests(t, tests) } func TestLintUnitAbbreviations(t *testing.T) { genTest := func(n string) test { return test{ name: fmt.Sprintf("%s with abbreviated unit", n), in: fmt.Sprintf(` # HELP %s Test metric. # TYPE %s gauge %s 10 `, n, n, n), problems: []promlint.Problem{ { Metric: n, Text: "metric names should not contain abbreviated units", }, }, } } tests := []test{ genTest("instance_memory_limit_b"), genTest("instance_memory_limit_kb"), genTest("instance_memory_limit_mb"), genTest("instance_memory_limit_MB"), genTest("instance_memory_limit_gb"), genTest("instance_memory_limit_tb"), genTest("instance_memory_limit_pb"), genTest("request_duration_s"), genTest("request_duration_ms"), genTest("request_duration_us"), genTest("request_duration_ns"), genTest("request_duration_sec"), genTest("request_sec_duration"), genTest("request_duration_m"), genTest("request_duration_h"), genTest("request_duration_d"), } runTests(t, tests) } func runTests(t *testing.T, tests []test) { t.Helper() for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { l := promlint.New(strings.NewReader(tt.in)) problems, err := l.Lint() if err != nil { t.Fatalf("unexpected error: %v", err) } if want, got := tt.problems, problems; !reflect.DeepEqual(want, got) { t.Fatalf("unexpected problems:\n- want: %v\n- got: %v", want, got) } }) } } client_golang-1.11.0/prometheus/testutil/testutil.go000066400000000000000000000205331405741072000226560ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package testutil provides helpers to test code using the prometheus package // of client_golang. // // While writing unit tests to verify correct instrumentation of your code, it's // a common mistake to mostly test the instrumentation library instead of your // own code. Rather than verifying that a prometheus.Counter's value has changed // as expected or that it shows up in the exposition after registration, it is // in general more robust and more faithful to the concept of unit tests to use // mock implementations of the prometheus.Counter and prometheus.Registerer // interfaces that simply assert that the Add or Register methods have been // called with the expected arguments. However, this might be overkill in simple // scenarios. The ToFloat64 function is provided for simple inspection of a // single-value metric, but it has to be used with caution. // // End-to-end tests to verify all or larger parts of the metrics exposition can // be implemented with the CollectAndCompare or GatherAndCompare functions. The // most appropriate use is not so much testing instrumentation of your code, but // testing custom prometheus.Collector implementations and in particular whole // exporters, i.e. programs that retrieve telemetry data from a 3rd party source // and convert it into Prometheus metrics. // // In a similar pattern, CollectAndLint and GatherAndLint can be used to detect // metrics that have issues with their name, type, or metadata without being // necessarily invalid, e.g. a counter with a name missing the “_total” suffix. package testutil import ( "bytes" "fmt" "io" "github.com/prometheus/common/expfmt" dto "github.com/prometheus/client_model/go" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/internal" ) // ToFloat64 collects all Metrics from the provided Collector. It expects that // this results in exactly one Metric being collected, which must be a Gauge, // Counter, or Untyped. In all other cases, ToFloat64 panics. ToFloat64 returns // the value of the collected Metric. // // The Collector provided is typically a simple instance of Gauge or Counter, or // – less commonly – a GaugeVec or CounterVec with exactly one element. But any // Collector fulfilling the prerequisites described above will do. // // Use this function with caution. It is computationally very expensive and thus // not suited at all to read values from Metrics in regular code. This is really // only for testing purposes, and even for testing, other approaches are often // more appropriate (see this package's documentation). // // A clear anti-pattern would be to use a metric type from the prometheus // package to track values that are also needed for something else than the // exposition of Prometheus metrics. For example, you would like to track the // number of items in a queue because your code should reject queuing further // items if a certain limit is reached. It is tempting to track the number of // items in a prometheus.Gauge, as it is then easily available as a metric for // exposition, too. 
However, then you would need to call ToFloat64 in your // regular code, potentially quite often. The recommended way is to track the // number of items conventionally (in the way you would have done it without // considering Prometheus metrics) and then expose the number with a // prometheus.GaugeFunc. func ToFloat64(c prometheus.Collector) float64 { var ( m prometheus.Metric mCount int mChan = make(chan prometheus.Metric) done = make(chan struct{}) ) go func() { for m = range mChan { mCount++ } close(done) }() c.Collect(mChan) close(mChan) <-done if mCount != 1 { panic(fmt.Errorf("collected %d metrics instead of exactly 1", mCount)) } pb := &dto.Metric{} m.Write(pb) if pb.Gauge != nil { return pb.Gauge.GetValue() } if pb.Counter != nil { return pb.Counter.GetValue() } if pb.Untyped != nil { return pb.Untyped.GetValue() } panic(fmt.Errorf("collected a non-gauge/counter/untyped metric: %s", pb)) } // CollectAndCount registers the provided Collector with a newly created // pedantic Registry. It then calls GatherAndCount with that Registry and with // the provided metricNames. In the unlikely case that the registration or the // gathering fails, this function panics. (This is inconsistent with the other // CollectAnd… functions in this package and has historical reasons. Changing // the function signature would be a breaking change and will therefore only // happen with the next major version bump.) func CollectAndCount(c prometheus.Collector, metricNames ...string) int { reg := prometheus.NewPedanticRegistry() if err := reg.Register(c); err != nil { panic(fmt.Errorf("registering collector failed: %s", err)) } result, err := GatherAndCount(reg, metricNames...) if err != nil { panic(err) } return result } // GatherAndCount gathers all metrics from the provided Gatherer and counts // them. It returns the number of metric children in all gathered metric // families together. If any metricNames are provided, only metrics with those // names are counted. func GatherAndCount(g prometheus.Gatherer, metricNames ...string) (int, error) { got, err := g.Gather() if err != nil { return 0, fmt.Errorf("gathering metrics failed: %s", err) } if metricNames != nil { got = filterMetrics(got, metricNames) } result := 0 for _, mf := range got { result += len(mf.GetMetric()) } return result, nil } // CollectAndCompare registers the provided Collector with a newly created // pedantic Registry. It then calls GatherAndCompare with that Registry and with // the provided metricNames. func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames ...string) error { reg := prometheus.NewPedanticRegistry() if err := reg.Register(c); err != nil { return fmt.Errorf("registering collector failed: %s", err) } return GatherAndCompare(reg, expected, metricNames...) } // GatherAndCompare gathers all metrics from the provided Gatherer and compares // it to an expected output read from the provided Reader in the Prometheus text // exposition format. If any metricNames are provided, only metrics with those // names are compared. 
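//
// An illustrative sketch (not part of the original sources; metric names and
// the registry wiring are assumptions) of comparing a whole registry against
// an expected exposition in a test:
//
//     reg := prometheus.NewPedanticRegistry()
//     c := prometheus.NewCounter(prometheus.CounterOpts{
//         Name: "jobs_processed_total",
//         Help: "Total number of processed jobs.",
//     })
//     reg.MustRegister(c)
//     c.Inc()
//
//     expected := "# HELP jobs_processed_total Total number of processed jobs.\n" +
//         "# TYPE jobs_processed_total counter\n" +
//         "jobs_processed_total 1\n"
//     if err := GatherAndCompare(reg, strings.NewReader(expected), "jobs_processed_total"); err != nil {
//         t.Errorf("unexpected metrics: %s", err)
//     }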
func GatherAndCompare(g prometheus.Gatherer, expected io.Reader, metricNames ...string) error { got, err := g.Gather() if err != nil { return fmt.Errorf("gathering metrics failed: %s", err) } if metricNames != nil { got = filterMetrics(got, metricNames) } var tp expfmt.TextParser wantRaw, err := tp.TextToMetricFamilies(expected) if err != nil { return fmt.Errorf("parsing expected metrics failed: %s", err) } want := internal.NormalizeMetricFamilies(wantRaw) return compare(got, want) } // compare encodes both provided slices of metric families into the text format, // compares their string message, and returns an error if they do not match. // The error contains the encoded text of both the desired and the actual // result. func compare(got, want []*dto.MetricFamily) error { var gotBuf, wantBuf bytes.Buffer enc := expfmt.NewEncoder(&gotBuf, expfmt.FmtText) for _, mf := range got { if err := enc.Encode(mf); err != nil { return fmt.Errorf("encoding gathered metrics failed: %s", err) } } enc = expfmt.NewEncoder(&wantBuf, expfmt.FmtText) for _, mf := range want { if err := enc.Encode(mf); err != nil { return fmt.Errorf("encoding expected metrics failed: %s", err) } } if wantBuf.String() != gotBuf.String() { return fmt.Errorf(` metric output does not match expectation; want: %s got: %s`, wantBuf.String(), gotBuf.String()) } return nil } func filterMetrics(metrics []*dto.MetricFamily, names []string) []*dto.MetricFamily { var filtered []*dto.MetricFamily for _, m := range metrics { for _, name := range names { if m.GetName() == name { filtered = append(filtered, m) break } } } return filtered } client_golang-1.11.0/prometheus/testutil/testutil_test.go000066400000000000000000000212441405741072000237150ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package testutil import ( "strings" "testing" "github.com/prometheus/client_golang/prometheus" ) type untypedCollector struct{} func (u untypedCollector) Describe(c chan<- *prometheus.Desc) { c <- prometheus.NewDesc("name", "help", nil, nil) } func (u untypedCollector) Collect(c chan<- prometheus.Metric) { c <- prometheus.MustNewConstMetric( prometheus.NewDesc("name", "help", nil, nil), prometheus.UntypedValue, 2001, ) } func TestToFloat64(t *testing.T) { gaugeWithAValueSet := prometheus.NewGauge(prometheus.GaugeOpts{}) gaugeWithAValueSet.Set(3.14) counterVecWithOneElement := prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"foo"}) counterVecWithOneElement.WithLabelValues("bar").Inc() counterVecWithTwoElements := prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"foo"}) counterVecWithTwoElements.WithLabelValues("bar").Add(42) counterVecWithTwoElements.WithLabelValues("baz").Inc() histogramVecWithOneElement := prometheus.NewHistogramVec(prometheus.HistogramOpts{}, []string{"foo"}) histogramVecWithOneElement.WithLabelValues("bar").Observe(2.7) scenarios := map[string]struct { collector prometheus.Collector panics bool want float64 }{ "simple counter": { collector: prometheus.NewCounter(prometheus.CounterOpts{}), panics: false, want: 0, }, "simple gauge": { collector: prometheus.NewGauge(prometheus.GaugeOpts{}), panics: false, want: 0, }, "simple untyped": { collector: untypedCollector{}, panics: false, want: 2001, }, "simple histogram": { collector: prometheus.NewHistogram(prometheus.HistogramOpts{}), panics: true, }, "simple summary": { collector: prometheus.NewSummary(prometheus.SummaryOpts{}), panics: true, }, "simple gauge with an actual value set": { collector: gaugeWithAValueSet, panics: false, want: 3.14, }, "counter vec with zero elements": { collector: prometheus.NewCounterVec(prometheus.CounterOpts{}, nil), panics: true, }, "counter vec with one element": { collector: counterVecWithOneElement, panics: false, want: 1, }, "counter vec with two elements": { collector: counterVecWithTwoElements, panics: true, }, "histogram vec with one element": { collector: histogramVecWithOneElement, panics: true, }, } for n, s := range scenarios { t.Run(n, func(t *testing.T) { defer func() { r := recover() if r == nil && s.panics { t.Error("expected panic") } else if r != nil && !s.panics { t.Error("unexpected panic: ", r) } // Any other combination is the expected outcome. }() if got := ToFloat64(s.collector); got != s.want { t.Errorf("want %f, got %f", s.want, got) } }) } } func TestCollectAndCompare(t *testing.T) { const metadata = ` # HELP some_total A value that represents a counter. # TYPE some_total counter ` c := prometheus.NewCounter(prometheus.CounterOpts{ Name: "some_total", Help: "A value that represents a counter.", ConstLabels: prometheus.Labels{ "label1": "value1", }, }) c.Inc() expected := ` some_total{ label1 = "value1" } 1 ` if err := CollectAndCompare(c, strings.NewReader(metadata+expected), "some_total"); err != nil { t.Errorf("unexpected collecting result:\n%s", err) } } func TestCollectAndCompareNoLabel(t *testing.T) { const metadata = ` # HELP some_total A value that represents a counter. 
# TYPE some_total counter ` c := prometheus.NewCounter(prometheus.CounterOpts{ Name: "some_total", Help: "A value that represents a counter.", }) c.Inc() expected := ` some_total 1 ` if err := CollectAndCompare(c, strings.NewReader(metadata+expected), "some_total"); err != nil { t.Errorf("unexpected collecting result:\n%s", err) } } func TestCollectAndCompareHistogram(t *testing.T) { inputs := []struct { name string c prometheus.Collector metadata string expect string observation float64 }{ { name: "Testing Histogram Collector", c: prometheus.NewHistogram(prometheus.HistogramOpts{ Name: "some_histogram", Help: "An example of a histogram", Buckets: []float64{1, 2, 3}, }), metadata: ` # HELP some_histogram An example of a histogram # TYPE some_histogram histogram `, expect: ` some_histogram{le="1"} 0 some_histogram{le="2"} 0 some_histogram{le="3"} 1 some_histogram_bucket{le="+Inf"} 1 some_histogram_sum 2.5 some_histogram_count 1 `, observation: 2.5, }, { name: "Testing HistogramVec Collector", c: prometheus.NewHistogramVec(prometheus.HistogramOpts{ Name: "some_histogram", Help: "An example of a histogram", Buckets: []float64{1, 2, 3}, }, []string{"test"}), metadata: ` # HELP some_histogram An example of a histogram # TYPE some_histogram histogram `, expect: ` some_histogram_bucket{test="test",le="1"} 0 some_histogram_bucket{test="test",le="2"} 0 some_histogram_bucket{test="test",le="3"} 1 some_histogram_bucket{test="test",le="+Inf"} 1 some_histogram_sum{test="test"} 2.5 some_histogram_count{test="test"} 1 `, observation: 2.5, }, } for _, input := range inputs { switch collector := input.c.(type) { case prometheus.Histogram: collector.Observe(input.observation) case *prometheus.HistogramVec: collector.WithLabelValues("test").Observe(input.observation) default: t.Fatalf("unsuported collector tested") } t.Run(input.name, func(t *testing.T) { if err := CollectAndCompare(input.c, strings.NewReader(input.metadata+input.expect)); err != nil { t.Errorf("unexpected collecting result:\n%s", err) } }) } } func TestNoMetricFilter(t *testing.T) { const metadata = ` # HELP some_total A value that represents a counter. # TYPE some_total counter ` c := prometheus.NewCounter(prometheus.CounterOpts{ Name: "some_total", Help: "A value that represents a counter.", ConstLabels: prometheus.Labels{ "label1": "value1", }, }) c.Inc() expected := ` some_total{label1="value1"} 1 ` if err := CollectAndCompare(c, strings.NewReader(metadata+expected)); err != nil { t.Errorf("unexpected collecting result:\n%s", err) } } func TestMetricNotFound(t *testing.T) { const metadata = ` # HELP some_other_metric A value that represents a counter. # TYPE some_other_metric counter ` c := prometheus.NewCounter(prometheus.CounterOpts{ Name: "some_total", Help: "A value that represents a counter.", ConstLabels: prometheus.Labels{ "label1": "value1", }, }) c.Inc() expected := ` some_other_metric{label1="value1"} 1 ` expectedError := ` metric output does not match expectation; want: # HELP some_other_metric A value that represents a counter. # TYPE some_other_metric counter some_other_metric{label1="value1"} 1 got: # HELP some_total A value that represents a counter. 
# TYPE some_total counter some_total{label1="value1"} 1 ` err := CollectAndCompare(c, strings.NewReader(metadata+expected)) if err == nil { t.Error("Expected error, got no error.") } if err.Error() != expectedError { t.Errorf("Expected\n%#+v\nGot:\n%#+v", expectedError, err.Error()) } } func TestCollectAndCount(t *testing.T) { c := prometheus.NewCounterVec( prometheus.CounterOpts{ Name: "some_total", Help: "A value that represents a counter.", }, []string{"foo"}, ) if got, want := CollectAndCount(c), 0; got != want { t.Errorf("unexpected metric count, got %d, want %d", got, want) } c.WithLabelValues("bar") if got, want := CollectAndCount(c), 1; got != want { t.Errorf("unexpected metric count, got %d, want %d", got, want) } c.WithLabelValues("baz") if got, want := CollectAndCount(c), 2; got != want { t.Errorf("unexpected metric count, got %d, want %d", got, want) } if got, want := CollectAndCount(c, "some_total"), 2; got != want { t.Errorf("unexpected metric count, got %d, want %d", got, want) } if got, want := CollectAndCount(c, "some_other_total"), 0; got != want { t.Errorf("unexpected metric count, got %d, want %d", got, want) } } client_golang-1.11.0/prometheus/timer.go000066400000000000000000000033351405741072000202450ustar00rootroot00000000000000// Copyright 2016 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus import "time" // Timer is a helper type to time functions. Use NewTimer to create new // instances. type Timer struct { begin time.Time observer Observer } // NewTimer creates a new Timer. The provided Observer is used to observe a // duration in seconds. Timer is usually used to time a function call in the // following way: // func TimeMe() { // timer := NewTimer(myHistogram) // defer timer.ObserveDuration() // // Do actual work. // } func NewTimer(o Observer) *Timer { return &Timer{ begin: time.Now(), observer: o, } } // ObserveDuration records the duration passed since the Timer was created with // NewTimer. It calls the Observe method of the Observer provided during // construction with the duration in seconds as an argument. The observed // duration is also returned. ObserveDuration is usually called with a defer // statement. // // Note that this method is only guaranteed to never observe negative durations // if used with Go1.9+. func (t *Timer) ObserveDuration() time.Duration { d := time.Since(t.begin) if t.observer != nil { t.observer.Observe(d.Seconds()) } return d } client_golang-1.11.0/prometheus/timer_test.go000066400000000000000000000102441405741072000213010ustar00rootroot00000000000000// Copyright 2016 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
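// ---
// An illustrative sketch (not part of the original sources; the metric and the
// handle function are assumptions) of deferring the label choice until the
// timed work has finished, the pattern the timer tests below exercise:
//
//     var requestDuration = NewHistogramVec(
//         HistogramOpts{
//             Name: "request_duration_seconds",
//             Help: "Duration of requests in seconds, by outcome.",
//         },
//         []string{"outcome"},
//     )
//
//     func handle() {
//         outcome := "error"
//         timer := NewTimer(ObserverFunc(func(v float64) {
//             requestDuration.WithLabelValues(outcome).Observe(v)
//         }))
//         defer timer.ObserveDuration()
//
//         // ... do the actual work; flip the label on success.
//         outcome = "success"
//     }
// ---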
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus import ( "testing" dto "github.com/prometheus/client_model/go" ) func TestTimerObserve(t *testing.T) { var ( his = NewHistogram(HistogramOpts{Name: "test_histogram"}) sum = NewSummary(SummaryOpts{Name: "test_summary"}) gauge = NewGauge(GaugeOpts{Name: "test_gauge"}) ) func() { hisTimer := NewTimer(his) sumTimer := NewTimer(sum) gaugeTimer := NewTimer(ObserverFunc(gauge.Set)) defer hisTimer.ObserveDuration() defer sumTimer.ObserveDuration() defer gaugeTimer.ObserveDuration() }() m := &dto.Metric{} his.Write(m) if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { t.Errorf("want %d observations for histogram, got %d", want, got) } m.Reset() sum.Write(m) if want, got := uint64(1), m.GetSummary().GetSampleCount(); want != got { t.Errorf("want %d observations for summary, got %d", want, got) } m.Reset() gauge.Write(m) if got := m.GetGauge().GetValue(); got <= 0 { t.Errorf("want value > 0 for gauge, got %f", got) } } func TestTimerEmpty(t *testing.T) { emptyTimer := NewTimer(nil) emptyTimer.ObserveDuration() // Do nothing, just demonstrate it works without panic. } func TestTimerConditionalTiming(t *testing.T) { var ( his = NewHistogram(HistogramOpts{ Name: "test_histogram", }) timeMe = true m = &dto.Metric{} ) timedFunc := func() { timer := NewTimer(ObserverFunc(func(v float64) { if timeMe { his.Observe(v) } })) defer timer.ObserveDuration() } timedFunc() // This will time. his.Write(m) if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { t.Errorf("want %d observations for histogram, got %d", want, got) } timeMe = false timedFunc() // This will not time again. 
m.Reset() his.Write(m) if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { t.Errorf("want %d observations for histogram, got %d", want, got) } } func TestTimerByOutcome(t *testing.T) { var ( his = NewHistogramVec( HistogramOpts{Name: "test_histogram"}, []string{"outcome"}, ) outcome = "foo" m = &dto.Metric{} ) timedFunc := func() { timer := NewTimer(ObserverFunc(func(v float64) { his.WithLabelValues(outcome).Observe(v) })) defer timer.ObserveDuration() if outcome == "foo" { outcome = "bar" return } outcome = "foo" } timedFunc() his.WithLabelValues("foo").(Histogram).Write(m) if want, got := uint64(0), m.GetHistogram().GetSampleCount(); want != got { t.Errorf("want %d observations for 'foo' histogram, got %d", want, got) } m.Reset() his.WithLabelValues("bar").(Histogram).Write(m) if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { t.Errorf("want %d observations for 'bar' histogram, got %d", want, got) } timedFunc() m.Reset() his.WithLabelValues("foo").(Histogram).Write(m) if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { t.Errorf("want %d observations for 'foo' histogram, got %d", want, got) } m.Reset() his.WithLabelValues("bar").(Histogram).Write(m) if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { t.Errorf("want %d observations for 'bar' histogram, got %d", want, got) } timedFunc() m.Reset() his.WithLabelValues("foo").(Histogram).Write(m) if want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got { t.Errorf("want %d observations for 'foo' histogram, got %d", want, got) } m.Reset() his.WithLabelValues("bar").(Histogram).Write(m) if want, got := uint64(2), m.GetHistogram().GetSampleCount(); want != got { t.Errorf("want %d observations for 'bar' histogram, got %d", want, got) } } client_golang-1.11.0/prometheus/untyped.go000066400000000000000000000031341405741072000206120ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus // UntypedOpts is an alias for Opts. See there for doc comments. type UntypedOpts Opts // UntypedFunc works like GaugeFunc but the collected metric is of type // "Untyped". UntypedFunc is useful to mirror an external metric of unknown // type. // // To create UntypedFunc instances, use NewUntypedFunc. type UntypedFunc interface { Metric Collector } // NewUntypedFunc creates a new UntypedFunc based on the provided // UntypedOpts. The value reported is determined by calling the given function // from within the Write method. Take into account that metric collection may // happen concurrently. If that results in concurrent calls to Write, like in // the case where an UntypedFunc is directly registered with Prometheus, the // provided function must be concurrency-safe. 
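//
// An illustrative sketch (not part of the original sources; the metric name
// and the readExternalQueueLength helper are assumptions):
//
//     MustRegister(NewUntypedFunc(
//         UntypedOpts{
//             Name: "external_queue_length",
//             Help: "Queue length reported by an external system of unknown metric type.",
//         },
//         func() float64 {
//             return readExternalQueueLength() // hypothetical, must be concurrency-safe
//         },
//     ))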
func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { return newValueFunc(NewDesc( BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), opts.Help, nil, opts.ConstLabels, ), UntypedValue, function) } client_golang-1.11.0/prometheus/value.go000066400000000000000000000151151405741072000202400ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus import ( "fmt" "sort" "time" "unicode/utf8" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" dto "github.com/prometheus/client_model/go" ) // ValueType is an enumeration of metric types that represent a simple value. type ValueType int // Possible values for the ValueType enum. Use UntypedValue to mark a metric // with an unknown type. const ( _ ValueType = iota CounterValue GaugeValue UntypedValue ) // valueFunc is a generic metric for simple values retrieved on collect time // from a function. It implements Metric and Collector. Its effective type is // determined by ValueType. This is a low-level building block used by the // library to back the implementations of CounterFunc, GaugeFunc, and // UntypedFunc. type valueFunc struct { selfCollector desc *Desc valType ValueType function func() float64 labelPairs []*dto.LabelPair } // newValueFunc returns a newly allocated valueFunc with the given Desc and // ValueType. The value reported is determined by calling the given function // from within the Write method. Take into account that metric collection may // happen concurrently. If that results in concurrent calls to Write, like in // the case where a valueFunc is directly registered with Prometheus, the // provided function must be concurrency-safe. func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { result := &valueFunc{ desc: desc, valType: valueType, function: function, labelPairs: MakeLabelPairs(desc, nil), } result.init(result) return result } func (v *valueFunc) Desc() *Desc { return v.desc } func (v *valueFunc) Write(out *dto.Metric) error { return populateMetric(v.valType, v.function(), v.labelPairs, nil, out) } // NewConstMetric returns a metric with one fixed value that cannot be // changed. Users of this package will not have much use for it in regular // operations. However, when implementing custom Collectors, it is useful as a // throw-away metric that is generated on the fly to send it to Prometheus in // the Collect method. NewConstMetric returns an error if the length of // labelValues is not consistent with the variable labels in Desc or if Desc is // invalid. 
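//
// An illustrative sketch (not part of the original sources; the descriptor,
// the myCollector type, and the readQueueLength helper are assumptions) of the
// typical use inside a custom Collector's Collect method:
//
//     var queueLengthDesc = NewDesc(
//         "queue_length",
//         "Current number of items in the queue.",
//         []string{"queue"}, nil,
//     )
//
//     func (c myCollector) Collect(ch chan<- Metric) {
//         ch <- MustNewConstMetric(
//             queueLengthDesc, GaugeValue, readQueueLength("jobs"), "jobs",
//         )
//     }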
func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { if desc.err != nil { return nil, desc.err } if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { return nil, err } return &constMetric{ desc: desc, valType: valueType, val: value, labelPairs: MakeLabelPairs(desc, labelValues), }, nil } // MustNewConstMetric is a version of NewConstMetric that panics where // NewConstMetric would have returned an error. func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { m, err := NewConstMetric(desc, valueType, value, labelValues...) if err != nil { panic(err) } return m } type constMetric struct { desc *Desc valType ValueType val float64 labelPairs []*dto.LabelPair } func (m *constMetric) Desc() *Desc { return m.desc } func (m *constMetric) Write(out *dto.Metric) error { return populateMetric(m.valType, m.val, m.labelPairs, nil, out) } func populateMetric( t ValueType, v float64, labelPairs []*dto.LabelPair, e *dto.Exemplar, m *dto.Metric, ) error { m.Label = labelPairs switch t { case CounterValue: m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e} case GaugeValue: m.Gauge = &dto.Gauge{Value: proto.Float64(v)} case UntypedValue: m.Untyped = &dto.Untyped{Value: proto.Float64(v)} default: return fmt.Errorf("encountered unknown type %v", t) } return nil } // MakeLabelPairs is a helper function to create protobuf LabelPairs from the // variable and constant labels in the provided Desc. The values for the // variable labels are defined by the labelValues slice, which must be in the // same order as the corresponding variable labels in the Desc. // // This function is only needed for custom Metric implementations. See MetricVec // example. func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) if totalLen == 0 { // Super fast path. return nil } if len(desc.variableLabels) == 0 { // Moderately fast path. return desc.constLabelPairs } labelPairs := make([]*dto.LabelPair, 0, totalLen) for i, n := range desc.variableLabels { labelPairs = append(labelPairs, &dto.LabelPair{ Name: proto.String(n), Value: proto.String(labelValues[i]), }) } labelPairs = append(labelPairs, desc.constLabelPairs...) sort.Sort(labelPairSorter(labelPairs)) return labelPairs } // ExemplarMaxRunes is the max total number of runes allowed in exemplar labels. const ExemplarMaxRunes = 64 // newExemplar creates a new dto.Exemplar from the provided values. An error is // returned if any of the label names or values are invalid or if the total // number of runes in the label names and values exceeds ExemplarMaxRunes. 
func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) { e := &dto.Exemplar{} e.Value = proto.Float64(value) tsProto, err := ptypes.TimestampProto(ts) if err != nil { return nil, err } e.Timestamp = tsProto labelPairs := make([]*dto.LabelPair, 0, len(l)) var runes int for name, value := range l { if !checkLabelName(name) { return nil, fmt.Errorf("exemplar label name %q is invalid", name) } runes += utf8.RuneCountInString(name) if !utf8.ValidString(value) { return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value) } runes += utf8.RuneCountInString(value) labelPairs = append(labelPairs, &dto.LabelPair{ Name: proto.String(name), Value: proto.String(value), }) } if runes > ExemplarMaxRunes { return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes) } e.Label = labelPairs return e, nil } client_golang-1.11.0/prometheus/value_test.go000066400000000000000000000026541405741072000213030ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus import ( "fmt" "testing" ) func TestNewConstMetricInvalidLabelValues(t *testing.T) { testCases := []struct { desc string labels Labels }{ { desc: "non utf8 label value", labels: Labels{"a": "\xFF"}, }, { desc: "not enough label values", labels: Labels{}, }, { desc: "too many label values", labels: Labels{"a": "1", "b": "2"}, }, } for _, test := range testCases { metricDesc := NewDesc( "sample_value", "sample value", []string{"a"}, Labels{}, ) expectPanic(t, func() { MustNewConstMetric(metricDesc, CounterValue, 0.3, "\xFF") }, fmt.Sprintf("WithLabelValues: expected panic because: %s", test.desc)) if _, err := NewConstMetric(metricDesc, CounterValue, 0.3, "\xFF"); err == nil { t.Errorf("NewConstMetric: expected error because: %s", test.desc) } } } client_golang-1.11.0/prometheus/vec.go000066400000000000000000000420101405741072000176730ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus import ( "fmt" "sync" "github.com/prometheus/common/model" ) // MetricVec is a Collector to bundle metrics of the same name that differ in // their label values. MetricVec is not used directly but as a building block // for implementations of vectors of a given metric type, like GaugeVec, // CounterVec, SummaryVec, and HistogramVec. It is exported so that it can be // used for custom Metric implementations. 
// // To create a FooVec for custom Metric Foo, embed a pointer to MetricVec in // FooVec and initialize it with NewMetricVec. Implement wrappers for // GetMetricWithLabelValues and GetMetricWith that return (Foo, error) rather // than (Metric, error). Similarly, create a wrapper for CurryWith that returns // (*FooVec, error) rather than (*MetricVec, error). It is recommended to also // add the convenience methods WithLabelValues, With, and MustCurryWith, which // panic instead of returning errors. See also the MetricVec example. type MetricVec struct { *metricMap curry []curriedLabelValue // hashAdd and hashAddByte can be replaced for testing collision handling. hashAdd func(h uint64, s string) uint64 hashAddByte func(h uint64, b byte) uint64 } // NewMetricVec returns an initialized metricVec. func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec { return &MetricVec{ metricMap: &metricMap{ metrics: map[uint64][]metricWithLabelValues{}, desc: desc, newMetric: newMetric, }, hashAdd: hashAdd, hashAddByte: hashAddByte, } } // DeleteLabelValues removes the metric where the variable labels are the same // as those passed in as labels (same order as the VariableLabels in Desc). It // returns true if a metric was deleted. // // It is not an error if the number of label values is not the same as the // number of VariableLabels in Desc. However, such inconsistent label count can // never match an actual metric, so the method will always return false in that // case. // // Note that for more than one label value, this method is prone to mistakes // caused by an incorrect order of arguments. Consider Delete(Labels) as an // alternative to avoid that type of mistake. For higher label numbers, the // latter has a much more readable (albeit more verbose) syntax, but it comes // with a performance overhead (for creating and processing the Labels map). // See also the CounterVec example. func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { h, err := m.hashLabelValues(lvs) if err != nil { return false } return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) } // Delete deletes the metric where the variable labels are the same as those // passed in as labels. It returns true if a metric was deleted. // // It is not an error if the number and names of the Labels are inconsistent // with those of the VariableLabels in Desc. However, such inconsistent Labels // can never match an actual metric, so the method will always return false in // that case. // // This method is used for the same purpose as DeleteLabelValues(...string). See // there for pros and cons of the two methods. func (m *MetricVec) Delete(labels Labels) bool { h, err := m.hashLabels(labels) if err != nil { return false } return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) } // Without explicit forwarding of Describe, Collect, Reset, those methods won't // show up in GoDoc. // Describe implements Collector. func (m *MetricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) } // Collect implements Collector. func (m *MetricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) } // Reset deletes all metrics in this vector. func (m *MetricVec) Reset() { m.metricMap.Reset() } // CurryWith returns a vector curried with the provided labels, i.e. the // returned vector has those labels pre-set for all labeled operations performed // on it. The cardinality of the curried vector is reduced accordingly. 
The // order of the remaining labels stays the same (just with the curried labels // taken out of the sequence – which is relevant for the // (GetMetric)WithLabelValues methods). It is possible to curry a curried // vector, but only with labels not yet used for currying before. // // The metrics contained in the MetricVec are shared between the curried and // uncurried vectors. They are just accessed differently. Curried and uncurried // vectors behave identically in terms of collection. Only one must be // registered with a given registry (usually the uncurried version). The Reset // method deletes all metrics, even if called on a curried vector. // // Note that CurryWith is usually not called directly but through a wrapper // around MetricVec, implementing a vector for a specific Metric // implementation, for example GaugeVec. func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { var ( newCurry []curriedLabelValue oldCurry = m.curry iCurry int ) for i, label := range m.desc.variableLabels { val, ok := labels[label] if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { if ok { return nil, fmt.Errorf("label name %q is already curried", label) } newCurry = append(newCurry, oldCurry[iCurry]) iCurry++ } else { if !ok { continue // Label stays uncurried. } newCurry = append(newCurry, curriedLabelValue{i, val}) } } if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { return nil, fmt.Errorf("%d unknown label(s) found during currying", l) } return &MetricVec{ metricMap: m.metricMap, curry: newCurry, hashAdd: m.hashAdd, hashAddByte: m.hashAddByte, }, nil } // GetMetricWithLabelValues returns the Metric for the given slice of label // values (same order as the variable labels in Desc). If that combination of // label values is accessed for the first time, a new Metric is created (by // calling the newMetric function provided during construction of the // MetricVec). // // It is possible to call this method without using the returned Metric to only // create the new Metric but leave it in its initial state. // // Keeping the Metric for later use is possible (and should be considered if // performance is critical), but keep in mind that Reset, DeleteLabelValues and // Delete can be used to delete the Metric from the MetricVec. In that case, the // Metric will still exist, but it will not be exported anymore, even if a // Metric with the same label values is created later. // // An error is returned if the number of label values is not the same as the // number of variable labels in Desc (minus any curried labels). // // Note that for more than one label value, this method is prone to mistakes // caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as // an alternative to avoid that type of mistake. For higher label numbers, the // latter has a much more readable (albeit more verbose) syntax, but it comes // with a performance overhead (for creating and processing the Labels map). // // Note that GetMetricWithLabelValues is usually not called directly but through // a wrapper around MetricVec, implementing a vector for a specific Metric // implementation, for example GaugeVec. func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { h, err := m.hashLabelValues(lvs) if err != nil { return nil, err } return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil } // GetMetricWith returns the Metric for the given Labels map (the label names // must match those of the variable labels in Desc). 
If that label map is // accessed for the first time, a new Metric is created. Implications of // creating a Metric without using it and keeping the Metric for later use // are the same as for GetMetricWithLabelValues. // // An error is returned if the number and names of the Labels are inconsistent // with those of the variable labels in Desc (minus any curried labels). // // This method is used for the same purpose as // GetMetricWithLabelValues(...string). See there for pros and cons of the two // methods. // // Note that GetMetricWith is usually not called directly but through a wrapper // around MetricVec, implementing a vector for a specific Metric implementation, // for example GaugeVec. func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { h, err := m.hashLabels(labels) if err != nil { return nil, err } return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil } func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { return 0, err } var ( h = hashNew() curry = m.curry iVals, iCurry int ) for i := 0; i < len(m.desc.variableLabels); i++ { if iCurry < len(curry) && curry[iCurry].index == i { h = m.hashAdd(h, curry[iCurry].value) iCurry++ } else { h = m.hashAdd(h, vals[iVals]) iVals++ } h = m.hashAddByte(h, model.SeparatorByte) } return h, nil } func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { return 0, err } var ( h = hashNew() curry = m.curry iCurry int ) for i, label := range m.desc.variableLabels { val, ok := labels[label] if iCurry < len(curry) && curry[iCurry].index == i { if ok { return 0, fmt.Errorf("label name %q is already curried", label) } h = m.hashAdd(h, curry[iCurry].value) iCurry++ } else { if !ok { return 0, fmt.Errorf("label name %q missing in label map", label) } h = m.hashAdd(h, val) } h = m.hashAddByte(h, model.SeparatorByte) } return h, nil } // metricWithLabelValues provides the metric and its label values for // disambiguation on hash collision. type metricWithLabelValues struct { values []string metric Metric } // curriedLabelValue sets the curried value for a label at the given index. type curriedLabelValue struct { index int value string } // metricMap is a helper for metricVec and shared between differently curried // metricVecs. type metricMap struct { mtx sync.RWMutex // Protects metrics. metrics map[uint64][]metricWithLabelValues desc *Desc newMetric func(labelValues ...string) Metric } // Describe implements Collector. It will send exactly one Desc to the provided // channel. func (m *metricMap) Describe(ch chan<- *Desc) { ch <- m.desc } // Collect implements Collector. func (m *metricMap) Collect(ch chan<- Metric) { m.mtx.RLock() defer m.mtx.RUnlock() for _, metrics := range m.metrics { for _, metric := range metrics { ch <- metric.metric } } } // Reset deletes all metrics in this vector. func (m *metricMap) Reset() { m.mtx.Lock() defer m.mtx.Unlock() for h := range m.metrics { delete(m.metrics, h) } } // deleteByHashWithLabelValues removes the metric from the hash bucket h. If // there are multiple matches in the bucket, use lvs to select a metric and // remove only that metric. 
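// exampleInfoVec is an illustrative sketch added alongside this section; it is
// not part of the upstream file. It follows the FooVec pattern described in
// the MetricVec documentation above: embed *MetricVec, construct it with
// NewMetricVec, and wrap GetMetricWithLabelValues with a panicking convenience
// method. The constant value of 1 makes it behave like a simple "info"-style
// metric and only serves the example.
type exampleInfoVec struct {
	*MetricVec
}

func newExampleInfoVec(name, help string, labelNames []string) *exampleInfoVec {
	desc := NewDesc(name, help, labelNames, nil)
	return &exampleInfoVec{
		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
			return MustNewConstMetric(desc, GaugeValue, 1, lvs...)
		}),
	}
}

func (v *exampleInfoVec) WithLabelValues(lvs ...string) Metric {
	m, err := v.GetMetricWithLabelValues(lvs...)
	if err != nil {
		panic(err)
	}
	return m
}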
func (m *metricMap) deleteByHashWithLabelValues( h uint64, lvs []string, curry []curriedLabelValue, ) bool { m.mtx.Lock() defer m.mtx.Unlock() metrics, ok := m.metrics[h] if !ok { return false } i := findMetricWithLabelValues(metrics, lvs, curry) if i >= len(metrics) { return false } if len(metrics) > 1 { old := metrics m.metrics[h] = append(metrics[:i], metrics[i+1:]...) old[len(old)-1] = metricWithLabelValues{} } else { delete(m.metrics, h) } return true } // deleteByHashWithLabels removes the metric from the hash bucket h. If there // are multiple matches in the bucket, use lvs to select a metric and remove // only that metric. func (m *metricMap) deleteByHashWithLabels( h uint64, labels Labels, curry []curriedLabelValue, ) bool { m.mtx.Lock() defer m.mtx.Unlock() metrics, ok := m.metrics[h] if !ok { return false } i := findMetricWithLabels(m.desc, metrics, labels, curry) if i >= len(metrics) { return false } if len(metrics) > 1 { old := metrics m.metrics[h] = append(metrics[:i], metrics[i+1:]...) old[len(old)-1] = metricWithLabelValues{} } else { delete(m.metrics, h) } return true } // getOrCreateMetricWithLabelValues retrieves the metric by hash and label value // or creates it and returns the new one. // // This function holds the mutex. func (m *metricMap) getOrCreateMetricWithLabelValues( hash uint64, lvs []string, curry []curriedLabelValue, ) Metric { m.mtx.RLock() metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry) m.mtx.RUnlock() if ok { return metric } m.mtx.Lock() defer m.mtx.Unlock() metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry) if !ok { inlinedLVs := inlineLabelValues(lvs, curry) metric = m.newMetric(inlinedLVs...) m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric}) } return metric } // getOrCreateMetricWithLabelValues retrieves the metric by hash and label value // or creates it and returns the new one. // // This function holds the mutex. func (m *metricMap) getOrCreateMetricWithLabels( hash uint64, labels Labels, curry []curriedLabelValue, ) Metric { m.mtx.RLock() metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry) m.mtx.RUnlock() if ok { return metric } m.mtx.Lock() defer m.mtx.Unlock() metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry) if !ok { lvs := extractLabelValues(m.desc, labels, curry) metric = m.newMetric(lvs...) m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric}) } return metric } // getMetricWithHashAndLabelValues gets a metric while handling possible // collisions in the hash space. Must be called while holding the read mutex. func (m *metricMap) getMetricWithHashAndLabelValues( h uint64, lvs []string, curry []curriedLabelValue, ) (Metric, bool) { metrics, ok := m.metrics[h] if ok { if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) { return metrics[i].metric, true } } return nil, false } // getMetricWithHashAndLabels gets a metric while handling possible collisions in // the hash space. Must be called while holding read mutex. func (m *metricMap) getMetricWithHashAndLabels( h uint64, labels Labels, curry []curriedLabelValue, ) (Metric, bool) { metrics, ok := m.metrics[h] if ok { if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) { return metrics[i].metric, true } } return nil, false } // findMetricWithLabelValues returns the index of the matching metric or // len(metrics) if not found. 
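// exampleCurriedCounterVec is an illustrative sketch added alongside this
// section; it is not part of the upstream file. It shows currying from the
// caller's perspective: after MustCurryWith, the "handler" label is pre-set
// and only the remaining "method" label is supplied, while metric storage
// stays shared with the uncurried vector. The metric name is hypothetical.
func exampleCurriedCounterVec() {
	requests := NewCounterVec(CounterOpts{
		Name: "example_requests_total",
		Help: "Illustrative request counter.",
	}, []string{"handler", "method"})

	apiRequests := requests.MustCurryWith(Labels{"handler": "api"})
	// Equivalent to requests.WithLabelValues("api", "GET").Inc().
	apiRequests.WithLabelValues("GET").Inc()
}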
func findMetricWithLabelValues( metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue, ) int { for i, metric := range metrics { if matchLabelValues(metric.values, lvs, curry) { return i } } return len(metrics) } // findMetricWithLabels returns the index of the matching metric or len(metrics) // if not found. func findMetricWithLabels( desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, ) int { for i, metric := range metrics { if matchLabels(desc, metric.values, labels, curry) { return i } } return len(metrics) } func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool { if len(values) != len(lvs)+len(curry) { return false } var iLVs, iCurry int for i, v := range values { if iCurry < len(curry) && curry[iCurry].index == i { if v != curry[iCurry].value { return false } iCurry++ continue } if v != lvs[iLVs] { return false } iLVs++ } return true } func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { if len(values) != len(labels)+len(curry) { return false } iCurry := 0 for i, k := range desc.variableLabels { if iCurry < len(curry) && curry[iCurry].index == i { if values[i] != curry[iCurry].value { return false } iCurry++ continue } if values[i] != labels[k] { return false } } return true } func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { labelValues := make([]string, len(labels)+len(curry)) iCurry := 0 for i, k := range desc.variableLabels { if iCurry < len(curry) && curry[iCurry].index == i { labelValues[i] = curry[iCurry].value iCurry++ continue } labelValues[i] = labels[k] } return labelValues } func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { labelValues := make([]string, len(lvs)+len(curry)) var iCurry, iLVs int for i := range labelValues { if iCurry < len(curry) && curry[iCurry].index == i { labelValues[i] = curry[iCurry].value iCurry++ continue } labelValues[i] = lvs[iLVs] iLVs++ } return labelValues } client_golang-1.11.0/prometheus/vec_test.go000066400000000000000000000354411405741072000207440ustar00rootroot00000000000000// Copyright 2014 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package prometheus import ( "fmt" "testing" dto "github.com/prometheus/client_model/go" ) func TestDelete(t *testing.T) { vec := NewGaugeVec( GaugeOpts{ Name: "test", Help: "helpless", }, []string{"l1", "l2"}, ) testDelete(t, vec) } func TestDeleteWithCollisions(t *testing.T) { vec := NewGaugeVec( GaugeOpts{ Name: "test", Help: "helpless", }, []string{"l1", "l2"}, ) vec.hashAdd = func(h uint64, s string) uint64 { return 1 } vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 } testDelete(t, vec) } func testDelete(t *testing.T, vec *GaugeVec) { if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want { t.Errorf("got %v, want %v", got, want) } vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), true; got != want { t.Errorf("got %v, want %v", got, want) } if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want { t.Errorf("got %v, want %v", got, want) } vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), true; got != want { t.Errorf("got %v, want %v", got, want) } if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), false; got != want { t.Errorf("got %v, want %v", got, want) } vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) if got, want := vec.Delete(Labels{"l2": "v1", "l1": "v2"}), false; got != want { t.Errorf("got %v, want %v", got, want) } if got, want := vec.Delete(Labels{"l1": "v1"}), false; got != want { t.Errorf("got %v, want %v", got, want) } } func TestDeleteLabelValues(t *testing.T) { vec := NewGaugeVec( GaugeOpts{ Name: "test", Help: "helpless", }, []string{"l1", "l2"}, ) testDeleteLabelValues(t, vec) } func TestDeleteLabelValuesWithCollisions(t *testing.T) { vec := NewGaugeVec( GaugeOpts{ Name: "test", Help: "helpless", }, []string{"l1", "l2"}, ) vec.hashAdd = func(h uint64, s string) uint64 { return 1 } vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 } testDeleteLabelValues(t, vec) } func testDeleteLabelValues(t *testing.T, vec *GaugeVec) { if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want { t.Errorf("got %v, want %v", got, want) } vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) vec.With(Labels{"l1": "v1", "l2": "v3"}).(Gauge).Set(42) // Add junk data for collision. if got, want := vec.DeleteLabelValues("v1", "v2"), true; got != want { t.Errorf("got %v, want %v", got, want) } if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want { t.Errorf("got %v, want %v", got, want) } if got, want := vec.DeleteLabelValues("v1", "v3"), true; got != want { t.Errorf("got %v, want %v", got, want) } vec.With(Labels{"l1": "v1", "l2": "v2"}).(Gauge).Set(42) // Delete out of order. if got, want := vec.DeleteLabelValues("v2", "v1"), false; got != want { t.Errorf("got %v, want %v", got, want) } if got, want := vec.DeleteLabelValues("v1"), false; got != want { t.Errorf("got %v, want %v", got, want) } } func TestMetricVec(t *testing.T) { vec := NewGaugeVec( GaugeOpts{ Name: "test", Help: "helpless", }, []string{"l1", "l2"}, ) testMetricVec(t, vec) } func TestMetricVecWithCollisions(t *testing.T) { vec := NewGaugeVec( GaugeOpts{ Name: "test", Help: "helpless", }, []string{"l1", "l2"}, ) vec.hashAdd = func(h uint64, s string) uint64 { return 1 } vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 } testMetricVec(t, vec) } func testMetricVec(t *testing.T, vec *GaugeVec) { vec.Reset() // Actually test Reset now! 
var pair [2]string // Keep track of metrics. expected := map[[2]string]int{} for i := 0; i < 1000; i++ { pair[0], pair[1] = fmt.Sprint(i%4), fmt.Sprint(i%5) // Varying combinations multiples. expected[pair]++ vec.WithLabelValues(pair[0], pair[1]).Inc() expected[[2]string{"v1", "v2"}]++ vec.WithLabelValues("v1", "v2").(Gauge).Inc() } var total int for _, metrics := range vec.metricMap.metrics { for _, metric := range metrics { total++ copy(pair[:], metric.values) var metricOut dto.Metric if err := metric.metric.Write(&metricOut); err != nil { t.Fatal(err) } actual := *metricOut.Gauge.Value var actualPair [2]string for i, label := range metricOut.Label { actualPair[i] = *label.Value } // Test output pair against metric.values to ensure we've selected // the right one. We check this to ensure the below check means // anything at all. if actualPair != pair { t.Fatalf("unexpected pair association in metric map: %v != %v", actualPair, pair) } if actual != float64(expected[pair]) { t.Fatalf("incorrect counter value for %v: %v != %v", pair, actual, expected[pair]) } } } if total != len(expected) { t.Fatalf("unexpected number of metrics: %v != %v", total, len(expected)) } vec.Reset() if len(vec.metricMap.metrics) > 0 { t.Fatalf("reset failed") } } func TestCounterVecEndToEndWithCollision(t *testing.T) { vec := NewCounterVec( CounterOpts{ Name: "test", Help: "helpless", }, []string{"labelname"}, ) vec.WithLabelValues("77kepQFQ8Kl").Inc() vec.WithLabelValues("!0IC=VloaY").Add(2) m := &dto.Metric{} if err := vec.WithLabelValues("77kepQFQ8Kl").Write(m); err != nil { t.Fatal(err) } if got, want := m.GetLabel()[0].GetValue(), "77kepQFQ8Kl"; got != want { t.Errorf("got label value %q, want %q", got, want) } if got, want := m.GetCounter().GetValue(), 1.; got != want { t.Errorf("got value %f, want %f", got, want) } m.Reset() if err := vec.WithLabelValues("!0IC=VloaY").Write(m); err != nil { t.Fatal(err) } if got, want := m.GetLabel()[0].GetValue(), "!0IC=VloaY"; got != want { t.Errorf("got label value %q, want %q", got, want) } if got, want := m.GetCounter().GetValue(), 2.; got != want { t.Errorf("got value %f, want %f", got, want) } } func TestCurryVec(t *testing.T) { vec := NewCounterVec( CounterOpts{ Name: "test", Help: "helpless", }, []string{"one", "two", "three"}, ) testCurryVec(t, vec) } func TestCurryVecWithCollisions(t *testing.T) { vec := NewCounterVec( CounterOpts{ Name: "test", Help: "helpless", }, []string{"one", "two", "three"}, ) vec.hashAdd = func(h uint64, s string) uint64 { return 1 } vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 } testCurryVec(t, vec) } func testCurryVec(t *testing.T, vec *CounterVec) { assertMetrics := func(t *testing.T) { n := 0 for _, m := range vec.metricMap.metrics { n += len(m) } if n != 2 { t.Error("expected two metrics, got", n) } m := &dto.Metric{} c1, err := vec.GetMetricWithLabelValues("1", "2", "3") if err != nil { t.Fatal("unexpected error getting metric:", err) } c1.Write(m) if want, got := 1., m.GetCounter().GetValue(); want != got { t.Errorf("want %f as counter value, got %f", want, got) } m.Reset() c2, err := vec.GetMetricWithLabelValues("11", "22", "33") if err != nil { t.Fatal("unexpected error getting metric:", err) } c2.Write(m) if want, got := 1., m.GetCounter().GetValue(); want != got { t.Errorf("want %f as counter value, got %f", want, got) } } assertNoMetric := func(t *testing.T) { if n := len(vec.metricMap.metrics); n != 0 { t.Error("expected no metrics, got", n) } } t.Run("zero labels", func(t *testing.T) { c1 := 
vec.MustCurryWith(nil) c2 := vec.MustCurryWith(nil) c1.WithLabelValues("1", "2", "3").Inc() c2.With(Labels{"one": "11", "two": "22", "three": "33"}).Inc() assertMetrics(t) if !c1.Delete(Labels{"one": "1", "two": "2", "three": "3"}) { t.Error("deletion failed") } if !c2.DeleteLabelValues("11", "22", "33") { t.Error("deletion failed") } assertNoMetric(t) }) t.Run("first label", func(t *testing.T) { c1 := vec.MustCurryWith(Labels{"one": "1"}) c2 := vec.MustCurryWith(Labels{"one": "11"}) c1.WithLabelValues("2", "3").Inc() c2.With(Labels{"two": "22", "three": "33"}).Inc() assertMetrics(t) if c1.Delete(Labels{"two": "22", "three": "33"}) { t.Error("deletion unexpectedly succeeded") } if c2.DeleteLabelValues("2", "3") { t.Error("deletion unexpectedly succeeded") } if !c1.Delete(Labels{"two": "2", "three": "3"}) { t.Error("deletion failed") } if !c2.DeleteLabelValues("22", "33") { t.Error("deletion failed") } assertNoMetric(t) }) t.Run("middle label", func(t *testing.T) { c1 := vec.MustCurryWith(Labels{"two": "2"}) c2 := vec.MustCurryWith(Labels{"two": "22"}) c1.WithLabelValues("1", "3").Inc() c2.With(Labels{"one": "11", "three": "33"}).Inc() assertMetrics(t) if c1.Delete(Labels{"one": "11", "three": "33"}) { t.Error("deletion unexpectedly succeeded") } if c2.DeleteLabelValues("1", "3") { t.Error("deletion unexpectedly succeeded") } if !c1.Delete(Labels{"one": "1", "three": "3"}) { t.Error("deletion failed") } if !c2.DeleteLabelValues("11", "33") { t.Error("deletion failed") } assertNoMetric(t) }) t.Run("last label", func(t *testing.T) { c1 := vec.MustCurryWith(Labels{"three": "3"}) c2 := vec.MustCurryWith(Labels{"three": "33"}) c1.WithLabelValues("1", "2").Inc() c2.With(Labels{"one": "11", "two": "22"}).Inc() assertMetrics(t) if c1.Delete(Labels{"two": "22", "one": "11"}) { t.Error("deletion unexpectedly succeeded") } if c2.DeleteLabelValues("1", "2") { t.Error("deletion unexpectedly succeeded") } if !c1.Delete(Labels{"two": "2", "one": "1"}) { t.Error("deletion failed") } if !c2.DeleteLabelValues("11", "22") { t.Error("deletion failed") } assertNoMetric(t) }) t.Run("two labels", func(t *testing.T) { c1 := vec.MustCurryWith(Labels{"three": "3", "one": "1"}) c2 := vec.MustCurryWith(Labels{"three": "33", "one": "11"}) c1.WithLabelValues("2").Inc() c2.With(Labels{"two": "22"}).Inc() assertMetrics(t) if c1.Delete(Labels{"two": "22"}) { t.Error("deletion unexpectedly succeeded") } if c2.DeleteLabelValues("2") { t.Error("deletion unexpectedly succeeded") } if !c1.Delete(Labels{"two": "2"}) { t.Error("deletion failed") } if !c2.DeleteLabelValues("22") { t.Error("deletion failed") } assertNoMetric(t) }) t.Run("all labels", func(t *testing.T) { c1 := vec.MustCurryWith(Labels{"three": "3", "two": "2", "one": "1"}) c2 := vec.MustCurryWith(Labels{"three": "33", "one": "11", "two": "22"}) c1.WithLabelValues().Inc() c2.With(nil).Inc() assertMetrics(t) if !c1.Delete(Labels{}) { t.Error("deletion failed") } if !c2.DeleteLabelValues() { t.Error("deletion failed") } assertNoMetric(t) }) t.Run("double curry", func(t *testing.T) { c1 := vec.MustCurryWith(Labels{"three": "3"}).MustCurryWith(Labels{"one": "1"}) c2 := vec.MustCurryWith(Labels{"three": "33"}).MustCurryWith(Labels{"one": "11"}) c1.WithLabelValues("2").Inc() c2.With(Labels{"two": "22"}).Inc() assertMetrics(t) if c1.Delete(Labels{"two": "22"}) { t.Error("deletion unexpectedly succeeded") } if c2.DeleteLabelValues("2") { t.Error("deletion unexpectedly succeeded") } if !c1.Delete(Labels{"two": "2"}) { t.Error("deletion failed") } if 
!c2.DeleteLabelValues("22") { t.Error("deletion failed") } assertNoMetric(t) }) t.Run("use already curried label", func(t *testing.T) { c1 := vec.MustCurryWith(Labels{"three": "3"}) if _, err := c1.GetMetricWithLabelValues("1", "2", "3"); err == nil { t.Error("expected error when using already curried label") } if _, err := c1.GetMetricWith(Labels{"one": "1", "two": "2", "three": "3"}); err == nil { t.Error("expected error when using already curried label") } assertNoMetric(t) c1.WithLabelValues("1", "2").Inc() if c1.Delete(Labels{"one": "1", "two": "2", "three": "3"}) { t.Error("deletion unexpectedly succeeded") } if !c1.Delete(Labels{"one": "1", "two": "2"}) { t.Error("deletion failed") } assertNoMetric(t) }) t.Run("curry already curried label", func(t *testing.T) { if _, err := vec.MustCurryWith(Labels{"three": "3"}).CurryWith(Labels{"three": "33"}); err == nil { t.Error("currying unexpectedly succeeded") } else if err.Error() != `label name "three" is already curried` { t.Error("currying returned unexpected error:", err) } }) t.Run("unknown label", func(t *testing.T) { if _, err := vec.CurryWith(Labels{"foo": "bar"}); err == nil { t.Error("currying unexpectedly succeeded") } else if err.Error() != "1 unknown label(s) found during currying" { t.Error("currying returned unexpected error:", err) } }) } func BenchmarkMetricVecWithLabelValuesBasic(b *testing.B) { benchmarkMetricVecWithLabelValues(b, map[string][]string{ "l1": {"onevalue"}, "l2": {"twovalue"}, }) } func BenchmarkMetricVecWithLabelValues2Keys10ValueCardinality(b *testing.B) { benchmarkMetricVecWithLabelValuesCardinality(b, 2, 10) } func BenchmarkMetricVecWithLabelValues4Keys10ValueCardinality(b *testing.B) { benchmarkMetricVecWithLabelValuesCardinality(b, 4, 10) } func BenchmarkMetricVecWithLabelValues2Keys100ValueCardinality(b *testing.B) { benchmarkMetricVecWithLabelValuesCardinality(b, 2, 100) } func BenchmarkMetricVecWithLabelValues10Keys100ValueCardinality(b *testing.B) { benchmarkMetricVecWithLabelValuesCardinality(b, 10, 100) } func BenchmarkMetricVecWithLabelValues10Keys1000ValueCardinality(b *testing.B) { benchmarkMetricVecWithLabelValuesCardinality(b, 10, 1000) } func benchmarkMetricVecWithLabelValuesCardinality(b *testing.B, nkeys, nvalues int) { labels := map[string][]string{} for i := 0; i < nkeys; i++ { var ( k = fmt.Sprintf("key-%v", i) vs = make([]string, 0, nvalues) ) for j := 0; j < nvalues; j++ { vs = append(vs, fmt.Sprintf("value-%v", j)) } labels[k] = vs } benchmarkMetricVecWithLabelValues(b, labels) } func benchmarkMetricVecWithLabelValues(b *testing.B, labels map[string][]string) { var keys []string for k := range labels { // Map order dependent, who cares though. keys = append(keys, k) } values := make([]string, len(labels)) // Value cache for permutations. vec := NewGaugeVec( GaugeOpts{ Name: "test", Help: "helpless", }, keys, ) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { // Varies input across provide map entries based on key size. for j, k := range keys { candidates := labels[k] values[j] = candidates[i%len(candidates)] } vec.WithLabelValues(values...) } } client_golang-1.11.0/prometheus/wrap.go000066400000000000000000000152171405741072000201000ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus import ( "fmt" "sort" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" dto "github.com/prometheus/client_model/go" ) // WrapRegistererWith returns a Registerer wrapping the provided // Registerer. Collectors registered with the returned Registerer will be // registered with the wrapped Registerer in a modified way. The modified // Collector adds the provided Labels to all Metrics it collects (as // ConstLabels). The Metrics collected by the unmodified Collector must not // duplicate any of those labels. Wrapping a nil value is valid, resulting // in a no-op Registerer. // // WrapRegistererWith provides a way to add fixed labels to a subset of // Collectors. It should not be used to add fixed labels to all metrics // exposed. See also // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels // // Conflicts between Collectors registered through the original Registerer with // Collectors registered through the wrapping Registerer will still be // detected. Any AlreadyRegisteredError returned by the Register method of // either Registerer will contain the ExistingCollector in the form it was // provided to the respective registry. // // The Collector example demonstrates a use of WrapRegistererWith. func WrapRegistererWith(labels Labels, reg Registerer) Registerer { return &wrappingRegisterer{ wrappedRegisterer: reg, labels: labels, } } // WrapRegistererWithPrefix returns a Registerer wrapping the provided // Registerer. Collectors registered with the returned Registerer will be // registered with the wrapped Registerer in a modified way. The modified // Collector adds the provided prefix to the name of all Metrics it collects. // Wrapping a nil value is valid, resulting in a no-op Registerer. // // WrapRegistererWithPrefix is useful to have one place to prefix all metrics of // a sub-system. To make this work, register metrics of the sub-system with the // wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful // to use the same prefix for all metrics exposed. In particular, do not prefix // metric names that are standardized across applications, as that would break // horizontal monitoring, for example the metrics provided by the Go collector // (see NewGoCollector) and the process collector (see NewProcessCollector). (In // fact, those metrics are already prefixed with “go_” or “process_”, // respectively.) // // Conflicts between Collectors registered through the original Registerer with // Collectors registered through the wrapping Registerer will still be // detected. Any AlreadyRegisteredError returned by the Register method of // either Registerer will contain the ExistingCollector in the form it was // provided to the respective registry. 
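// exampleSubsystemRegisterer is an illustrative sketch added alongside this
// section; it is not part of the upstream file. It combines the two wrappers
// documented above so that every metric of one sub-system gets a common name
// prefix and a fixed label while still landing in the shared registry. The
// prefix and label values are hypothetical.
func exampleSubsystemRegisterer(reg Registerer) Registerer {
	return WrapRegistererWith(
		Labels{"subsystem": "cache"},
		WrapRegistererWithPrefix("example_", reg),
	)
}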
func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer { return &wrappingRegisterer{ wrappedRegisterer: reg, prefix: prefix, } } type wrappingRegisterer struct { wrappedRegisterer Registerer prefix string labels Labels } func (r *wrappingRegisterer) Register(c Collector) error { if r.wrappedRegisterer == nil { return nil } return r.wrappedRegisterer.Register(&wrappingCollector{ wrappedCollector: c, prefix: r.prefix, labels: r.labels, }) } func (r *wrappingRegisterer) MustRegister(cs ...Collector) { if r.wrappedRegisterer == nil { return } for _, c := range cs { if err := r.Register(c); err != nil { panic(err) } } } func (r *wrappingRegisterer) Unregister(c Collector) bool { if r.wrappedRegisterer == nil { return false } return r.wrappedRegisterer.Unregister(&wrappingCollector{ wrappedCollector: c, prefix: r.prefix, labels: r.labels, }) } type wrappingCollector struct { wrappedCollector Collector prefix string labels Labels } func (c *wrappingCollector) Collect(ch chan<- Metric) { wrappedCh := make(chan Metric) go func() { c.wrappedCollector.Collect(wrappedCh) close(wrappedCh) }() for m := range wrappedCh { ch <- &wrappingMetric{ wrappedMetric: m, prefix: c.prefix, labels: c.labels, } } } func (c *wrappingCollector) Describe(ch chan<- *Desc) { wrappedCh := make(chan *Desc) go func() { c.wrappedCollector.Describe(wrappedCh) close(wrappedCh) }() for desc := range wrappedCh { ch <- wrapDesc(desc, c.prefix, c.labels) } } func (c *wrappingCollector) unwrapRecursively() Collector { switch wc := c.wrappedCollector.(type) { case *wrappingCollector: return wc.unwrapRecursively() default: return wc } } type wrappingMetric struct { wrappedMetric Metric prefix string labels Labels } func (m *wrappingMetric) Desc() *Desc { return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels) } func (m *wrappingMetric) Write(out *dto.Metric) error { if err := m.wrappedMetric.Write(out); err != nil { return err } if len(m.labels) == 0 { // No wrapping labels. return nil } for ln, lv := range m.labels { out.Label = append(out.Label, &dto.LabelPair{ Name: proto.String(ln), Value: proto.String(lv), }) } sort.Sort(labelPairSorter(out.Label)) return nil } func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc { constLabels := Labels{} for _, lp := range desc.constLabelPairs { constLabels[*lp.Name] = *lp.Value } for ln, lv := range labels { if _, alreadyUsed := constLabels[ln]; alreadyUsed { return &Desc{ fqName: desc.fqName, help: desc.help, variableLabels: desc.variableLabels, constLabelPairs: desc.constLabelPairs, err: fmt.Errorf("attempted wrapping with already existing label name %q", ln), } } constLabels[ln] = lv } // NewDesc will do remaining validations. newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) // Propagate errors if there was any. This will override any errer // created by NewDesc above, i.e. earlier errors get precedence. if desc.err != nil { newDesc.err = desc.err } return newDesc } client_golang-1.11.0/prometheus/wrap_test.go000066400000000000000000000232741405741072000211410ustar00rootroot00000000000000// Copyright 2018 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package prometheus import ( "fmt" "reflect" "strings" "testing" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/golang/protobuf/proto" dto "github.com/prometheus/client_model/go" ) // uncheckedCollector wraps a Collector but its Describe method yields no Desc. type uncheckedCollector struct { c Collector } func (u uncheckedCollector) Describe(_ chan<- *Desc) {} func (u uncheckedCollector) Collect(c chan<- Metric) { u.c.Collect(c) } func toMetricFamilies(cs ...Collector) []*dto.MetricFamily { reg := NewRegistry() reg.MustRegister(cs...) out, err := reg.Gather() if err != nil { panic(err) } return out } func TestWrap(t *testing.T) { simpleCnt := NewCounter(CounterOpts{ Name: "simpleCnt", Help: "helpSimpleCnt", }) simpleCnt.Inc() simpleGge := NewGauge(GaugeOpts{ Name: "simpleGge", Help: "helpSimpleGge", }) simpleGge.Set(3.14) preCnt := NewCounter(CounterOpts{ Name: "pre_simpleCnt", Help: "helpSimpleCnt", }) preCnt.Inc() barLabeledCnt := NewCounter(CounterOpts{ Name: "simpleCnt", Help: "helpSimpleCnt", ConstLabels: Labels{"foo": "bar"}, }) barLabeledCnt.Inc() bazLabeledCnt := NewCounter(CounterOpts{ Name: "simpleCnt", Help: "helpSimpleCnt", ConstLabels: Labels{"foo": "baz"}, }) bazLabeledCnt.Inc() labeledPreCnt := NewCounter(CounterOpts{ Name: "pre_simpleCnt", Help: "helpSimpleCnt", ConstLabels: Labels{"foo": "bar"}, }) labeledPreCnt.Inc() twiceLabeledPreCnt := NewCounter(CounterOpts{ Name: "pre_simpleCnt", Help: "helpSimpleCnt", ConstLabels: Labels{"foo": "bar", "dings": "bums"}, }) twiceLabeledPreCnt.Inc() barLabeledUncheckedCollector := uncheckedCollector{barLabeledCnt} scenarios := map[string]struct { prefix string // First wrap with this prefix. labels Labels // Then wrap the result with these labels. labels2 Labels // If any, wrap the prefix-wrapped one again. preRegister []Collector toRegister []struct { // If there are any labels2, register every other with that one. 
collector Collector registrationFails bool } gatherFails bool output []Collector }{ "wrap nothing": { prefix: "pre_", labels: Labels{"foo": "bar"}, }, "wrap with nothing": { preRegister: []Collector{simpleGge}, toRegister: []struct { collector Collector registrationFails bool }{{simpleCnt, false}}, output: []Collector{simpleGge, simpleCnt}, }, "wrap counter with prefix": { prefix: "pre_", preRegister: []Collector{simpleGge}, toRegister: []struct { collector Collector registrationFails bool }{{simpleCnt, false}}, output: []Collector{simpleGge, preCnt}, }, "wrap counter with label pair": { labels: Labels{"foo": "bar"}, preRegister: []Collector{simpleGge}, toRegister: []struct { collector Collector registrationFails bool }{{simpleCnt, false}}, output: []Collector{simpleGge, barLabeledCnt}, }, "wrap counter with label pair and prefix": { prefix: "pre_", labels: Labels{"foo": "bar"}, preRegister: []Collector{simpleGge}, toRegister: []struct { collector Collector registrationFails bool }{{simpleCnt, false}}, output: []Collector{simpleGge, labeledPreCnt}, }, "wrap counter with invalid prefix": { prefix: "1+1", preRegister: []Collector{simpleGge}, toRegister: []struct { collector Collector registrationFails bool }{{simpleCnt, true}}, output: []Collector{simpleGge}, }, "wrap counter with invalid label": { preRegister: []Collector{simpleGge}, labels: Labels{"42": "bar"}, toRegister: []struct { collector Collector registrationFails bool }{{simpleCnt, true}}, output: []Collector{simpleGge}, }, "counter registered twice but wrapped with different label values": { labels: Labels{"foo": "bar"}, labels2: Labels{"foo": "baz"}, toRegister: []struct { collector Collector registrationFails bool }{{simpleCnt, false}, {simpleCnt, false}}, output: []Collector{barLabeledCnt, bazLabeledCnt}, }, "counter registered twice but wrapped with different inconsistent label values": { labels: Labels{"foo": "bar"}, labels2: Labels{"bar": "baz"}, toRegister: []struct { collector Collector registrationFails bool }{{simpleCnt, false}, {simpleCnt, true}}, output: []Collector{barLabeledCnt}, }, "wrap counter with prefix and two labels": { prefix: "pre_", labels: Labels{"foo": "bar", "dings": "bums"}, preRegister: []Collector{simpleGge}, toRegister: []struct { collector Collector registrationFails bool }{{simpleCnt, false}}, output: []Collector{simpleGge, twiceLabeledPreCnt}, }, "wrap labeled counter with prefix and another label": { prefix: "pre_", labels: Labels{"dings": "bums"}, preRegister: []Collector{simpleGge}, toRegister: []struct { collector Collector registrationFails bool }{{barLabeledCnt, false}}, output: []Collector{simpleGge, twiceLabeledPreCnt}, }, "wrap labeled counter with prefix and inconsistent label": { prefix: "pre_", labels: Labels{"foo": "bums"}, preRegister: []Collector{simpleGge}, toRegister: []struct { collector Collector registrationFails bool }{{barLabeledCnt, true}}, output: []Collector{simpleGge}, }, "wrap labeled counter with prefix and the same label again": { prefix: "pre_", labels: Labels{"foo": "bar"}, preRegister: []Collector{simpleGge}, toRegister: []struct { collector Collector registrationFails bool }{{barLabeledCnt, true}}, output: []Collector{simpleGge}, }, "wrap labeled unchecked collector with prefix and another label": { prefix: "pre_", labels: Labels{"dings": "bums"}, preRegister: []Collector{simpleGge}, toRegister: []struct { collector Collector registrationFails bool }{{barLabeledUncheckedCollector, false}}, output: []Collector{simpleGge, twiceLabeledPreCnt}, }, "wrap labeled 
unchecked collector with prefix and inconsistent label": { prefix: "pre_", labels: Labels{"foo": "bums"}, preRegister: []Collector{simpleGge}, toRegister: []struct { collector Collector registrationFails bool }{{barLabeledUncheckedCollector, false}}, gatherFails: true, output: []Collector{simpleGge}, }, "wrap labeled unchecked collector with prefix and the same label again": { prefix: "pre_", labels: Labels{"foo": "bar"}, preRegister: []Collector{simpleGge}, toRegister: []struct { collector Collector registrationFails bool }{{barLabeledUncheckedCollector, false}}, gatherFails: true, output: []Collector{simpleGge}, }, "wrap labeled unchecked collector with prefix and another label resulting in collision with pre-registered counter": { prefix: "pre_", labels: Labels{"dings": "bums"}, preRegister: []Collector{twiceLabeledPreCnt}, toRegister: []struct { collector Collector registrationFails bool }{{barLabeledUncheckedCollector, false}}, gatherFails: true, output: []Collector{twiceLabeledPreCnt}, }, } for n, s := range scenarios { t.Run(n, func(t *testing.T) { reg := NewPedanticRegistry() for _, c := range s.preRegister { if err := reg.Register(c); err != nil { t.Fatal("error registering with unwrapped registry:", err) } } preReg := WrapRegistererWithPrefix(s.prefix, reg) lReg := WrapRegistererWith(s.labels, preReg) l2Reg := WrapRegistererWith(s.labels2, preReg) for i, tr := range s.toRegister { var err error if i%2 != 0 && len(s.labels2) != 0 { err = l2Reg.Register(tr.collector) } else { err = lReg.Register(tr.collector) } if tr.registrationFails && err == nil { t.Fatalf("registration with wrapping registry unexpectedly succeeded for collector #%d", i) } if !tr.registrationFails && err != nil { t.Fatalf("registration with wrapping registry failed for collector #%d: %s", i, err) } } wantMF := toMetricFamilies(s.output...) gotMF, err := reg.Gather() if s.gatherFails && err == nil { t.Fatal("gathering unexpectedly succeeded") } if !s.gatherFails && err != nil { t.Fatal("gathering failed:", err) } if !reflect.DeepEqual(gotMF, wantMF) { var want, got []string for i, mf := range wantMF { want = append(want, fmt.Sprintf("%3d: %s", i, proto.MarshalTextString(mf))) } for i, mf := range gotMF { got = append(got, fmt.Sprintf("%3d: %s", i, proto.MarshalTextString(mf))) } t.Fatalf( "unexpected output of gathering:\n\nWANT:\n%s\n\nGOT:\n%s\n", strings.Join(want, "\n"), strings.Join(got, "\n"), ) } }) } } func TestNil(t *testing.T) { // A wrapped nil registerer should be treated as a no-op, and not panic. c := NewCounter(CounterOpts{Name: "test"}) err := WrapRegistererWith(Labels{"foo": "bar"}, nil).Register(c) if err != nil { t.Fatal("registering failed:", err) } }
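// exampleOptionalRegistration is an illustrative sketch added alongside this
// section; it is not part of the upstream file. It demonstrates the behaviour
// exercised by TestNil above: wrapping a nil Registerer yields a no-op, so a
// library can accept an optional registry without guarding every registration
// site. The counter name and label are hypothetical.
func exampleOptionalRegistration(reg Registerer) Counter {
	c := NewCounter(CounterOpts{
		Name: "example_optional_total",
		Help: "Illustrative counter.",
	})
	// With reg == nil the wrapped Registerer silently drops the registration.
	WrapRegistererWith(Labels{"component": "example"}, reg).MustRegister(c)
	return c
}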